It doesn't seem necessary to have the redundant layer of splitting:
__blkdev_issue_discard() can cap and align the discard requests itself.
The request size will even be more consistent and aligned to the cap.

Signed-off-by: Tom Yan <tom.ty89@xxxxxxxxx>
---
 block/blk-lib.c   | 11 +++++++++--
 block/blk-merge.c |  2 +-
 block/blk.h       |  8 ++++++--
 3 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/block/blk-lib.c b/block/blk-lib.c
index e90614fd8d6a..cbf55c9f0d6f 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -85,12 +85,19 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		 * is split in device drive, the split ones are very probably
		 * to be aligned to discard_granularity of the device's queue.
		 */
-		if (granularity_aligned_lba == sector_mapped)
+		if (granularity_aligned_lba == sector_mapped) {
 			req_sects = min_t(sector_t, nr_sects,
 					  bio_aligned_discard_max_sectors(q));
-		else
+			if (!req_sects)
+				return -EOPNOTSUPP;
+		} else {
 			req_sects = min_t(sector_t, nr_sects,
 					  granularity_aligned_lba - sector_mapped);
+		}
+
+		/* Zero-sector (unknown) and one-sector granularities are the same. */
+		granularity = max(q->limits.discard_granularity >> SECTOR_SHIFT, 1U);
+		req_sects = round_down(req_sects, granularity);
 
 		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 97b7c2821565..f4e030fe6399 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -59,6 +59,7 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
 	return bio_will_gap(req->q, NULL, bio, req->bio);
 }
 
+/* deprecated */
 static struct bio *blk_bio_discard_split(struct request_queue *q,
 					 struct bio *bio,
 					 struct bio_set *bs,
@@ -303,7 +304,6 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
 	switch (bio_op(*bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
-		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
 		break;
 	case REQ_OP_WRITE_ZEROES:
 		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
diff --git a/block/blk.h b/block/blk.h
index dfab98465db9..508371fafdf3 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -281,8 +281,12 @@ static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
 static inline unsigned int bio_aligned_discard_max_sectors(
 		struct request_queue *q)
 {
-	return round_down(UINT_MAX, q->limits.discard_granularity) >>
-			SECTOR_SHIFT;
+	unsigned int discard_max_sectors, granularity;
+	discard_max_sectors = min(q->limits.max_discard_sectors,
+				  bio_allowed_max_sectors(q));
+	/* Zero-sector (unknown) and one-sector granularities are the same. */
+	granularity = max(q->limits.discard_granularity >> SECTOR_SHIFT, 1U);
+	return round_down(discard_max_sectors, granularity);
 }
 
 /*
-- 
2.29.2
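
For illustration only (not part of the patch): below is a minimal
user-space sketch of the arithmetic this moves into
__blkdev_issue_discard() and bio_aligned_discard_max_sectors(). The
struct, helper names and sample limits are made up for the example, and
the kernel's min()/max()/round_down() are replaced with plain C
stand-ins. It shows how a zero (unknown) discard granularity is treated
like one sector, and how the capped request size is rounded down to a
whole number of granularity units.

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Plain C stand-ins for the kernel's min()/max()/round_down() helpers. */
static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }
static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }
static uint32_t rounddown_u32(uint32_t x, uint32_t m) { return x - (x % m); }

/* Hypothetical stand-in for the relevant fields of q->limits. */
struct fake_limits {
        uint32_t max_discard_sectors;   /* device cap, in sectors */
        uint32_t discard_granularity;   /* in bytes; 0 means unknown */
};

/*
 * Same shape as the patched bio_aligned_discard_max_sectors(): take the
 * smaller of the device cap and the generic bio cap, then round it down
 * to a multiple of the granularity.
 */
static uint32_t aligned_discard_max_sectors(const struct fake_limits *l,
                                            uint32_t allowed_max_sectors)
{
        uint32_t discard_max_sectors, granularity;

        discard_max_sectors = min_u32(l->max_discard_sectors,
                                      allowed_max_sectors);
        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max_u32(l->discard_granularity >> SECTOR_SHIFT, 1U);
        return rounddown_u32(discard_max_sectors, granularity);
}

int main(void)
{
        /* ~4 GiB bio cap in sectors, like bio_allowed_max_sectors() gives. */
        uint32_t allowed = 0xffffffffu >> SECTOR_SHIFT;     /* 8388607 */
        /* Sample device: ~4 GiB discard cap, 1 MiB granularity (2048 sectors). */
        struct fake_limits l = {
                .max_discard_sectors = 8388607,
                .discard_granularity = 1U << 20,
        };

        /* 8388607 rounded down to a multiple of 2048 -> 8386560. */
        printf("aligned cap = %u sectors\n",
               aligned_discard_max_sectors(&l, allowed));

        /* Unknown (zero) granularity behaves like a one-sector granularity. */
        l.discard_granularity = 0;
        printf("aligned cap, granularity 0 = %u sectors\n",
               aligned_discard_max_sectors(&l, allowed));
        return 0;
}

With these sample numbers every full-size request ends up at 8386560
sectors, a whole multiple of the granularity, rather than the odd
8388607 the unrounded cap would give; that is the "more consistent /
aligned to the cap" effect described above.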