[PATCH 5/5] block: Make blkdev_issue_discard() submit aligned discard requests

Split discard requests as follows:
* If the start sector is not aligned, an initial request that writes
  zeroes from the start sector up to the first aligned sector.
* A discard request from the first aligned sector up to the last
  aligned sector in the discarded range.
* If the end sector is not aligned, a final request that writes
  zeroes from the last aligned sector up to the end sector.
The zero-writes are only issued if the discard_zeroes_data queue
limit is set; otherwise the misaligned head and tail are skipped.

Note: if the start and/or end sector is not aligned and the range is
small enough, the discard request will be submitted with
bi_size == 0.

Signed-off-by: Bart Van Assche <bart.vanassche@xxxxxxxxxxx>
Cc: Jan Kara <jack@xxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Mike Snitzer <snitzer@xxxxxxxxxx>
Cc: Martin K. Petersen <martin.petersen@xxxxxxxxxx>
Cc: Dmitry Monakhov <dmonakhov@xxxxxxxxxx>
Cc: Darrick J. Wong <darrick.wong@xxxxxxxxxx>
Cc: Sagi Grimberg <sagi@xxxxxxxxxxx>
---
 block/blk-lib.c   |  4 ++--
 block/blk-merge.c | 55 ++++++++++++++++++++++++++++++-------------------------
 block/blk.h       |  3 +++
 3 files changed, 35 insertions(+), 27 deletions(-)
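
As a reviewer aid (illustrative, not part of the commit):
blk_round_sect_down(s, granularity, alignment), made non-static
below, returns the largest sector <= s whose remainder modulo
granularity equals alignment, e.g. blk_round_sect_down(13, 8, 2) ==
10 and blk_round_sect_down(23, 8, 0) == 16.  bio_add_zero_pages(),
as used here, fills a bio with zeroed pages covering the requested
number of sectors.  Both helpers are reused by
blk_bio_discard_split().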

diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9a93ca4..d78ded5 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -30,7 +30,7 @@ static void bio_batch_end_io(struct bio *bio)
  * Return the largest number that is less than or equal to @s and for which
  * the remainder of the division by @granularity is @alignment.
  */
-static sector_t blk_round_sect_down(sector_t s, u32 granularity, u32 alignment)
+sector_t blk_round_sect_down(sector_t s, u32 granularity, u32 alignment)
 {
 	sector_t tmp = s, res = s;
 	u32 remainder;
@@ -219,7 +219,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
 
-static void bio_add_zero_pages(struct bio *bio, sector_t nr_sects)
+void bio_add_zero_pages(struct bio *bio, sector_t nr_sects)
 {
 	unsigned int sz;
 	int ret;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2613531..fd15606 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -16,42 +16,47 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
 					 struct bio_set *bs,
 					 unsigned *nsegs)
 {
+	struct bio *wr;
 	unsigned int max_discard_sectors, granularity;
 	int alignment;
-	sector_t tmp;
-	unsigned split_sectors;
+	sector_t start, start_r, end, end_r, skip;
 
 	*nsegs = 1;
 
 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
-
+	alignment = (q->limits.discard_alignment >> 9) % granularity;
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors -= max_discard_sectors % granularity;
-
-	if (unlikely(!max_discard_sectors)) {
-		/* XXX: warn */
-		return NULL;
-	}
-
-	if (bio_sectors(bio) <= max_discard_sectors)
-		return NULL;
-
-	split_sectors = max_discard_sectors;
+	WARN_ON_ONCE(max_discard_sectors == 0);
 
 	/*
-	 * If the next starting sector would be misaligned, stop the discard at
-	 * the previous aligned sector.
+	 * If the start or end sector is misaligned, issue a zero-filling
+	 * write request if the discard_zeroes_data flag has been set.
 	 */
-	alignment = (q->limits.discard_alignment >> 9) % granularity;
-
-	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
-	tmp = sector_div(tmp, granularity);
-
-	if (split_sectors > tmp)
-		split_sectors -= tmp;
-
-	return bio_split(bio, split_sectors, GFP_NOIO, bs);
+	start = bio->bi_iter.bi_sector;
+	start_r = blk_round_sect_down(start, granularity, alignment);
+	end = start + min(max_discard_sectors, bio_sectors(bio));
+	end_r = blk_round_sect_down(end, granularity, alignment);
+	if (start == start_r && start < end_r) {
+		if (end == end_r && bio_sectors(bio) == end_r - start)
+			return NULL;
+		return bio_split(bio, end_r - start, GFP_NOIO, bs);
+	}
+	if (q->limits.discard_zeroes_data && start < end) {
+		end = min(end, start_r + granularity);
+		wr = bio_alloc_bioset(GFP_NOIO, end - start, bs);
+		if (WARN_ON_ONCE(!wr))
+			return NULL;
+		wr->bi_rw = REQ_WRITE;
+		wr->bi_iter.bi_sector = start;
+		wr->bi_bdev = bio->bi_bdev;
+		bio_add_zero_pages(wr, end - start);
+		bio_advance(bio, wr->bi_iter.bi_size);
+		return wr;
+	}
+	skip = (min(start_r + granularity, end) - start) << 9;
+	bio_advance(bio, skip);
+	return NULL;
 }
 
 static struct bio *blk_bio_write_same_split(struct request_queue *q,
diff --git a/block/blk.h b/block/blk.h
index 70e4aee..31b13f9 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -36,6 +36,9 @@ extern struct kmem_cache *request_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
+sector_t blk_round_sect_down(sector_t s, u32 granularity, u32 alignment);
+void bio_add_zero_pages(struct bio *bio, sector_t nr_sects);
+
 static inline struct blk_flush_queue *blk_get_flush_queue(
 		struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-- 
2.8.1
