From: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>

Move the non-"new_io" branch of blk_account_io_start() into a separate
function.  Fix merge accounting for discards (they were counted as write
merges).

Unlike blk_account_io_start(), the new blk_account_io_merge_bio() doesn't
call update_io_ticks(), as there is no reason for that.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
[hch: rebased]
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 block/blk-core.c | 25 ++++++++++++++++---------
 block/blk-exec.c |  2 +-
 block/blk-mq.c   |  2 +-
 block/blk.h      |  2 +-
 4 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index c1675d43c2da0..bf2f7d4bc0c1c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -636,6 +636,16 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);
 
+static void blk_account_io_merge_bio(struct request *req)
+{
+	if (!blk_do_io_stat(req))
+		return;
+
+	part_stat_lock();
+	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
+	part_stat_unlock();
+}
+
 bool bio_attempt_back_merge(struct request *req, struct bio *bio,
 		unsigned int nr_segs)
 {
@@ -656,7 +666,7 @@ bool bio_attempt_back_merge(struct request *req, struct bio *bio,
 
 	bio_crypt_free_ctx(bio);
 
-	blk_account_io_start(req, false);
+	blk_account_io_merge_bio(req);
 	return true;
 }
 
@@ -682,7 +692,7 @@ bool bio_attempt_front_merge(struct request *req, struct bio *bio,
 
 	bio_crypt_do_front_merge(req, bio);
 
-	blk_account_io_start(req, false);
+	blk_account_io_merge_bio(req);
 	return true;
 }
 
@@ -704,7 +714,7 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
 	req->__data_len += bio->bi_iter.bi_size;
 	req->nr_phys_segments = segments + 1;
 
-	blk_account_io_start(req, false);
+	blk_account_io_merge_bio(req);
 	return true;
 no_merge:
 	req_set_nomerge(q, req);
@@ -1329,7 +1339,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 		return BLK_STS_IOERR;
 
 	if (blk_queue_io_stat(q))
-		blk_account_io_start(rq, true);
+		blk_account_io_start(rq);
 
 	/*
 	 * Since we have a scheduler attached on the top device,
@@ -1433,16 +1443,13 @@ void blk_account_io_done(struct request *req, u64 now)
 	}
 }
 
-void blk_account_io_start(struct request *rq, bool new_io)
+void blk_account_io_start(struct request *rq)
 {
 	if (!blk_do_io_stat(rq))
 		return;
 
 	part_stat_lock();
-	if (!new_io)
-		part_stat_inc(rq->part, merges[rq_data_dir(rq)]);
-	else
-		rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+	rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 	update_io_ticks(rq->part, jiffies, false);
 	part_stat_unlock();
 }
diff --git a/block/blk-exec.c b/block/blk-exec.c
index e20a852ae432d..85324d53d072f 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
 
-	blk_account_io_start(rq, true);
+	blk_account_io_start(rq);
 
 	/*
 	 * don't check dying flag for MQ because the request won't
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cac11945f6023..c606c74463ccd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1822,7 +1822,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 	blk_rq_bio_prep(rq, bio, nr_segs);
 	blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
 
-	blk_account_io_start(rq, true);
+	blk_account_io_start(rq);
 }
 
 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
diff --git a/block/blk.h b/block/blk.h
index 0ecba2ab383d6..428f7e5d70a86 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -185,7 +185,7 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
 		struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs, struct request **same_queue_rq);
-void blk_account_io_start(struct request *req, bool new_io);
+void blk_account_io_start(struct request *req);
 void blk_account_io_done(struct request *req, u64 now);
 
 /*
-- 
2.26.2

--
dm-devel mailing list
dm-devel@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/dm-devel