Combine blk_mq_sched_bio_merge() and blk_attempt_plug_merge() under a
common if, so we don't check the merge preconditions twice.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 block/blk-mq-sched.c |  2 +-
 block/blk-mq-sched.h | 12 +-----------
 block/blk-mq.c       | 15 +++++++--------
 3 files changed, 9 insertions(+), 20 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index e85b7556b096..5b259fdea794 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -361,7 +361,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	}
 }
 
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs)
 {
 	struct elevator_queue *e = q->elevator;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 98836106b25f..25d1034952b6 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -12,7 +12,7 @@ void blk_mq_sched_assign_ioc(struct request *rq);
 
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs, struct request **merged_request);
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
 				   struct list_head *free);
@@ -42,16 +42,6 @@ static inline bool bio_mergeable(struct bio *bio)
 	return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
 }
 
-static inline bool
-blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
-		unsigned int nr_segs)
-{
-	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
-		return false;
-
-	return __blk_mq_sched_bio_merge(q, bio, nr_segs);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 			 struct bio *bio)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a71aeed7b987..f159d007a015 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2481,7 +2481,6 @@ void blk_mq_submit_bio(struct bio *bio)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	const int is_sync = op_is_sync(bio->bi_opf);
-	const int is_flush_fua = op_is_flush(bio->bi_opf);
 	struct request *rq;
 	struct blk_plug *plug;
 	bool same_queue_rq = false;
@@ -2495,12 +2494,12 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!bio_integrity_prep(bio))
 		goto queue_exit;
 
-	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
-		goto queue_exit;
-
-	if (blk_mq_sched_bio_merge(q, bio, nr_segs))
-		goto queue_exit;
+	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
+		if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
+			goto queue_exit;
+		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
+			goto queue_exit;
+	}
 
 	rq_qos_throttle(q, bio);
 
@@ -2543,7 +2542,7 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (is_flush_fua && blk_insert_flush(rq))
+	if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
 		return;
 
 	if (plug && (q->nr_hw_queues == 1 ||
-- 
2.33.1
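
A note on the behavioural subtlety here: dropping the explicit !is_flush_fua
guard in front of the merge attempts should be safe because bio_mergeable()
already rejects flush/FUA bios; REQ_NOMERGE_FLAGS in
include/linux/blk_types.h includes REQ_PREFLUSH and REQ_FUA. Below is a
minimal userspace sketch of that filter, with simplified flag values and a
stand-in struct bio rather than the real kernel definitions:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's request flag bits. */
#define REQ_PREFLUSH	(1u << 0)
#define REQ_FUA		(1u << 1)
#define REQ_NOMERGE	(1u << 2)

/* Mirrors the kernel's REQ_NOMERGE_FLAGS composition. */
#define REQ_NOMERGE_FLAGS	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

struct bio {
	unsigned int bi_opf;	/* stand-in for the kernel's bio->bi_opf */
};

/* Same test the kernel helper performs. */
static bool bio_mergeable(struct bio *bio)
{
	return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
}

int main(void)
{
	struct bio plain = { .bi_opf = 0 };
	struct bio flush_fua = { .bi_opf = REQ_PREFLUSH | REQ_FUA };

	/*
	 * A plain bio may merge; a flush/FUA bio never passes the guard,
	 * which is why the separate is_flush_fua check was redundant.
	 */
	printf("plain: %d, flush/fua: %d\n",
	       bio_mergeable(&plain), bio_mergeable(&flush_fua));
	return 0;
}

Compiled and run, this prints "plain: 1, flush/fua: 0", matching the
filtering that the combined conditional in blk_mq_submit_bio() relies on.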