We try to get a request from the cache before allocating a new one, and in
both cases we first attempt a bio merge. This patch moves that common part
into blk_mq_submit_bio(), which simplifies the code and avoids having to
pass a pointer to the bio pointer around.

Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
---
 block/blk-mq.c | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5ee62b95f3e5..aa091615e20b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2716,8 +2716,7 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
 
 static struct request *blk_mq_get_new_requests(struct request_queue *q,
 					       struct blk_plug *plug,
-					       struct bio *bio,
-					       unsigned int nsegs)
+					       struct bio *bio)
 {
 	struct blk_mq_alloc_data data = {
 		.q		= q,
@@ -2729,9 +2728,6 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	if (unlikely(bio_queue_enter(bio)))
 		return NULL;
 
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-		goto queue_exit;
-
 	rq_qos_throttle(q, bio);
 
 	if (plug) {
@@ -2746,13 +2742,13 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-queue_exit:
+
 	blk_queue_exit(q);
 	return NULL;
 }
 
 static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+		struct blk_plug *plug, struct bio *bio)
 {
 	struct request *rq;
 
@@ -2762,14 +2758,9 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 	if (!rq || rq->q != q)
 		return NULL;
 
-	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
-		*bio = NULL;
-		return NULL;
-	}
-
-	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
 		return NULL;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
 		return NULL;
 
 	/*
@@ -2778,9 +2769,9 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 	 * before we throttle.
 	 */
 	plug->cached_rq = rq_list_next(rq);
-	rq_qos_throttle(q, *bio);
+	rq_qos_throttle(q, bio);
 
-	rq->cmd_flags = (*bio)->bi_opf;
+	rq->cmd_flags = bio->bi_opf;
 	INIT_LIST_HEAD(&rq->queuelist);
 	return rq;
 }
@@ -2824,11 +2815,12 @@ void blk_mq_submit_bio(struct bio *bio)
 
 	bio_set_ioprio(bio);
 
-	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
+	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+		return;
+
+	rq = blk_mq_get_cached_request(q, plug, bio);
 	if (!rq) {
-		if (!bio)
-			return;
-		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+		rq = blk_mq_get_new_requests(q, plug, bio);
 		if (unlikely(!rq))
 			return;
 	}
-- 
2.36.1
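
For reference, a rough sketch of the resulting submission fast path after this
patch, reconstructed from the hunks above (bio splitting, nr_segs setup, plug
lookup and the later issue path are elided into comments; illustrative only,
not a compilable excerpt of blk-mq.c):

void blk_mq_submit_bio(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct blk_plug *plug;		/* current task's plug, set up as before */
	struct request *rq;
	unsigned int nr_segs;

	/* ... bio split, nr_segs computation, bio_set_ioprio(bio) ... */

	/* the merge attempt is now done once, up front, for both paths */
	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
		return;

	/* try a request cached on the plug first, then allocate a new one */
	rq = blk_mq_get_cached_request(q, plug, bio);
	if (!rq) {
		rq = blk_mq_get_new_requests(q, plug, bio);
		if (unlikely(!rq))
			return;
	}

	/* ... initialize rq from the bio and plug/issue it as before ... */
}

With the merge hoisted into the caller, neither helper needs nsegs any more,
and blk_mq_get_cached_request() no longer needs a struct bio ** just so it
could NULL out the bio after a successful merge.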