In the following patch, we will use blk_mq_try_issue_directly() for DM to
return the dispatch result, and DM needs this information to improve IO
merging.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index d27c1a265d54..444a4bf9705f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1711,13 +1711,13 @@ static bool __blk_mq_issue_req(struct blk_mq_hw_ctx *hctx,
 	return false;
 }
 
-static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-					struct request *rq,
-					blk_qc_t *cookie)
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+						struct request *rq,
+						blk_qc_t *cookie)
 {
 	struct request_queue *q = rq->q;
 	blk_qc_t new_cookie;
-	blk_status_t ret;
+	blk_status_t ret = BLK_STS_OK;
 	bool run_queue = true;
 
 	/* RCU or SRCU read lock is needed before checking quiesced flag */
@@ -1740,31 +1740,36 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	switch (ret) {
 	case BLK_STS_OK:
 		*cookie = new_cookie;
-		return;
+		return ret;
 	case BLK_STS_RESOURCE:
 		__blk_mq_requeue_request(rq);
 		goto insert;
 	default:
 		*cookie = BLK_QC_T_NONE;
 		blk_mq_end_request(rq, ret);
-		return;
+		return ret;
 	}
 
 insert:
 	blk_mq_sched_insert_request(rq, false, run_queue, false,
 					hctx->flags & BLK_MQ_F_BLOCKING);
+	return ret;
 }
 
-static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-				      struct request *rq, blk_qc_t *cookie)
+static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+					      struct request *rq,
+					      blk_qc_t *cookie)
 {
 	int srcu_idx;
+	blk_status_t ret;
 
 	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
 	hctx_lock(hctx, &srcu_idx);
-	__blk_mq_try_issue_directly(hctx, rq, cookie);
+	ret = __blk_mq_try_issue_directly(hctx, rq, cookie);
 	hctx_unlock(hctx, srcu_idx);
+
+	return ret;
 }
 
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
-- 
2.9.5
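
For illustration, below is a minimal caller-side sketch of how the dispatch
status now returned by blk_mq_try_issue_directly() could be consumed; it is
not part of this patch. Since blk_mq_try_issue_directly() is static, such a
caller would live in block/blk-mq.c or go through an exported wrapper, and
issue_one_request() is a hypothetical helper name.

/*
 * Hypothetical sketch, not part of this patch: a caller that reacts to the
 * dispatch status returned by blk_mq_try_issue_directly().
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static blk_status_t issue_one_request(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, blk_qc_t *cookie)
{
	blk_status_t ret = blk_mq_try_issue_directly(hctx, rq, cookie);

	switch (ret) {
	case BLK_STS_OK:
		/* Request was dispatched to the driver. */
		break;
	case BLK_STS_RESOURCE:
		/*
		 * The underlying queue was busy; blk-mq requeued/inserted
		 * the request.  A stacking driver such as DM can use this
		 * signal to stop issuing further requests for now, keeping
		 * them on its own list where later bios still have a chance
		 * to merge.
		 */
		break;
	default:
		/* Hard error; blk-mq already ended the request. */
		break;
	}

	return ret;
}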