SCSI devices often provide a per-request_queue depth via q->queue_depth
(set from .cmd_per_lun), which is a global limit shared by all hw queues.
Once the pending I/O submitted to one request queue reaches this limit,
BLK_STS_RESOURCE is returned from every dispatch path. That means when one
hw queue is stuck, all hctxs are effectively stuck too.

This flag is introduced to improve blk-mq I/O scheduling on this kind of
device.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq-debugfs.c |  1 +
 block/blk-mq-sched.c   |  2 +-
 block/blk-mq.c         | 25 ++++++++++++++++++++++---
 include/linux/blk-mq.h |  1 +
 4 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 7932782c4614..febcaa7bfc82 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -210,6 +210,7 @@ static const char *const hctx_flag_name[] = {
 	HCTX_FLAG_NAME(SG_MERGE),
 	HCTX_FLAG_NAME(BLOCKING),
 	HCTX_FLAG_NAME(NO_SCHED),
+	HCTX_FLAG_NAME(SHARED_DEPTH),
 };
 #undef HCTX_FLAG_NAME
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 2eda942357c3..18d997679d7d 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -201,7 +201,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 * is busy, which can be triggeredd easily by
 	 * limit of of request_queue's queue depth
 	 */
-	if (!q->queue_depth) {
+	if (!(hctx->flags & BLK_MQ_F_SHARED_DEPTH)) {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
 		blk_mq_dispatch_rq_list(q, &rq_list);
 	} else {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da2fa175d78f..b535587570b8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2656,12 +2656,31 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 int blk_mq_update_sched_queue_depth(struct request_queue *q)
 {
 	unsigned nr;
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+	int ret = 0;
 
-	if (!q->mq_ops || !q->elevator)
-		return 0;
+	if (!q->mq_ops)
+		return ret;
+
+	blk_mq_freeze_queue(q);
+	/*
+	 * if there is q->queue_depth, all hw queues share
+	 * this queue depth limit
+	 */
+	if (q->queue_depth) {
+		queue_for_each_hw_ctx(q, hctx, i)
+			hctx->flags |= BLK_MQ_F_SHARED_DEPTH;
+	}
+
+	if (!q->elevator)
+		goto exit;
 
 	nr = blk_mq_sched_queue_depth(q);
-	return __blk_mq_update_nr_requests(q, true, nr);
+	ret = __blk_mq_update_nr_requests(q, true, nr);
+ exit:
+	blk_mq_unfreeze_queue(q);
+	return ret;
 }
 
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index cae063482b82..1197e5dee015 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -164,6 +164,7 @@ enum {
 	BLK_MQ_F_SG_MERGE	= 1 << 2,
 	BLK_MQ_F_BLOCKING	= 1 << 5,
 	BLK_MQ_F_NO_SCHED	= 1 << 6,
+	BLK_MQ_F_SHARED_DEPTH	= 1 << 7,
 
 	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
 	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
-- 
2.9.4
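
P.S. For illustration only, not part of the patch: a minimal standalone
userspace sketch of the behaviour the commit message describes. It models
the shared per-request_queue budget with a single counter (loosely standing
in for sdev->device_busy); the names and numbers below are made up for the
example and are not kernel API.

#include <stdio.h>

#define NR_HW_QUEUES	4
#define QUEUE_DEPTH	8	/* models q->queue_depth / .cmd_per_lun */

enum blk_status { BLK_STS_OK, BLK_STS_RESOURCE };

/* one budget shared by every hw queue, as with q->queue_depth */
static int device_busy;

static enum blk_status dispatch(int hctx)
{
	if (device_busy >= QUEUE_DEPTH)
		return BLK_STS_RESOURCE;	/* whole queue is throttled */
	device_busy++;
	return BLK_STS_OK;
}

int main(void)
{
	int hctx;

	/* hctx 0 alone consumes the shared budget ... */
	while (dispatch(0) == BLK_STS_OK)
		;

	/* ... and now every other hctx hits BLK_STS_RESOURCE too */
	for (hctx = 1; hctx < NR_HW_QUEUES; hctx++)
		printf("hctx %d: %s\n", hctx,
		       dispatch(hctx) == BLK_STS_RESOURCE ?
		       "BLK_STS_RESOURCE" : "ok");
	return 0;
}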