SCSI sets q->queue_depth from shost->cmd_per_lun. q->queue_depth is per
request_queue and is closer to the scheduler queue depth than the hw queue
depth, which may be shared among queues (e.g. with TAG_SHARED). This patch
tries to use q->queue_depth as the hint for computing q->nr_requests, which
should be more effective than the current way.

Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
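Note (not part of the patch): a quick userspace sketch of the resulting
arithmetic, assuming BLKDEV_MAX_RQ is 128 as in include/linux/blkdev.h;
the helper below simply mirrors the new blk_mq_sched_queue_depth() logic.

/*
 * Illustrative only: how the new scheduler queue depth hint works out
 * for a few representative driver depths.
 */
#include <stdio.h>

#define BLKDEV_MAX_RQ	128

static unsigned int sched_depth(unsigned int queue_depth,
				unsigned int tag_set_depth)
{
	/* prefer q->queue_depth as the hint when the driver has set it */
	unsigned int q_depth = queue_depth ? queue_depth : tag_set_depth;

	/* double the smaller of the hint and BLKDEV_MAX_RQ */
	q_depth = 2 * (q_depth < BLKDEV_MAX_RQ ? q_depth : BLKDEV_MAX_RQ);

	/* never go below 32 so small-queue devices still get IO merging */
	return q_depth > 32 ? q_depth : 32;
}

int main(void)
{
	/* depth 1 -> 32, SATA NCQ (31) -> 62, no hint + deep tag set -> 256 */
	printf("%u %u %u\n", sched_depth(1, 64), sched_depth(31, 64),
	       sched_depth(0, 1024));
	return 0;
}

So a device with queue_depth 1 still gets a scheduler depth of 32, an
NCQ-style depth of 31 gets 62, and the hint is capped at 2 * 128 = 256.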
 block/blk-mq-sched.h | 18 +++++++++++++++---
 block/blk-mq.c       | 27 +++++++++++++++++++++++++--
 block/blk-mq.h       |  1 +
 block/blk-settings.c |  2 ++
 4 files changed, 43 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 1d47f3fda1d0..bb772e680e01 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -99,12 +99,24 @@ static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 static inline unsigned blk_mq_sched_queue_depth(struct request_queue *q)
 {
 	/*
-	 * Default to double of smaller one between hw queue_depth and 128,
+	 * q->queue_depth is more close to scheduler queue, so use it
+	 * as hint for computing scheduler queue depth if it is valid
+	 */
+	unsigned q_depth = q->queue_depth ?: q->tag_set->queue_depth;
+
+	/*
+	 * Default to double of smaller one between queue depth and 128,
 	 * since we don't split into sync/async like the old code did.
 	 * Additionally, this is a per-hw queue depth.
 	 */
-	return 2 * min_t(unsigned int, q->tag_set->queue_depth,
-			 BLKDEV_MAX_RQ);
+	q_depth = 2 * min_t(unsigned int, q_depth, BLKDEV_MAX_RQ);
+
+	/*
+	 * when queue depth of driver is too small, we set queue depth
+	 * of scheduler queue as 32 so that small queue device still
+	 * can benefit from IO merging.
+	 */
+	return max_t(unsigned, q_depth, 32);
 }
 
 #endif
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 86b8fdcb8434..7df68d31bc23 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2593,7 +2593,9 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_free_tag_set);
 
-int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+static int __blk_mq_update_nr_requests(struct request_queue *q,
+				       bool sched_only,
+				       unsigned int nr)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 	struct blk_mq_hw_ctx *hctx;
@@ -2612,7 +2614,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	 * If we're using an MQ scheduler, just update the scheduler
 	 * queue depth. This is similar to what the old code would do.
 	 */
-	if (!hctx->sched_tags) {
+	if (!sched_only && !hctx->sched_tags) {
 		ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
 						min(nr, set->queue_depth),
 						false);
@@ -2632,6 +2634,27 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	return ret;
 }
 
+int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+{
+	return __blk_mq_update_nr_requests(q, false, nr);
+}
+
+/*
+ * When drivers update q->queue_depth, this API is called so that
+ * we can use this queue depth as hint for adjusting scheduler
+ * queue depth.
+ */
+int blk_mq_update_sched_queue_depth(struct request_queue *q)
+{
+	unsigned nr;
+
+	if (!q->mq_ops || !q->elevator)
+		return 0;
+
+	nr = blk_mq_sched_queue_depth(q);
+	return __blk_mq_update_nr_requests(q, true, nr);
+}
+
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 						int nr_hw_queues)
 {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 0c398f29dc4b..44d3aaa03d7c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -36,6 +36,7 @@ bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
 				bool wait);
 struct request *blk_mq_dispatch_rq_from_ctxs(struct blk_mq_hw_ctx *hctx);
+int blk_mq_update_sched_queue_depth(struct request_queue *q);
 
 /*
  * Internal helpers for allocating/freeing the request map
diff --git a/block/blk-settings.c b/block/blk-settings.c
index be1f115b538b..94a349601545 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -877,6 +877,8 @@ void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
 {
 	q->queue_depth = depth;
 	wbt_set_queue_depth(q->rq_wb, depth);
+
+	WARN_ON(blk_mq_update_sched_queue_depth(q));
 }
 EXPORT_SYMBOL(blk_set_queue_depth);
 
-- 
2.9.4