SCSI sets q->queue_depth from shost->cmd_per_lun. q->queue_depth is
per request_queue and closer to the scheduler queue than the hw queue
depth, which can be shared by several queues (for example with
TAG_SHARED).

This patch uses q->queue_depth as a hint for computing q->nr_requests,
which should be more accurate than deriving it from the hw queue depth
alone, as the current code does.

Reviewed-by: Bart Van Assche <bart.vanassche@xxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Tested-by: Oleksandr Natalenko <oleksandr@xxxxxxxxxxxxxx>
Tested-by: Tom Nguyen <tom81094@xxxxxxxxx>
Tested-by: Paolo Valente <paolo.valente@xxxxxxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq-sched.h | 18 +++++++++++++++---
 block/blk-mq.c       | 27 +++++++++++++++++++++++++--
 block/blk-mq.h       |  1 +
 block/blk-settings.c |  2 ++
 4 files changed, 43 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 1d47f3fda1d0..906b10c54f78 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -99,12 +99,24 @@ static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 static inline unsigned blk_mq_sched_queue_depth(struct request_queue *q)
 {
 	/*
-	 * Default to double of smaller one between hw queue_depth and 128,
+	 * q->queue_depth is closer to the scheduler queue, so use it as
+	 * the hint for computing the scheduler queue depth if it is valid.
+	 */
+	unsigned q_depth = q->queue_depth ?: q->tag_set->queue_depth;
+
+	/*
+	 * Default to double of smaller one between queue depth and 128,
 	 * since we don't split into sync/async like the old code did.
 	 * Additionally, this is a per-hw queue depth.
 	 */
-	return 2 * min_t(unsigned int, q->tag_set->queue_depth,
-			   BLKDEV_MAX_RQ);
+	q_depth = 2 * min_t(unsigned int, q_depth, BLKDEV_MAX_RQ);
+
+	/*
+	 * When the driver's queue depth is too small, set the scheduler
+	 * queue depth to 128, which is the default of the legacy block
+	 * code.
+	 */
+	return max_t(unsigned, q_depth, BLKDEV_MAX_RQ);
 }
 
 #endif
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7cb3f87334c0..561a663cdd0e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2694,7 +2694,9 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_free_tag_set);
 
-int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+static int __blk_mq_update_nr_requests(struct request_queue *q,
+				       bool sched_only,
+				       unsigned int nr)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 	struct blk_mq_hw_ctx *hctx;
@@ -2713,7 +2715,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		 * If we're using an MQ scheduler, just update the scheduler
 		 * queue depth. This is similar to what the old code would do.
 		 */
-		if (!hctx->sched_tags) {
+		if (!sched_only && !hctx->sched_tags) {
 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
 							min(nr, set->queue_depth),
 							false);
@@ -2733,6 +2735,27 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	return ret;
 }
 
+int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+{
+	return __blk_mq_update_nr_requests(q, false, nr);
+}
+
+/*
+ * When drivers update q->queue_depth, this API is called so that
+ * we can use the new queue depth as a hint for adjusting the
+ * scheduler queue depth.
+ */
+int blk_mq_update_sched_queue_depth(struct request_queue *q)
+{
+	unsigned nr;
+
+	if (!q->mq_ops || !q->elevator)
+		return 0;
+
+	nr = blk_mq_sched_queue_depth(q);
+	return __blk_mq_update_nr_requests(q, true, nr);
+}
+
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 						int nr_hw_queues)
 {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 915de58572e7..5bca6ce1f01d 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -37,6 +37,7 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
 				bool wait);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
+int blk_mq_update_sched_queue_depth(struct request_queue *q);
 
 /*
  * Internal helpers for allocating/freeing the request map
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8559e9563c52..c2db38d2ec2b 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -878,6 +878,8 @@ void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
 {
 	q->queue_depth = depth;
 	wbt_set_queue_depth(q->rq_wb, depth);
+
+	WARN_ON(blk_mq_update_sched_queue_depth(q));
 }
 EXPORT_SYMBOL(blk_set_queue_depth);
 
-- 
2.9.5
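P.S. For reference, here is a userspace-only sketch that mirrors the
depth computation added to blk_mq_sched_queue_depth() above, so the
effect of the q->queue_depth hint can be checked without a kernel
build. It is an illustration, not kernel code: BLKDEV_MAX_RQ is 128
in this tree, and the example depths passed from main() are made up.

#include <stdio.h>

#define BLKDEV_MAX_RQ	128	/* default nr_requests of the legacy path */

/* userspace mirror of the computation in blk_mq_sched_queue_depth() */
static unsigned int sched_queue_depth(unsigned int queue_depth,
				      unsigned int tag_set_depth)
{
	/* prefer q->queue_depth when the driver has set it */
	unsigned int q_depth = queue_depth ? queue_depth : tag_set_depth;

	/* double of the smaller one between the depth and 128 */
	q_depth = 2 * (q_depth < BLKDEV_MAX_RQ ? q_depth : BLKDEV_MAX_RQ);

	/* never drop below the legacy default of 128 */
	return q_depth > BLKDEV_MAX_RQ ? q_depth : BLKDEV_MAX_RQ;
}

int main(void)
{
	/* e.g. a SCSI LUN with cmd_per_lun = 31 on a tag set of depth 256 */
	printf("%u\n", sched_queue_depth(31, 256));	/* 62, clamped to 128 */

	/* a driver that never sets q->queue_depth keeps today's behaviour */
	printf("%u\n", sched_queue_depth(0, 256));	/* 2 * 128 = 256 */

	return 0;
}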