The following patch will propose some hints for figuring out the default
queue depth of the scheduler queue, so introduce the helper
blk_mq_sched_queue_depth() for this purpose.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq-sched.c |  8 +-------
 block/blk-mq-sched.h | 11 +++++++++++
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4ab69435708c..9ca63926452a 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -514,13 +514,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 		return 0;
 	}
 
-	/*
-	 * Default to double of smaller one between hw queue_depth and 128,
-	 * since we don't split into sync/async like the old code did.
-	 * Additionally, this is a per-hw queue depth.
-	 */
-	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
-				   BLKDEV_MAX_RQ);
+	q->nr_requests = blk_mq_sched_queue_depth(q);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_sched_alloc_tags(q, hctx, i);
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 9267d0b7c197..1d47f3fda1d0 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -96,4 +96,15 @@ static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 }
 
+static inline unsigned blk_mq_sched_queue_depth(struct request_queue *q)
+{
+	/*
+	 * Default to double of smaller one between hw queue_depth and 128,
+	 * since we don't split into sync/async like the old code did.
+	 * Additionally, this is a per-hw queue depth.
+	 */
+	return 2 * min_t(unsigned int, q->tag_set->queue_depth,
+			 BLKDEV_MAX_RQ);
+}
+
 #endif
-- 
2.9.4
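
For reference, the default computed by the new helper is easy to sanity-check
outside the kernel. Below is a minimal user-space sketch, not kernel code: it
assumes BLKDEV_MAX_RQ is 128 (as the moved comment states), models
min_t(unsigned int, ...) with a plain unsigned comparison, and uses a
hypothetical sched_queue_depth() name to mirror the helper. It only prints the
resulting per-hw-queue nr_requests for a few hardware queue depths.

	/*
	 * User-space sketch of the blk_mq_sched_queue_depth() default,
	 * assuming BLKDEV_MAX_RQ == 128. Illustration only.
	 */
	#include <stdio.h>

	#define BLKDEV_MAX_RQ	128	/* assumed value of the kernel constant */

	static unsigned int sched_queue_depth(unsigned int hw_queue_depth)
	{
		unsigned int d = hw_queue_depth < BLKDEV_MAX_RQ ?
				 hw_queue_depth : BLKDEV_MAX_RQ;

		/* double the smaller of hw queue depth and 128 */
		return 2 * d;
	}

	int main(void)
	{
		unsigned int depths[] = { 1, 32, 64, 128, 256, 1024 };
		unsigned int i;

		for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
			printf("hw queue_depth %4u -> nr_requests %4u\n",
			       depths[i], sched_queue_depth(depths[i]));

		return 0;
	}

With these inputs, a shallow device (queue_depth 32) gets nr_requests 64,
while anything at or above 128 is capped at 256, matching the behaviour of
the code being factored out.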