If the current I/O scheduler is 'none', the maximum allowed 'nr_requests'
should come from set->queue_depth rather than from the tags' depth, which
may have been adjusted earlier. Fix this by using set->queue_depth as the
maximum allowed 'nr_requests', and also update q->nr_requests when
switching to the 'none' scheduler.

Cc: Marco Patalano <mpatalan@xxxxxxxxxx>
Cc: "Ewan D. Milne" <emilne@xxxxxxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
This issue can be reproduced with the blktests test case block/021, which
will be posted soon.

 block/blk-mq-sched.c | 1 +
 block/blk-mq-tag.c   | 6 ++++--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index b0f2c2a40a0c..b087d8884a08 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -585,6 +585,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 
 	if (!e) {
 		q->elevator = NULL;
+		q->nr_requests = q->tag_set->queue_depth;
 		return 0;
 	}
 
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index a4e58fc28a06..a178d862a29a 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -423,7 +423,9 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
 			    bool can_grow)
 {
+	struct blk_mq_tag_set *set = hctx->queue->tag_set;
 	struct blk_mq_tags *tags = *tagsptr;
+	unsigned int max_depth;
 
 	if (tdepth <= tags->nr_reserved_tags)
 		return -EINVAL;
@@ -434,8 +436,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 	 * If we are allowed to grow beyond the original size, allocate
 	 * a new set of tags before freeing the old one.
 	 */
-	if (tdepth > tags->nr_tags) {
-		struct blk_mq_tag_set *set = hctx->queue->tag_set;
+	max_depth = can_grow ? tags->nr_tags : set->queue_depth;
+	if (tdepth > max_depth) {
 		struct blk_mq_tags *new;
 		bool ret;
 
-- 
2.9.5
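
[Editor's note, not part of the patch] For readers unfamiliar with this path:
blk_mq_update_nr_requests() calls blk_mq_tag_update_depth() with can_grow=false
for hctx->tags (no scheduler) and can_grow=true for hctx->sched_tags. The
user-space sketch below only models the depth-limit decision the patch changes;
the struct and function names are illustrative, not kernel API, and the numbers
are made up.

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct model_tags { unsigned int nr_tags; unsigned int nr_reserved_tags; };
struct model_set  { unsigned int queue_depth; };

/* Mirrors the clamp introduced by the patch; names are illustrative only. */
static int model_update_depth(const struct model_set *set,
			      struct model_tags *tags,
			      unsigned int tdepth, bool can_grow)
{
	unsigned int max_depth = can_grow ? tags->nr_tags : set->queue_depth;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	if (tdepth > max_depth) {
		if (!can_grow)
			return -EINVAL;	/* 'none': capped at set->queue_depth */
		tags->nr_tags = tdepth;	/* scheduler: pretend tags were reallocated */
	}
	return 0;
}

int main(void)
{
	struct model_set set = { .queue_depth = 62 };
	struct model_tags tags = { .nr_tags = 32, .nr_reserved_tags = 0 };

	/* 'none' scheduler: 62 is accepted, 63 is rejected with -EINVAL */
	printf("none, 62 -> %d\n", model_update_depth(&set, &tags, 62, false));
	printf("none, 63 -> %d\n", model_update_depth(&set, &tags, 63, false));

	/* with a scheduler (can_grow), exceeding the tag depth grows the tags */
	printf("sched, 63 -> %d (nr_tags now %u)\n",
	       model_update_depth(&set, &tags, 63, true), tags.nr_tags);
	return 0;
}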