Call .limit_depth() after data->hctx has been set such that data->hctx
can be used in .limit_depth() implementations.

Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Damien Le Moal <dlemoal@xxxxxxxxxx>
Cc: Zhiguo Niu <zhiguo.niu@xxxxxxxxxx>
Fixes: 07757588e507 ("block/mq-deadline: Reserve 25% of scheduler tags for synchronous requests")
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 block/blk-mq.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 34060d885c5a..bcaa722896a0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -434,6 +434,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
+	void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *) = NULL;
 	struct request_queue *q = data->q;
 	u64 alloc_time_ns = 0;
 	struct request *rq;
@@ -459,13 +460,11 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 		 */
 		if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
 		    !blk_op_is_passthrough(data->cmd_flags)) {
-			struct elevator_mq_ops *ops = &q->elevator->type->ops;
+			limit_depth = q->elevator->type->ops.limit_depth;
 
 			WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
 			data->rq_flags |= RQF_USE_SCHED;
-			if (ops->limit_depth)
-				ops->limit_depth(data->cmd_flags, data);
 		}
 	}
 
@@ -478,6 +477,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	if (data->flags & BLK_MQ_REQ_RESERVED)
 		data->rq_flags |= RQF_RESV;
 
+	if (limit_depth)
+		limit_depth(data->cmd_flags, data);
+
	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
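
For context, the sketch below shows the kind of .limit_depth() implementation
this reordering enables: one that dereferences data->hctx, which
__blk_mq_alloc_requests() only assigns via blk_mq_map_queue(). The function
example_limit_depth() and its 75% heuristic are hypothetical illustrations,
not code from this patch or from any in-tree scheduler; only data->hctx,
data->shallow_depth, hctx->sched_tags and op_is_sync() are existing block
layer interfaces.

/*
 * Hypothetical example, not part of this patch: a .limit_depth()
 * implementation that needs data->hctx. Assumes the context of an I/O
 * scheduler under block/ that includes <linux/blk-mq.h> and the private
 * header "blk-mq.h". The 75% limit is an arbitrary illustration.
 */
static void example_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	/* Valid only because .limit_depth() now runs after data->hctx is set. */
	struct blk_mq_hw_ctx *hctx = data->hctx;
	unsigned int nr_sched_tags = hctx->sched_tags->bitmap_tags.sb.depth;

	/* Leave 25% of this hctx's scheduler tags to synchronous requests. */
	if (!op_is_sync(opf))
		data->shallow_depth = max(1U, nr_sched_tags * 3 / 4);
}

Before this change the hook ran before data->hctx had been assigned, so an
implementation like the one above could not rely on it.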