6.1-stable review patch. If anyone has any objections, please let me know.

------------------

From: Bart Van Assche <bvanassche@xxxxxxx>

commit 6259151c04d4e0085e00d2dcb471ebdd1778e72e upstream.

Call .limit_depth() after data->hctx has been set such that data->hctx
can be used in .limit_depth() implementations.

Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Damien Le Moal <dlemoal@xxxxxxxxxx>
Cc: Zhiguo Niu <zhiguo.niu@xxxxxxxxxx>
Fixes: 07757588e507 ("block/mq-deadline: Reserve 25% of scheduler tags for synchronous requests")
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
Tested-by: Zhiguo Niu <zhiguo.niu@xxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Link: https://lore.kernel.org/r/20240509170149.7639-2-bvanassche@xxxxxxx
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 block/blk-mq.c |    6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -439,6 +439,7 @@ __blk_mq_alloc_requests_batch(struct blk
 
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
+	void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *) = NULL;
 	struct request_queue *q = data->q;
 	u64 alloc_time_ns = 0;
 	struct request *rq;
@@ -465,7 +466,7 @@ static struct request *__blk_mq_alloc_re
 		    !blk_op_is_passthrough(data->cmd_flags) &&
 		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
-			e->type->ops.limit_depth(data->cmd_flags, data);
+			limit_depth = e->type->ops.limit_depth;
 	}
 
 retry:
@@ -477,6 +478,9 @@ retry:
 	if (data->flags & BLK_MQ_REQ_RESERVED)
 		data->rq_flags |= RQF_RESV;
 
+	if (limit_depth)
+		limit_depth(data->cmd_flags, data);
+
 	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
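
For context only, not part of the patch itself: the point of deferring the
call is that a scheduler's .limit_depth() hook can now safely dereference
data->hctx, which is assigned under the retry: label, between the old and
new call sites. The sketch below is a hypothetical .limit_depth()
implementation written to show why the ordering matters;
example_limit_depth() and its quarter-of-the-tag-space heuristic are
illustrative assumptions, not code from this series.

/*
 * Hypothetical .limit_depth() sketch. It assumes the private block/blk-mq.h
 * header (which defines struct blk_mq_alloc_data), as in-tree I/O
 * schedulers do. Before this patch the hook ran before data->hctx was
 * assigned, so the dereference below would have hit a NULL pointer.
 */
#include <linux/blk-mq.h>

#include "blk-mq.h"

static void example_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	struct blk_mq_hw_ctx *hctx = data->hctx;	/* valid only after this patch */

	/* Do not throttle synchronous reads. */
	if (op_is_sync(opf) && !op_is_write(opf))
		return;

	/*
	 * Illustrative heuristic: cap async allocations at a quarter of
	 * this hctx's scheduler tag space.
	 */
	data->shallow_depth = max(1U, hctx->sched_tags->bitmap_tags.sb.depth / 4);
}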