The only request allocation user which provides data->ctx and data->hctx
is blk_mq_alloc_request_hctx; now that we have killed such use, set
data->ctx and data->hctx explicitly in blk_mq_get_request().

Cc: Bart Van Assche <bvanassche@xxxxxxx>
Cc: Hannes Reinecke <hare@xxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: John Garry <john.garry@xxxxxxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index e2e1b6808b32..35966af878c6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -354,7 +354,6 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
 	unsigned int tag;
-	bool clear_ctx_on_error = false;
 	u64 alloc_time_ns = 0;
 
 	blk_queue_enter_live(q);
@@ -364,13 +363,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		alloc_time_ns = ktime_get_ns();
 
 	data->q = q;
-	if (likely(!data->ctx)) {
-		data->ctx = blk_mq_get_ctx(q);
-		clear_ctx_on_error = true;
-	}
-	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
-				data->ctx);
+
+	WARN_ON_ONCE(data->ctx || data->hctx);
+	data->ctx = blk_mq_get_ctx(q);
+	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
 	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
@@ -392,8 +388,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 
 	tag = blk_mq_get_tag(data);
 	if (tag == BLK_MQ_TAG_FAIL) {
-		if (clear_ctx_on_error)
-			data->ctx = NULL;
+		data->ctx = NULL;
+		data->hctx = NULL;
 		blk_queue_exit(q);
 		return NULL;
 	}
-- 
2.25.2