We could have a race here, where the request gets freed before we call
into blk_mq_run_hw_queue(). If this happens, we cannot rely on the state
of the request.

Grab the hardware context before inserting the flush.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>

---

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2197cfbf081f..22b30a89bf3a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2468,9 +2468,10 @@ void blk_mq_submit_bio(struct bio *bio)
 	}
 
 	if (unlikely(is_flush_fua)) {
+		struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 		/* Bypass scheduler for flush requests */
 		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(rq->mq_hctx, true);
+		blk_mq_run_hw_queue(hctx, true);
 	} else if (plug && (q->nr_hw_queues == 1 ||
 		   blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
 		   q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {

-- 
Jens Axboe
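
[Editor's note, not part of the patch: a minimal standalone C sketch of the same hazard and fix. All types and function names below are made up for illustration; it only mirrors the pattern of copying a pointer out of an object before handing that object to a call that may free it, which is what the patch does with rq->mq_hctx before blk_insert_flush().]

	/* Hypothetical sketch: once an object is handed to a function that may
	 * free it, none of its fields may be read afterwards; copy what you
	 * still need beforehand. Not kernel code. */
	#include <stdio.h>
	#include <stdlib.h>

	struct queue { const char *name; };
	struct request { struct queue *q; };

	/* Stand-in for blk_insert_flush(): may complete and free the request. */
	static void insert_flush(struct request *rq)
	{
		free(rq);	/* after this, rq must not be dereferenced */
	}

	static void run_queue(struct queue *q)
	{
		printf("running queue %s\n", q->name);
	}

	int main(void)
	{
		struct queue q = { .name = "hctx0" };
		struct request *rq = malloc(sizeof(*rq));

		if (!rq)
			return 1;
		rq->q = &q;

		/* Racy pattern: insert_flush(rq); run_queue(rq->q);
		 * would read freed memory.
		 * Safe pattern mirroring the patch: grab the pointer first. */
		struct queue *queue = rq->q;

		insert_flush(rq);
		run_queue(queue);
		return 0;
	}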