blk_mq_free_requests() calls dd_finish_request() indirectly. Prevent
nested locking of dd->lock and dd->zone_lock by unlocking dd->lock
before calling blk_mq_free_requests().

Cc: Damien Le Moal <damien.lemoal@xxxxxxxxxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Ming Lei <ming.lei@xxxxxxxxxx>
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 block/mq-deadline.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index dbc0feca963e..56cc29953e15 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -758,6 +758,7 @@ static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
  */
 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 			      blk_insert_t flags)
+	__must_hold(dd->lock)
 {
 	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
@@ -784,7 +785,9 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	}
 
 	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
+		spin_unlock(&dd->lock);
 		blk_mq_free_requests(&free);
+		spin_lock(&dd->lock);
 		return;
 	}
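
For reference, the locking pattern applied above, reduced to a minimal
userspace sketch. This is only an illustration of the "drop the outer lock
around a call that may take another lock" idea, not kernel code; the mutex
and function names below merely stand in for dd->lock, dd->zone_lock and
the dd_insert_request() / dd_finish_request() call chain.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* stands in for dd->lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* stands in for dd->zone_lock */

/* Stand-in for dd_finish_request(): acquires the second lock. */
static void finish_request(void)
{
	pthread_mutex_lock(&lock_b);
	/* per-request cleanup */
	pthread_mutex_unlock(&lock_b);
}

/* Stand-in for dd_insert_request(): entered with lock_a held by the caller. */
static void insert_request(void)
{
	/*
	 * Drop lock_a around the call that may end up in finish_request(),
	 * so lock_b is never acquired while lock_a is held, then reacquire
	 * lock_a before returning to the caller.
	 */
	pthread_mutex_unlock(&lock_a);
	finish_request();
	pthread_mutex_lock(&lock_a);
}

int main(void)
{
	pthread_mutex_lock(&lock_a);
	insert_request();
	pthread_mutex_unlock(&lock_a);
	puts("no nested locking");
	return 0;
}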