From: Pavel Begunkov <asml.silence@xxxxxxxxx>

commit 907d1df30a51cc1a1d25414a00cde0494b83df7b upstream.

Joseph reports following deadlock:

CPU0:
...
io_kill_linked_timeout       // &ctx->completion_lock
io_commit_cqring
__io_queue_deferred
__io_queue_async_work
io_wq_enqueue
io_wqe_enqueue               // &wqe->lock

CPU1:
...
__io_uring_files_cancel
io_wq_cancel_cb
io_wqe_cancel_pending_work   // &wqe->lock
io_cancel_task_cb            // &ctx->completion_lock

Only __io_queue_deferred() calls queue_async_work() while holding
ctx->completion_lock, enqueue drained requests via io_req_task_queue()
instead.

Cc: stable@xxxxxxxxxxxxxxx # 5.9+
Reported-by: Joseph Qi <joseph.qi@xxxxxxxxxxxxxxxxx>
Tested-by: Joseph Qi <joseph.qi@xxxxxxxxxxxxxxxxx>
Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 fs/io_uring.c |   10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -972,6 +972,7 @@ static int io_setup_async_rw(struct io_k
 			     const struct iovec *fast_iov,
 			     struct iov_iter *iter, bool force);
 static void io_req_drop_files(struct io_kiocb *req);
+static void io_req_task_queue(struct io_kiocb *req);
 
 static struct kmem_cache *req_cachep;
 
@@ -1502,18 +1503,11 @@ static void __io_queue_deferred(struct i
 	do {
 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
 						struct io_defer_entry, list);
-		struct io_kiocb *link;
 
 		if (req_need_defer(de->req, de->seq))
 			break;
 		list_del_init(&de->list);
-		/* punt-init is done before queueing for defer */
-		link = __io_queue_async_work(de->req);
-		if (link) {
-			__io_queue_linked_timeout(link);
-			/* drop submission reference */
-			io_put_req_deferred(link, 1);
-		}
+		io_req_task_queue(de->req);
		kfree(de);
 	} while (!list_empty(&ctx->defer_list));
 }
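
For reference, below is a minimal userspace sketch (not part of the patch, and
not kernel code) of the ABBA lock-ordering pattern the report describes.
pthread mutexes stand in for ctx->completion_lock (lock A) and wqe->lock
(lock B); the thread and helper names (cpu0_buggy, cpu0_fixed, cpu1,
queue_async_work_locked, deferred_enqueue_pending) are invented for
illustration only. The "fixed" thread mirrors the shape of the patch: the
work that needs lock B is deferred and only performed after lock A has been
dropped.

	/*
	 * ABBA deadlock sketch: thread cpu0_buggy takes A then B,
	 * thread cpu1 takes B then A.  Running both can deadlock.
	 * cpu0_fixed defers the B-locked work until A is released.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER; /* A */
	static pthread_mutex_t wqe_lock        = PTHREAD_MUTEX_INITIALIZER; /* B */

	static bool deferred_enqueue_pending;

	/* Needs lock B; the bug is calling this while already holding A. */
	static void queue_async_work_locked(void)
	{
		pthread_mutex_lock(&wqe_lock);
		/* ... enqueue the request to the work queue ... */
		pthread_mutex_unlock(&wqe_lock);
	}

	/* Models CPU0 in the report: A -> B. */
	static void *cpu0_buggy(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&completion_lock);
		queue_async_work_locked();
		pthread_mutex_unlock(&completion_lock);
		return NULL;
	}

	/* Models CPU1 in the report: B -> A, the inverted order. */
	static void *cpu1(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&wqe_lock);
		pthread_mutex_lock(&completion_lock);
		/* ... cancel matching pending work ... */
		pthread_mutex_unlock(&completion_lock);
		pthread_mutex_unlock(&wqe_lock);
		return NULL;
	}

	/* Fixed shape: only mark the request under A, take B afterwards. */
	static void *cpu0_fixed(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&completion_lock);
		deferred_enqueue_pending = true;   /* ~ io_req_task_queue() */
		pthread_mutex_unlock(&completion_lock);

		if (deferred_enqueue_pending)
			queue_async_work_locked(); /* B taken without A held */
		return NULL;
	}

	int main(void)
	{
		pthread_t t0, t1;

		/* Using cpu0_buggy here instead can hang against cpu1. */
		pthread_create(&t0, NULL, cpu0_fixed, NULL);
		pthread_create(&t1, NULL, cpu1, NULL);
		pthread_join(t0, NULL);
		pthread_join(t1, NULL);
		printf("no deadlock with the deferred enqueue\n");
		return 0;
	}

Built with something like "gcc -pthread sketch.c", the fixed ordering runs to
completion, while substituting cpu0_buggy() can reproduce the hang, which is
the same reason __io_queue_deferred() must not enqueue to the work queue while
holding ctx->completion_lock.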