From: Pavel Begunkov <asml.silence@xxxxxxxxx>

[ Upstream commit e276ae344a770f91912a81c6a338d92efd319be2 ]

A preparation patch, make sure we always hold uring_lock around
io_req_complete_failed(). The only place deviating from the rule
is io_cancel_defer_files(), queue a tw instead.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
Link: https://lore.kernel.org/r/70760344eadaecf2939287084b9d4ba5c05a6984.1669203009.git.asml.silence@xxxxxxxxx
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Stable-dep-of: ef5c600adb1d ("io_uring: always prep_async for drain requests")
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 io_uring/io_uring.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9b1c917c99d9..6b81a0d2d9bc 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -871,9 +871,12 @@ inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
 }
 
 void io_req_complete_failed(struct io_kiocb *req, s32 res)
+	__must_hold(&ctx->uring_lock)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 
+	lockdep_assert_held(&req->ctx->uring_lock);
+
 	req_set_fail(req);
 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
 	if (def->fail)
@@ -1631,6 +1634,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
 }
 
 static __cold void io_drain_req(struct io_kiocb *req)
+	__must_hold(&ctx->uring_lock)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_defer_entry *de;
@@ -2867,7 +2871,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 	while (!list_empty(&list)) {
 		de = list_first_entry(&list, struct io_defer_entry, list);
 		list_del_init(&de->list);
-		io_req_complete_failed(de->req, -ECANCELED);
+		io_req_task_queue_fail(de->req, -ECANCELED);
 		kfree(de);
 	}
 	return true;
--
2.39.0