io_uring_cmd_done() can be called from irq context and is expected to
be irq safe; however, that's not the case when the driver requires
cancellations, because io_uring_cmd_del_cancelable() may try to take
the uring_lock mutex. Clean up the confusion by deferring cancellation
handling to locked task_work if we came into io_uring_cmd_done() from
iowq or any other IO_URING_F_UNLOCKED path.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 io_uring/uring_cmd.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index ec38a8d4836d..9590081feb2d 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -14,19 +14,18 @@
 #include "rsrc.h"
 #include "uring_cmd.h"
 
-static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
-		unsigned int issue_flags)
+static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd)
 {
 	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
 	struct io_ring_ctx *ctx = req->ctx;
 
+	lockdep_assert_held(&ctx->uring_lock);
+
 	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
 		return;
 
 	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
-	io_ring_submit_lock(ctx, issue_flags);
 	hlist_del(&req->hash_node);
-	io_ring_submit_unlock(ctx, issue_flags);
 }
 
 /*
@@ -44,6 +43,9 @@ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
 	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
 	struct io_ring_ctx *ctx = req->ctx;
 
+	if (WARN_ON_ONCE(ctx->flags & IORING_SETUP_IOPOLL))
+		return;
+
 	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
 		cmd->flags |= IORING_URING_CMD_CANCELABLE;
 		io_ring_submit_lock(ctx, issue_flags);
@@ -84,6 +86,15 @@ static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
 	req->big_cqe.extra2 = extra2;
 }
 
+static void io_req_cmd_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
+{
+	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+
+	io_tw_lock(req->ctx, ts);
+	io_uring_cmd_del_cancelable(ioucmd);
+	io_req_task_complete(req, ts);
+}
+
 /*
  * Called by consumers of io_uring_cmd, if they originally returned
  * -EIOCBQUEUED upon receiving the command.
@@ -93,8 +104,6 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 {
 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
 
-	io_uring_cmd_del_cancelable(ioucmd, issue_flags);
-
 	if (ret < 0)
 		req_set_fail(req);
 
@@ -107,9 +116,10 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
 		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
 			return;
+		io_uring_cmd_del_cancelable(ioucmd);
 		io_req_complete_defer(req);
 	} else {
-		req->io_task_work.func = io_req_task_complete;
+		req->io_task_work.func = io_req_cmd_task_complete;
 		io_req_task_work_add(req);
 	}
 }
-- 
2.44.0
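
For readers less familiar with the uring_cmd consumer side, below is a
minimal sketch of the pattern the commit message describes: a driver
arms cancellation at submission time and later completes the command
from hard irq context. Everything driver-side here (struct drv_req,
drv_uring_cmd(), drv_irq()) is hypothetical and for illustration only;
io_uring_cmd_mark_cancelable(), io_uring_cmd_done() and
IO_URING_F_UNLOCKED are the real symbols this patch is concerned with.

#include <linux/interrupt.h>
#include <linux/io_uring/cmd.h>

/* hypothetical per-request state such a driver might keep */
struct drv_req {
	struct io_uring_cmd *cmd;
};

/* ->uring_cmd() hook: submission side, issue_flags describe lock state */
static int drv_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	/* arm cancellation; takes uring_lock as needed via issue_flags */
	io_uring_cmd_mark_cancelable(cmd, issue_flags);
	/* ... start the hardware I/O, stash cmd for the irq handler ... */
	return -EIOCBQUEUED;
}

/* completion side: hard irq context, uring_lock must not be taken here */
static irqreturn_t drv_irq(int irq, void *data)
{
	struct drv_req *dr = data;

	/*
	 * Before this patch, io_uring_cmd_done() ran
	 * io_uring_cmd_del_cancelable() unconditionally, which could end
	 * up in mutex_lock(&ctx->uring_lock) from irq context.  With the
	 * patch, the IO_URING_F_UNLOCKED path defers the cancelable
	 * teardown to io_req_cmd_task_complete(), run from task_work
	 * with the lock held.
	 */
	io_uring_cmd_done(dr->cmd, 0, 0, IO_URING_F_UNLOCKED);
	return IRQ_HANDLED;
}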