task_work_cb, or a function it calls, may itself call
io_uring_submit_sqe() from io_uring_cmd's task_work callback. Since
io_uring_cmd_work() invokes that callback with ctx->uring_lock held,
doing so would deadlock on the mutex. Introduce
IORING_URING_CMD_UNLOCK, which tells io_uring_cmd_work() to drop
uring_lock around task_work_cb and re-acquire it afterwards.

Signed-off-by: Xiaoguang Wang <xiaoguang.wang@xxxxxxxxxxxxxxxxx>
---
 include/uapi/linux/io_uring.h | 5 +++++
 io_uring/uring_cmd.c          | 6 +++++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 2780bce62faf..45ea8c35d251 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -232,8 +232,13 @@ enum io_uring_op {
  * sqe->uring_cmd_flags
  * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
  *				along with setting sqe->buf_index.
+ *
+ * IORING_URING_CMD_UNLOCK	drop ctx->uring_lock while running
+ *				task_work_cb; some ->uring_cmd()
+ *				implementations need it.
  */
 #define IORING_URING_CMD_FIXED	(1U << 0)
+#define IORING_URING_CMD_UNLOCK	(1U << 1)
 
 /*
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 446a189b78b0..11488a702832 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -16,7 +16,11 @@ static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 
+	if ((ioucmd->flags & IORING_URING_CMD_UNLOCK) && *locked)
+		mutex_unlock(&req->ctx->uring_lock);
 	ioucmd->task_work_cb(ioucmd);
+	if ((ioucmd->flags & IORING_URING_CMD_UNLOCK) && *locked)
+		mutex_lock(&req->ctx->uring_lock);
 }
 
 void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
@@ -82,7 +86,7 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return -EINVAL;
 
 	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
-	if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
+	if (ioucmd->flags & ~(IORING_URING_CMD_FIXED | IORING_URING_CMD_UNLOCK))
 		return -EINVAL;
 
 	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
-- 
2.31.1
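
As an illustration of the intended usage, here is a minimal sketch of a
driver that needs this flag; it is not part of the patch. The foo_*
names are hypothetical, io_uring_submit_sqe() refers to the helper added
earlier in this series, and the io_uring_cmd_done() /
io_uring_cmd_complete_in_task() signatures are assumed to match the tree
this applies to:

#include <linux/io_uring.h>

/* hypothetical task_work callback for a uring_cmd */
static void foo_cmd_task_work_cb(struct io_uring_cmd *ioucmd)
{
	/*
	 * Runs from io_uring_cmd_work(). If userspace set
	 * IORING_URING_CMD_UNLOCK in sqe->uring_cmd_flags,
	 * ctx->uring_lock has been dropped here, so submitting new
	 * SQEs (e.g. via io_uring_submit_sqe() from this series)
	 * cannot deadlock on the mutex.
	 */
	io_uring_cmd_done(ioucmd, 0, 0);
}

/* hypothetical ->uring_cmd() implementation */
static int foo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	/* punt the real work to task_work context */
	io_uring_cmd_complete_in_task(ioucmd, foo_cmd_task_work_cb);
	return -EIOCBQUEUED;
}

Userspace opts in per command by setting IORING_URING_CMD_UNLOCK in
sqe->uring_cmd_flags; io_uring_cmd_prep() above still rejects any other
unknown bits.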