The current cancelation will look up and cancel the first request it
finds based on the key passed in. Add a flag that allows canceling any
request that matches the key. The request completes with either -ENOENT
if no matching requests were found, or res > 0 for the number of
entries canceled.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 fs/io_uring.c                 | 60 +++++++++++++++++++++++++----------
 include/uapi/linux/io_uring.h |  7 ++++
 2 files changed, 50 insertions(+), 17 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6dcf3ad7ee99..c7e5d60fbbe5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -568,6 +568,7 @@ struct io_sync {
 struct io_cancel {
 	struct file			*file;
 	u64				addr;
+	u32				flags;
 };
 
 struct io_timeout {
@@ -6319,6 +6320,7 @@ static bool io_poll_disarm(struct io_kiocb *req)
 struct io_cancel_data {
 	struct io_ring_ctx *ctx;
 	u64 data;
+	u32 flags;
 };
 
 static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
@@ -6774,7 +6776,8 @@ static int io_async_cancel_one(struct io_uring_task *tctx,
 	if (!tctx || !tctx->io_wq)
 		return -ENOENT;
 
-	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, false);
+	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd,
+				     cd->flags & IORING_ASYNC_CANCEL_ALL);
 	switch (cancel_ret) {
 	case IO_WQ_CANCEL_OK:
 		ret = 0;
@@ -6825,27 +6828,34 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
-	if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
-	    sqe->splice_fd_in)
+	if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
 		return -EINVAL;
 
 	req->cancel.addr = READ_ONCE(sqe->addr);
+	req->cancel.flags = READ_ONCE(sqe->cancel_flags);
+	if (req->cancel.flags & ~IORING_ASYNC_CANCEL_ALL)
+		return -EINVAL;
+
 	return 0;
 }
 
-static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
+			     unsigned int issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_cancel_data cd = {
-		.ctx	= ctx,
-		.data	= req->cancel.addr,
-	};
+	bool cancel_all = cd->flags & IORING_ASYNC_CANCEL_ALL;
+	struct io_ring_ctx *ctx = cd->ctx;
 	struct io_tctx_node *node;
-	int ret;
+	int ret, nr = 0;
 
-	ret = io_try_cancel(req, &cd);
-	if (ret != -ENOENT)
-		goto done;
+	do {
+		ret = io_try_cancel(req, cd);
+		if (ret == -ENOENT)
+			break;
+		if (!cancel_all)
+			return ret;
+		nr++;
+		io_run_task_work();
+	} while (1);
 
 	/* slow path, try all io-wq's */
 	io_ring_submit_lock(ctx, issue_flags);
@@ -6853,12 +6863,28 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
 
-		ret = io_async_cancel_one(tctx, &cd);
-		if (ret != -ENOENT)
-			break;
+		ret = io_async_cancel_one(tctx, cd);
+		if (ret != -ENOENT) {
+			if (!cancel_all)
+				break;
+			nr++;
+			io_run_task_work();
+		}
 	}
 	io_ring_submit_unlock(ctx, issue_flags);
-done:
+	return cancel_all ? nr : ret;
+}
+
+static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_cancel_data cd = {
+		.ctx	= req->ctx,
+		.data	= req->cancel.addr,
+		.flags	= req->cancel.flags,
+	};
+	int ret;
+
+	ret = __io_async_cancel(&cd, req, issue_flags);
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_complete_post(req, ret, 0);
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 1845cf7c80ba..476e58a2837f 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -187,6 +187,13 @@ enum {
 #define IORING_POLL_UPDATE_EVENTS	(1U << 1)
 #define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
 
+/*
+ * ASYNC_CANCEL flags.
+ *
+ * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
+ */
+#define IORING_ASYNC_CANCEL_ALL	(1U << 0)
+
 /*
  * IO completion data structure (Completion Queue Entry)
  */
-- 
2.35.1
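
A rough userspace sketch of the new flag follows, for reference only and
not part of the patch. It assumes liburing for the ring plumbing, a
kernel and headers carrying this patch, and a hypothetical key of 0x1234
that earlier requests were tagged with via their user_data; liburing's
io_uring_prep_cancel() forwards its flags argument to sqe->cancel_flags.
Per the commit message, the cancel request's own CQE carries -ENOENT if
nothing matched, or the number of requests canceled.

#include <liburing.h>
#include <stdio.h>

/*
 * Sketch: cancel every pending request whose user_data matches the
 * (hypothetical) key 0x1234, then reap the cancel request's own
 * completion. Assumes `ring` was initialized elsewhere with
 * io_uring_queue_init(); error handling is omitted for brevity.
 */
static void cancel_all_matching(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	sqe = io_uring_get_sqe(ring);
	/* liburing copies the flags argument into sqe->cancel_flags */
	io_uring_prep_cancel(sqe, (void *) 0x1234UL, IORING_ASYNC_CANCEL_ALL);

	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);

	/* res == -ENOENT: no match; res > 0: number of entries canceled */
	if (cqe->res == -ENOENT)
		printf("no matching requests found\n");
	else
		printf("canceled %d request(s)\n", cqe->res);
	io_uring_cqe_seen(ring, cqe);
}

Without IORING_ASYNC_CANCEL_ALL the same call stops at the first match
and returns 0 on success, preserving the existing single-shot behavior.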