We don't do files-specific cancellations anymore, so we can kill
io_uring_cancel_files() and the old tracking scheme where we kept all
such requests in ->inflight_list and synchronised the list with
->inflight_lock.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 63 ++-------------------------------------------------
 1 file changed, 2 insertions(+), 61 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5bfcb72c916e..0c886ef49920 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -389,9 +389,6 @@ struct io_ring_ctx {
 	struct hlist_head	*cancel_hash;
 	unsigned		cancel_hash_bits;
 	bool			poll_multi_file;
-
-	spinlock_t		inflight_lock;
-	struct list_head	inflight_list;
 } ____cacheline_aligned_in_smp;
 
 	struct delayed_work		rsrc_put_work;
@@ -734,10 +731,7 @@ struct io_kiocb {
 	struct io_kiocb			*link;
 	struct percpu_ref		*fixed_rsrc_refs;
 
-	/*
-	 * 1. used with ctx->iopoll_list with reads/writes
-	 * 2. to track reqs with ->files (see io_op_def::file_table)
-	 */
+	/* tracks iopoll requests, see ctx->iopoll_list */
 	struct list_head		inflight_entry;
 	struct callback_head		task_work;
 	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
@@ -1331,8 +1325,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->iopoll_list);
 	INIT_LIST_HEAD(&ctx->defer_list);
 	INIT_LIST_HEAD(&ctx->timeout_list);
-	spin_lock_init(&ctx->inflight_lock);
-	INIT_LIST_HEAD(&ctx->inflight_list);
 	spin_lock_init(&ctx->rsrc_ref_lock);
 	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
 	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
@@ -1451,7 +1443,6 @@ static bool io_grab_identity(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 	struct io_identity *id = req->work.identity;
-	struct io_ring_ctx *ctx = req->ctx;
 
 	if (def->work_flags & IO_WQ_WORK_FSIZE) {
 		if (id->fsize != rlimit(RLIMIT_FSIZE))
@@ -1508,10 +1499,6 @@ static bool io_grab_identity(struct io_kiocb *req)
 		atomic_inc(&id->files->count);
 		get_nsproxy(id->nsproxy);
 		req->flags |= REQ_F_INFLIGHT;
-
-		spin_lock_irq(&ctx->inflight_lock);
-		list_add(&req->inflight_entry, &ctx->inflight_list);
-		spin_unlock_irq(&ctx->inflight_lock);
 		req->work.flags |= IO_WQ_WORK_FILES;
 	}
 	if (!(req->work.flags & IO_WQ_WORK_MM) &&
@@ -6155,15 +6142,10 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 static void io_req_drop_files(struct io_kiocb *req)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	struct io_uring_task *tctx = req->task->io_uring;
-	unsigned long flags;
 
 	put_files_struct(req->work.identity->files);
 	put_nsproxy(req->work.identity->nsproxy);
-	spin_lock_irqsave(&ctx->inflight_lock, flags);
-	list_del(&req->inflight_entry);
-	spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	req->flags &= ~REQ_F_INFLIGHT;
 	req->work.flags &= ~IO_WQ_WORK_FILES;
 	if (atomic_read(&tctx->in_idle))
@@ -8919,43 +8901,6 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 	}
 }
 
-static void io_uring_cancel_files(struct io_ring_ctx *ctx,
-				  struct task_struct *task,
-				  struct files_struct *files)
-{
-	while (!list_empty_careful(&ctx->inflight_list)) {
-		struct io_task_cancel cancel = { .task = task, .files = files };
-		struct io_kiocb *req;
-		DEFINE_WAIT(wait);
-		bool found = false;
-
-		spin_lock_irq(&ctx->inflight_lock);
-		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
-			if (req->task != task ||
-			    req->work.identity->files != files)
-				continue;
-			found = true;
-			break;
-		}
-		if (found)
-			prepare_to_wait(&task->io_uring->wait, &wait,
-					TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&ctx->inflight_lock);
-
-		/* We need to keep going until we don't find a matching req */
-		if (!found)
-			break;
-
-		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
-		io_poll_remove_all(ctx, task, files);
-		io_kill_timeouts(ctx, task, files);
-		/* cancellations _may_ trigger task work */
-		io_run_task_work();
-		schedule();
-		finish_wait(&task->io_uring->wait, &wait);
-	}
-}
-
 static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 					    struct task_struct *task)
 {
@@ -9019,11 +8964,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
 	io_cancel_defer_files(ctx, task, files);
 	io_cqring_overflow_flush(ctx, true, task, files);
-
-	if (!files)
-		__io_uring_cancel_task_requests(ctx, task);
-	else
-		io_uring_cancel_files(ctx, task, files);
+	__io_uring_cancel_task_requests(ctx, task);
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
 		atomic_dec(&task->io_uring->in_idle);
-- 
2.24.0