Clean up io_cancel_task_cb() and related io-wq cancellation after removing files-based cancellation. No need to drag files anymore, just pass the task directly. io_match_task() guarantees not to walk through a linked request when files==NULL, so we can also get rid of the ugly conditional synchronisation there. Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx> --- fs/io_uring.c | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 0c886ef49920..8d181ef44398 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -8850,29 +8850,12 @@ static int io_uring_release(struct inode *inode, struct file *file) return 0; } -struct io_task_cancel { - struct task_struct *task; - struct files_struct *files; -}; - static bool io_cancel_task_cb(struct io_wq_work *work, void *data) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); - struct io_task_cancel *cancel = data; - bool ret; - - if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) { - unsigned long flags; - struct io_ring_ctx *ctx = req->ctx; + struct task_struct *tsk = data; - /* protect against races with linked timeouts */ - spin_lock_irqsave(&ctx->completion_lock, flags); - ret = io_match_task(req, cancel->task, cancel->files); - spin_unlock_irqrestore(&ctx->completion_lock, flags); - } else { - ret = io_match_task(req, cancel->task, cancel->files); - } - return ret; + return io_match_task(req, tsk, NULL); } static void io_cancel_defer_files(struct io_ring_ctx *ctx, @@ -8905,13 +8888,12 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx, struct task_struct *task) { while (1) { - struct io_task_cancel cancel = { .task = task, .files = NULL, }; enum io_wq_cancel cret; bool ret = false; if (ctx->io_wq) { cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, - &cancel, true); + task, true); ret |= (cret != IO_WQ_CANCEL_NOTFOUND); } -- 2.24.0