To not skip events, io_uring_cancel_files() pretty much counts on
io_uring_count_inflight() to be monotonic for the time it's used. That's
not the case when it includes requests of other tasks that are
PF_EXITING. Cancel them as before, but don't account them in
io_uring_count_inflight(), as we can hang otherwise.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 17194e0d62ff..6b73e38aa1a9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1090,15 +1090,14 @@ static inline void io_set_resource_node(struct io_kiocb *req)
 	}
 }
 
-static bool io_match_task(struct io_kiocb *head,
-			  struct task_struct *task,
-			  struct files_struct *files)
+static bool __io_match_task(struct io_kiocb *head, struct task_struct *task,
+			    struct files_struct *files, bool match_exiting)
 {
 	struct io_kiocb *req;
 
 	if (task && head->task != task) {
 		/* in terms of cancelation, always match if req task is dead */
-		if (head->task->flags & PF_EXITING)
+		if (match_exiting && (head->task->flags & PF_EXITING))
 			return true;
 		return false;
 	}
@@ -1117,6 +1116,13 @@ static bool io_match_task(struct io_kiocb *head,
 	return false;
 }
 
+static bool io_match_task(struct io_kiocb *head,
+			  struct task_struct *task,
+			  struct files_struct *files)
+{
+	return __io_match_task(head, task, files, true);
+}
+
 static void io_sq_thread_drop_mm_files(void)
 {
 	struct files_struct *files = current->files;
@@ -9032,7 +9038,7 @@ static int io_uring_count_inflight(struct io_ring_ctx *ctx,
 
 	spin_lock_irq(&ctx->inflight_lock);
 	list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
-		cnt += io_match_task(req, task, files);
+		cnt += __io_match_task(req, task, files, false);
 	spin_unlock_irq(&ctx->inflight_lock);
 	return cnt;
 }
-- 
2.24.0
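
For reviewers, not part of the commit: below is a minimal C sketch of the
pattern the count feeds into. It paraphrases the wait loop in
io_uring_cancel_files(); the kernel types, locking and waitqueue details are
replaced with hypothetical stubs (count_inflight(), try_cancel_requests(),
sleep_now() and friends are stand-ins, not the real names), so it only
illustrates why the two reads of the count must not be confused by requests
of other exiting tasks.

/* Not kernel code: simplified paraphrase of io_uring_cancel_files(). */

/* opaque stand-ins for the real kernel types */
struct ctx;
struct task;
struct files;

/* hypothetical stubs for the helpers the real loop relies on */
int  count_inflight(struct ctx *c, struct task *t, struct files *f);
void try_cancel_requests(struct ctx *c, struct task *t, struct files *f);
void prepare_to_wait_for_completions(struct task *t);	/* ~prepare_to_wait() */
void sleep_now(void);					/* ~schedule() */
void finish_wait_for_completions(struct task *t);	/* ~finish_wait() */

static void cancel_files(struct ctx *ctx, struct task *task, struct files *files)
{
	for (;;) {
		int inflight = count_inflight(ctx, task, files);

		if (!inflight)
			break;
		try_cancel_requests(ctx, task, files);

		prepare_to_wait_for_completions(task);
		/*
		 * Sleep only if the count did not change, i.e. deduce that
		 * no counted request completed since the first read.  That
		 * deduction is sound only while the count moves in one
		 * direction.  If it also matches requests of other
		 * PF_EXITING tasks, one of our requests can complete before
		 * prepare_to_wait_for_completions() (that wakeup is lost,
		 * we were not on the queue yet) while another task adds a
		 * request: both reads return the same number, we sleep, and
		 * nothing is left to wake us up.
		 */
		if (inflight == count_inflight(ctx, task, files))
			sleep_now();
		finish_wait_for_completions(task);
	}
}

With the patch, cancellation still matches requests of dead tasks via
io_match_task(), but the count that gates the sleep only changes with this
task's own requests, so an unchanged count again implies no missed
completion.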