Do the same thing for files cancellation as we do for task cancellation,
in particular keep retrying to cancel while the corresponding inflight
counters are not zero. It's a preparation patch: io_uring_try_task_cancel()
still guarantees to kill all requests matching the files at the first
attempt. It also deduplicates the two functions a bit, so that only
__io_uring_task_cancel() needs to be exported to the core.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c            | 29 +++++++++++++++--------------
 include/linux/io_uring.h | 12 +++++-------
 2 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1794ad4bfa39..d20a2a96c3f8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8909,7 +8909,7 @@ static void io_uring_attempt_task_drop(struct file *file)
 		io_uring_del_task_file(current->io_uring, file);
 }
 
-void __io_uring_files_cancel(struct files_struct *files)
+static void io_uring_try_task_cancel(struct files_struct *files)
 {
 	struct io_uring_task *tctx = current->io_uring;
 	struct file *file;
@@ -8917,15 +8917,8 @@ void __io_uring_files_cancel(struct files_struct *files)
 
 	/* make sure overflow events are dropped */
 	atomic_inc(&tctx->in_idle);
-
-	xa_for_each(&tctx->xa, index, file) {
-		struct io_ring_ctx *ctx = file->private_data;
-
-		io_uring_cancel_task_requests(ctx, files);
-		if (files)
-			io_uring_del_task_file(tctx, file);
-	}
-
+	xa_for_each(&tctx->xa, index, file)
+		io_uring_cancel_task_requests(file->private_data, files);
 	atomic_dec(&tctx->in_idle);
 }
 
@@ -8968,7 +8961,7 @@ static s64 tctx_inflight(struct io_uring_task *tctx, bool files)
  * Find any io_uring fd that this task has registered or done IO on, and cancel
  * requests.
  */
-void __io_uring_task_cancel(void)
+void __io_uring_task_cancel(struct files_struct *files)
 {
 	struct io_uring_task *tctx = current->io_uring;
 	DEFINE_WAIT(wait);
@@ -8979,10 +8972,10 @@ void __io_uring_task_cancel(void)
 
 	do {
 		/* read completions before cancelations */
-		inflight = tctx_inflight(tctx, false);
+		inflight = tctx_inflight(tctx, !!files);
 		if (!inflight)
 			break;
-		__io_uring_files_cancel(NULL);
+		io_uring_try_task_cancel(files);
 
 		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
 
@@ -8990,13 +8983,21 @@ void __io_uring_task_cancel(void)
 		 * If we've seen completions, retry. This avoids a race where
 		 * a completion comes in before we did prepare_to_wait().
 		 */
-		if (inflight != tctx_inflight(tctx, false))
+		if (inflight != tctx_inflight(tctx, !!files))
 			continue;
 		schedule();
 	} while (1);
 
 	finish_wait(&tctx->wait, &wait);
 	atomic_dec(&tctx->in_idle);
+
+	if (files) {
+		struct file *file;
+		unsigned long index;
+
+		xa_for_each(&tctx->xa, index, file)
+			io_uring_del_task_file(tctx, file);
+	}
 }
 
 static int io_uring_flush(struct file *file, void *data)
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index e1ff6f235d03..282f02bd04a5 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -37,19 +37,17 @@ struct io_uring_task {
 
 #if defined(CONFIG_IO_URING)
 struct sock *io_uring_get_socket(struct file *file);
-void __io_uring_task_cancel(void);
-void __io_uring_files_cancel(struct files_struct *files);
+void __io_uring_task_cancel(struct files_struct *files);
 void __io_uring_free(struct task_struct *tsk);
 
-static inline void io_uring_task_cancel(void)
+static inline void io_uring_files_cancel(struct files_struct *files)
 {
 	if (current->io_uring && !xa_empty(&current->io_uring->xa))
-		__io_uring_task_cancel();
+		__io_uring_task_cancel(files);
 }
-static inline void io_uring_files_cancel(struct files_struct *files)
+static inline void io_uring_task_cancel(void)
 {
-	if (current->io_uring && !xa_empty(&current->io_uring->xa))
-		__io_uring_files_cancel(files);
+	io_uring_files_cancel(NULL);
 }
 static inline void io_uring_free(struct task_struct *tsk)
 {
-- 
2.24.0
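
For readers who don't live in the io_uring internals, the retry loop in
__io_uring_task_cancel() boils down to the pattern sketched below. This is
only a simplified userspace illustration, not the kernel code: C11 atomics
plus a pthread condition variable stand in for tctx->in_idle, tctx->wait,
prepare_to_wait() and schedule(), and the names inflight, cancel_matching(),
complete_one() and task_cancel() are made up for the example.

#include <pthread.h>
#include <stdatomic.h>

/* Stand-in for tctx_inflight(): number of matching requests still in flight. */
static atomic_long inflight;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
/* Stand-in for tctx->wait. */
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;

/* Stand-in for io_uring_try_task_cancel(): cancel requests matching @files. */
static void cancel_matching(void *files)
{
	(void)files;
}

/* Completion side: retire one request and wake a waiting canceller. */
static void complete_one(void)
{
	pthread_mutex_lock(&lock);
	atomic_fetch_sub(&inflight, 1);
	pthread_cond_signal(&wait_cv);
	pthread_mutex_unlock(&lock);
}

/* Keep cancelling until every matching request has drained. */
static void task_cancel(void *files)
{
	long seen;

	do {
		/* read completions before cancelations */
		seen = atomic_load(&inflight);
		if (!seen)
			break;
		cancel_matching(files);

		pthread_mutex_lock(&lock);
		/*
		 * If a completion slipped in since the snapshot, retry right
		 * away; otherwise sleep until complete_one() signals us.
		 */
		if (seen == atomic_load(&inflight))
			pthread_cond_wait(&wait_cv, &lock);
		pthread_mutex_unlock(&lock);
	} while (1);
}

int main(void)
{
	/* Nothing in flight here, so cancellation returns immediately. */
	task_cancel(NULL);
	(void)complete_one;	/* referenced only to avoid an unused warning */
	return 0;
}

Built with -pthread this is a no-op run; the point is only the shape of the
snapshot/cancel/recheck/sleep cycle, which this patch now applies to the
files case as well as to plain task cancellation.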