Similarly to how we keep tctx->inflight, add tctx->inflight_files, which
tracks the number of requests with ->files set. This is a preparation
patch, so the counter is left unused for now. Also, as it's not as hot
as ->inflight, use atomics instead of a percpu counter.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c            | 27 +++++++++++++++++++--------
 include/linux/io_uring.h |  1 +
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1401c1444e77..3a3177739b13 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1497,6 +1497,7 @@ static bool io_grab_identity(struct io_kiocb *req)
 		req->flags |= REQ_F_INFLIGHT;
 		spin_lock_irq(&ctx->inflight_lock);
+		atomic_inc(&current->io_uring->inflight_files);
 		list_add(&req->inflight_entry, &ctx->inflight_list);
 		spin_unlock_irq(&ctx->inflight_lock);
 		req->work.flags |= IO_WQ_WORK_FILES;
@@ -6101,6 +6102,7 @@ static void io_req_drop_files(struct io_kiocb *req)
 	put_nsproxy(req->work.identity->nsproxy);
 	spin_lock_irqsave(&ctx->inflight_lock, flags);
 	list_del(&req->inflight_entry);
+	atomic_dec(&tctx->inflight_files);
 	spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	req->flags &= ~REQ_F_INFLIGHT;
 	req->work.flags &= ~IO_WQ_WORK_FILES;
@@ -8012,6 +8014,7 @@ static int io_uring_alloc_task_context(struct task_struct *task)
 	init_waitqueue_head(&tctx->wait);
 	tctx->last = NULL;
 	atomic_set(&tctx->in_idle, 0);
+	atomic_set(&tctx->inflight_files, 0);
 	tctx->sqpoll = false;
 	io_init_identity(&tctx->__identity);
 	tctx->identity = &tctx->__identity;
@@ -8927,13 +8930,17 @@ void __io_uring_files_cancel(struct files_struct *files)
 	atomic_dec(&tctx->in_idle);
 }
 
-static s64 tctx_inflight(struct io_uring_task *tctx)
+static s64 tctx_inflight(struct io_uring_task *tctx, bool files)
 {
 	unsigned long index;
 	struct file *file;
 	s64 inflight;
 
-	inflight = percpu_counter_sum(&tctx->inflight);
+	if (files)
+		inflight = atomic_read(&tctx->inflight_files);
+	else
+		inflight = percpu_counter_sum(&tctx->inflight);
+
 	if (!tctx->sqpoll)
 		return inflight;
 
@@ -8943,12 +8950,16 @@ static s64 tctx_inflight(struct io_uring_task *tctx)
 	 */
 	xa_for_each(&tctx->xa, index, file) {
 		struct io_ring_ctx *ctx = file->private_data;
+		struct io_uring_task *sqpoll_tctx;
 
-		if (ctx->flags & IORING_SETUP_SQPOLL) {
-			struct io_uring_task *__tctx = ctx->sqo_task->io_uring;
+		if (!(ctx->flags & IORING_SETUP_SQPOLL))
+			continue;
 
-			inflight += percpu_counter_sum(&__tctx->inflight);
-		}
+		sqpoll_tctx = ctx->sqo_task->io_uring;
+		if (files)
+			inflight += atomic_read(&sqpoll_tctx->inflight_files);
+		else
+			inflight += percpu_counter_sum(&sqpoll_tctx->inflight);
 	}
 
 	return inflight;
@@ -8969,7 +8980,7 @@ void __io_uring_task_cancel(void)
 
 	do {
 		/* read completions before cancelations */
-		inflight = tctx_inflight(tctx);
+		inflight = tctx_inflight(tctx, false);
 		if (!inflight)
 			break;
 		__io_uring_files_cancel(NULL);
@@ -8980,7 +8991,7 @@ void __io_uring_task_cancel(void)
 		 * If we've seen completions, retry. This avoids a race where
 		 * a completion comes in before we did prepare_to_wait().
 		 */
-		if (inflight != tctx_inflight(tctx))
+		if (inflight != tctx_inflight(tctx, false))
 			continue;
 		schedule();
 	} while (1);
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 35b2d845704d..e1ff6f235d03 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -28,6 +28,7 @@ struct io_uring_task {
 	struct wait_queue_head wait;
 	struct file *last;
 	struct percpu_counter inflight;
+	atomic_t inflight_files;
 	struct io_identity __identity;
 	struct io_identity *identity;
 	atomic_t in_idle;
-- 
2.24.0
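
For readers following along, here is a minimal standalone sketch of the
counting scheme the patch introduces, written as plain C11 userspace
code rather than kernel code; struct fake_tctx and the grab_files() /
drop_files() / files_inflight() helpers are illustrative names invented
for this sketch, not io_uring API:

/*
 * Standalone sketch (not kernel code) of the pattern above: a cheap
 * atomic counter bumped when a request takes a files reference and
 * dropped when the reference is released, so a canceling task can
 * poll "how many file-bound requests remain?".
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_tctx {
	atomic_int inflight_files;	/* mirrors tctx->inflight_files */
};

static void grab_files(struct fake_tctx *tctx)
{
	/* corresponds to the atomic_inc() added in io_grab_identity() */
	atomic_fetch_add(&tctx->inflight_files, 1);
}

static void drop_files(struct fake_tctx *tctx)
{
	/* corresponds to the atomic_dec() added in io_req_drop_files() */
	atomic_fetch_sub(&tctx->inflight_files, 1);
}

static int files_inflight(struct fake_tctx *tctx)
{
	/* corresponds to the files==true branch of tctx_inflight() */
	return atomic_load(&tctx->inflight_files);
}

int main(void)
{
	struct fake_tctx tctx = { .inflight_files = 0 };

	grab_files(&tctx);
	grab_files(&tctx);
	drop_files(&tctx);
	printf("inflight files: %d\n", files_inflight(&tctx)); /* 1 */
	return 0;
}

As the commit message notes, a plain atomic_t suffices here because the
counter is only touched on the comparatively rare ->files grab/drop
paths, so the per-CPU batching that makes percpu_counter attractive for
the hot ->inflight counter buys little in exchange for its costlier
percpu_counter_sum() on the read side.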