This is used in the next patch for task_work batch optimization.

Signed-off-by: Hao Xu <haoxu@xxxxxxxxxxxxxxxxx>
---
 fs/io_uring.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index db5d9189df3a..7c6d90d693b8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -470,6 +470,7 @@ struct io_uring_task {
 	struct io_wq_work_list	prior_task_list;
 	struct callback_head	task_work;
 	bool			task_running;
+	unsigned int		nr_ctx;
 };
 
 /*
@@ -9655,6 +9656,9 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 		mutex_lock(&ctx->uring_lock);
 		list_add(&node->ctx_node, &ctx->tctx_list);
 		mutex_unlock(&ctx->uring_lock);
+		spin_lock_irq(&tctx->task_lock);
+		tctx->nr_ctx++;
+		spin_unlock_irq(&tctx->task_lock);
 	}
 	tctx->last = ctx;
 	return 0;
@@ -9692,6 +9696,9 @@ static __cold void io_uring_del_tctx_node(unsigned long index)
 	mutex_lock(&node->ctx->uring_lock);
 	list_del(&node->ctx_node);
 	mutex_unlock(&node->ctx->uring_lock);
+	spin_lock_irq(&tctx->task_lock);
+	tctx->nr_ctx--;
+	spin_unlock_irq(&tctx->task_lock);
 
 	if (tctx->last == node->ctx)
 		tctx->last = NULL;
-- 
2.24.4
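
For context, below is a sketch of the kind of consumer the next patch
enables. It is illustrative only: handle_single_ctx_tw() and
handle_tw_list() are hypothetical helper names, and the real follow-up
may structure this differently. The idea is that nr_ctx is updated under
tctx->task_lock, so the task_work handler can snapshot it under the same
lock; when a task is attached to exactly one ring (nr_ctx == 1), every
queued item must belong to that ctx, so the handler can take
ctx->uring_lock once and complete the whole batch instead of
re-resolving and re-locking the ctx per request.

/* Hypothetical helpers, not part of this series. */
static void handle_single_ctx_tw(struct io_wq_work_node *node);
static void handle_tw_list(struct io_wq_work_node *node);

static void tctx_task_work_example(struct callback_head *cb)
{
	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
						  task_work);
	struct io_wq_work_node *node;
	unsigned int nr_ctx;

	/* Splice the pending list and snapshot nr_ctx under task_lock,
	 * the same lock that guards the updates added by this patch. */
	spin_lock_irq(&tctx->task_lock);
	node = tctx->prior_task_list.first;
	INIT_WQ_LIST(&tctx->prior_task_list);
	nr_ctx = tctx->nr_ctx;
	spin_unlock_irq(&tctx->task_lock);

	if (nr_ctx == 1)
		handle_single_ctx_tw(node);	/* batched: one lock, one flush */
	else
		handle_tw_list(node);		/* generic per-request path */
}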