__tctx_task_work() guarantees that ctx won't be killed while running
task_works, so we can remove the now-unnecessary ctx pinning for
internally armed polling.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3cfc50320923..36d0bc506be4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4780,7 +4780,6 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 
 	req->result = mask;
 	req->task_work.func = func;
-	percpu_ref_get(&req->ctx->refs);
 
 	/*
 	 * If this fails, then the task is exiting. When a task exits, the
@@ -4877,8 +4876,6 @@ static void io_poll_task_func(struct callback_head *cb)
 		if (nxt)
 			__io_req_task_submit(nxt);
 	}
-
-	percpu_ref_put(&ctx->refs);
 }
 
 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
@@ -4985,7 +4982,6 @@ static void io_async_task_func(struct callback_head *cb)
 
 	if (io_poll_rewait(req, &apoll->poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
-		percpu_ref_put(&ctx->refs);
 		return;
 	}
 
@@ -5001,7 +4997,6 @@ static void io_async_task_func(struct callback_head *cb)
 	else
 		__io_req_task_cancel(req, -ECANCELED);
 
-	percpu_ref_put(&ctx->refs);
 	kfree(apoll->double_poll);
 	kfree(apoll);
 }
-- 
2.24.0
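
For context, the pattern being deleted is the classic percpu_ref pin
around deferred work: take a reference on the ring ctx before queueing
the task_work, drop it once the callback has finished touching the ctx.
Below is a minimal illustrative sketch of that idiom, not the actual
io_uring code; pinned_task_work(), arm_pinned_task_work() and
complete_poll() are hypothetical stand-ins, while percpu_ref_get/put(),
container_of() and task_work_add() are the real kernel APIs:

/*
 * Illustrative only: the percpu_ref pinning idiom this patch removes.
 * complete_poll() is a hypothetical stand-in for the real completion
 * logic, and error handling is simplified.
 */
static void pinned_task_work(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_ring_ctx *ctx = req->ctx;

	complete_poll(req);		/* hypothetical completion helper */
	percpu_ref_put(&ctx->refs);	/* unpin: pairs with the get below */
}

static void arm_pinned_task_work(struct io_kiocb *req)
{
	/* Pin the ctx so it cannot be freed before the callback runs. */
	percpu_ref_get(&req->ctx->refs);
	req->task_work.func = pinned_task_work;
	if (task_work_add(req->task, &req->task_work, TWA_SIGNAL))
		percpu_ref_put(&req->ctx->refs);	/* task exiting: undo pin */
}

With __tctx_task_work() now holding the ctx alive for the whole
task_work run, the get/put pair sketched above becomes redundant, which
is exactly what the diff deletes.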