[RFC 03/11] io_uring: ban tw queue for exiting processes

We rely on PF_EXITING and the task_work infrastructure to prevent
adding new task_work items to a dying task, which is a bit more
convoluted than desired.

Ban new tw items earlier, in io_uring_cancel_generic(), by relying on
->in_idle. io_req_task_work_add() will check the flag, set REQ_F_FAIL
and push such requests to the fallback path. task_work handlers will
then see the flag and cancel the requests, just as they previously did
with PF_EXITING.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
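Note for reviewers: below is a minimal, simplified userspace sketch of
the gating rule this patch establishes. It is purely illustrative, not
kernel code; the names work_item, tctx_model, queue_work() and
enter_idle() are invented, and it diverts only the incoming item rather
than merging the whole list as the real io_req_task_work_add() does.
The point it models is that publishing the idle flag under the same
lock that serialises queueing means a concurrent producer either queues
before idle starts or sees the flag and fails the request into the
fallback path.

/*
 * Illustrative userspace model only -- not io_uring code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct work_item {
        struct work_item *next;
        bool failed;                    /* stands in for REQ_F_FAIL */
};

struct tctx_model {
        pthread_mutex_t task_lock;      /* stands in for tctx->task_lock */
        atomic_int in_idle;             /* stands in for tctx->in_idle */
        struct work_item *task_list;    /* normal task_work list */
        struct work_item *fallback;     /* fallback/cancel list */
};

/* Loosely models io_req_task_work_add(): queue, or fail into fallback. */
static void queue_work(struct tctx_model *t, struct work_item *w)
{
        pthread_mutex_lock(&t->task_lock);
        if (atomic_load(&t->in_idle)) {
                /* Too late: mark failed and divert to the fallback path. */
                w->failed = true;
                w->next = t->fallback;
                t->fallback = w;
        } else {
                w->next = t->task_list;
                t->task_list = w;
        }
        pthread_mutex_unlock(&t->task_lock);
}

/* Loosely models io_uring_cancel_generic(): set in_idle under the lock. */
static void enter_idle(struct tctx_model *t)
{
        pthread_mutex_lock(&t->task_lock);
        atomic_fetch_add(&t->in_idle, 1);
        pthread_mutex_unlock(&t->task_lock);
        /* From here on, queue_work() can only hit the fallback path. */
}

int main(void)
{
        struct tctx_model t = { .task_lock = PTHREAD_MUTEX_INITIALIZER };
        struct work_item a = { 0 }, b = { 0 };

        queue_work(&t, &a);     /* queued normally, a.failed == false */
        enter_idle(&t);
        queue_work(&t, &b);     /* diverted, b.failed == true */

        printf("a.failed=%d b.failed=%d\n", a.failed, b.failed);
        return 0;
}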

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 272a180ab7ee..ec5fe55ab265 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1306,7 +1306,7 @@ static inline void req_ref_get(struct io_kiocb *req)
 
 static inline bool io_should_fail_tw(struct io_kiocb *req)
 {
-	return unlikely(req->task->flags & PF_EXITING);
+	return unlikely(req->flags & REQ_F_FAIL);
 }
 
 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
@@ -2577,10 +2577,6 @@ static void tctx_task_work(struct callback_head *cb)
 	}
 
 	ctx_flush_and_put(ctx, &uring_locked);
-
-	/* relaxed read is enough as only the task itself sets ->in_idle */
-	if (unlikely(atomic_read(&tctx->in_idle)))
-		io_uring_drop_tctx_refs(current);
 }
 
 static void io_req_task_work_add(struct io_kiocb *req, bool priority)
@@ -2600,6 +2596,9 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
 	spin_lock_irqsave(&tctx->task_lock, flags);
 	list = priority ? &tctx->prior_task_list : &tctx->task_list;
 	wq_list_add_tail(&req->io_task_work.node, list);
+	if (unlikely(atomic_read(&tctx->in_idle)))
+		goto cancel_locked;
+
 	running = tctx->task_running;
 	if (!running)
 		tctx->task_running = true;
@@ -2623,12 +2622,13 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
 	}
 
 	spin_lock_irqsave(&tctx->task_lock, flags);
-	tctx->task_running = false;
+cancel_locked:
 	node = wq_list_merge(&tctx->prior_task_list, &tctx->task_list);
 	spin_unlock_irqrestore(&tctx->task_lock, flags);
 
 	while (node) {
 		req = container_of(node, struct io_kiocb, io_task_work.node);
+		req_set_fail(req);
 		node = node->next;
 		if (llist_add(&req->io_task_work.fallback_node,
 			      &req->ctx->fallback_llist))
@@ -10352,7 +10352,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
 	if (tctx->io_wq)
 		io_wq_exit_start(tctx->io_wq);
 
+	spin_lock_irq(&tctx->task_lock);
 	atomic_inc(&tctx->in_idle);
+	spin_unlock_irq(&tctx->task_lock);
+
 	do {
 		io_uring_drop_tctx_refs(current);
 		/* read completions before cancelations */
-- 
2.36.0



