[PATCH 3/4] io_uring: switch fallback work to io_wq_work_list

Just like what was done for deferred task_work, convert the fallback
task_work from a lockless llist to a normal io_wq_work_list. Additions
and the list splice in io_fallback_req_func() are now serialized by
ctx->work_lock, and the fallback work is only scheduled when an entry
is added to an empty list, preserving the semantics of the old
llist_add() return value check.
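
For readers unfamiliar with the pattern, below is a minimal,
self-contained userspace sketch of the two operations the patch relies
on: appending under a lock while noting the empty-to-non-empty
transition, and splicing the whole list out under the lock so the
entries can be run without holding it. All names (work_item,
work_list and friends) are hypothetical, and a pthread mutex stands in
for the kernel spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct work_item {
        struct work_item *next;
        void (*func)(struct work_item *);
    };

    struct work_list {
        struct work_item *first;
        struct work_item **last;    /* tail pointer for O(1) append */
        pthread_mutex_t lock;
    };

    static void work_list_init(struct work_list *wl)
    {
        wl->first = NULL;
        wl->last = &wl->first;
        pthread_mutex_init(&wl->lock, NULL);
    }

    /* Append one item; returns true if the list was previously empty,
     * the caller's cue to kick the worker (cf. the do_wake check that
     * gates schedule_delayed_work() in the patch below). */
    static bool work_list_add_tail(struct work_list *wl,
                                   struct work_item *item)
    {
        bool was_empty;

        item->next = NULL;
        pthread_mutex_lock(&wl->lock);
        was_empty = (wl->first == NULL);
        *wl->last = item;
        wl->last = &item->next;
        pthread_mutex_unlock(&wl->lock);
        return was_empty;
    }

    /* Splice the whole list out under the lock, then run the entries
     * without holding it, as io_fallback_req_func() does in the patch
     * below. */
    static void work_list_run(struct work_list *wl)
    {
        struct work_item *node;

        pthread_mutex_lock(&wl->lock);
        node = wl->first;
        wl->first = NULL;
        wl->last = &wl->first;
        pthread_mutex_unlock(&wl->lock);

        while (node) {
            struct work_item *next = node->next;

            node->func(node);    /* func may free node */
            node = next;
        }
    }

The single-consumer case sketched here mirrors the patch: only the
fallback worker splices the list, so entries run in FIFO order.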

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 include/linux/io_uring_types.h |  2 +-
 io_uring/io_uring.c            | 24 +++++++++++++++++++-----
 2 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index e51bf15196e4..2bc253f8147d 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -400,7 +400,7 @@ struct io_ring_ctx {
 	struct mm_struct		*mm_account;
 
 	/* ctx exit and cancelation */
-	struct llist_head		fallback_llist;
+	struct io_wq_work_list		fallback_list;
 	struct delayed_work		fallback_work;
 	struct work_struct		exit_work;
 	struct list_head		tctx_list;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9c06911077db..8d7138eaa921 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -243,14 +243,22 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 {
 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
 						fallback_work.work);
-	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
-	struct io_kiocb *req, *tmp;
+	struct io_wq_work_node *node;
 	struct io_tw_state ts = {};
+	struct io_kiocb *req;
+
+	spin_lock_irq(&ctx->work_lock);
+	node = ctx->fallback_list.first;
+	INIT_WQ_LIST(&ctx->fallback_list);
+	spin_unlock_irq(&ctx->work_lock);
 
 	percpu_ref_get(&ctx->refs);
 	mutex_lock(&ctx->uring_lock);
-	llist_for_each_entry_safe(req, tmp, node, io_task_work.llist_node)
+	while (node) {
+		req = container_of(node, struct io_kiocb, io_task_work.node);
+		node = node->next;
 		req->io_task_work.func(req, &ts);
+	}
 	io_submit_flush_completions(ctx);
 	mutex_unlock(&ctx->uring_lock);
 	percpu_ref_put(&ctx->refs);
@@ -1167,6 +1175,9 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 	struct io_kiocb *req;
 
 	while (node) {
+		unsigned long flags;
+		bool do_wake;
+
 		req = container_of(node, struct io_kiocb, io_task_work.llist_node);
 		node = node->next;
 		if (sync && last_ctx != req->ctx) {
@@ -1177,8 +1188,11 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 			last_ctx = req->ctx;
 			percpu_ref_get(&last_ctx->refs);
 		}
-		if (llist_add(&req->io_task_work.llist_node,
-			      &req->ctx->fallback_llist))
+		spin_lock_irqsave(&req->ctx->work_lock, flags);
+		do_wake = wq_list_empty(&req->ctx->fallback_list);
+		wq_list_add_tail(&req->io_task_work.node, &req->ctx->fallback_list);
+		spin_unlock_irqrestore(&req->ctx->work_lock, flags);
+		if (do_wake)
 			schedule_delayed_work(&req->ctx->fallback_work, 1);
 	}
 
-- 
2.43.0
