[RFC PATCH 1/2] io_uring: make io_req_normal_work_add accept a list of requests

Make io_req_normal_work_add() accept a list of requests, passed as its
first and last entries, so that callers can queue a whole batch with a
single llist_add_batch() call and reduce contention on tctx->task_list.

Signed-off-by: Bui Quang Minh <minhquangbui99@xxxxxxxxx>
---
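Note for reviewers: a rough sketch of how a caller might use the new
two-argument form. The helper name io_queue_tw_batch and its arguments
are made up for illustration and are not part of this series; it assumes
all requests in the batch share the same io_uring_task, since
io_req_normal_work_add() reads tctx from the first request. The caller
links the requests through io_task_work.node so that the single
llist_add_batch() touches tctx->task_list once per batch instead of once
per request:

	static void io_queue_tw_batch(struct io_kiocb **reqs, int nr)
	{
		int i;

		/* chain the requests via the llist node consumed by task_work */
		for (i = 0; i < nr - 1; i++)
			reqs[i]->io_task_work.node.next =
					&reqs[i + 1]->io_task_work.node;

		/* one llist_add_batch() instead of nr llist_add() calls */
		io_req_normal_work_add(reqs[0], reqs[nr - 1]);
	}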
 io_uring/io_uring.c | 13 ++++++++-----
 io_uring/io_uring.h |  2 ++
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ceacf6230e34..0c111f7d7832 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1214,13 +1214,16 @@ static inline void io_req_local_work_add(struct io_kiocb *req,
 	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
 }
 
-static void io_req_normal_work_add(struct io_kiocb *req)
+void io_req_normal_work_add(struct io_kiocb *first_req,
+			    struct io_kiocb *last_req)
 {
-	struct io_uring_task *tctx = req->tctx;
-	struct io_ring_ctx *ctx = req->ctx;
+	struct io_uring_task *tctx = first_req->tctx;
+	struct io_ring_ctx *ctx = first_req->ctx;
 
 	/* task_work already pending, we're done */
-	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
+	if (!llist_add_batch(&first_req->io_task_work.node,
+			     &last_req->io_task_work.node,
+			     &tctx->task_list))
 		return;
 
 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
@@ -1243,7 +1246,7 @@ void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
 	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
 		io_req_local_work_add(req, req->ctx, flags);
 	else
-		io_req_normal_work_add(req);
+		io_req_normal_work_add(req, req);
 }
 
 void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index ab619e63ef39..bdd6407c14d0 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -88,6 +88,8 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
 void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
 				 unsigned flags);
+void io_req_normal_work_add(struct io_kiocb *first_req,
+			    struct io_kiocb *last_req);
 bool io_alloc_async_data(struct io_kiocb *req);
 void io_req_task_queue(struct io_kiocb *req);
 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
-- 
2.43.0