[PATCH 5/6] io_uring: add __tctx_task_work_run() helper

Most use cases only care about running all of the pending task_work, and
don't need the retry node passed back or the amount of work capped.
Rename the existing helper to __tctx_task_work_run(), and add a
tctx_task_work_run() wrapper around it for these simpler use cases.
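
For illustration, the common call-site pattern goes from:

	unsigned int count = 0;

	__tctx_task_work_run(tctx, UINT_MAX, &count);
	if (count)
		...

to simply:

	if (tctx_task_work_run(tctx))
		...

(a sketch of the simplification, mirroring the io_run_task_work() hunk
below; only callers that need the retry list or a cap, like sqpoll, keep
using the double-underscore variant).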

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 io_uring/io_uring.c | 18 ++++++++++++------
 io_uring/io_uring.h |  9 +++------
 io_uring/sqpoll.c   |  2 +-
 3 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3bb93c77ac3f..bc520a67fc03 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1137,9 +1137,9 @@ static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 	__io_fallback_tw(&tctx->task_list, &tctx->task_lock, sync);
 }
 
-struct io_wq_work_node *tctx_task_work_run(struct io_uring_task *tctx,
-					   unsigned int max_entries,
-					   unsigned int *count)
+struct io_wq_work_node *__tctx_task_work_run(struct io_uring_task *tctx,
+					     unsigned int max_entries,
+					     unsigned int *count)
 {
 	struct io_wq_work_node *node;
 
@@ -1167,14 +1167,20 @@ struct io_wq_work_node *tctx_task_work_run(struct io_uring_task *tctx,
 	return node;
 }
 
+unsigned int tctx_task_work_run(struct io_uring_task *tctx)
+{
+	unsigned int count = 0;
+
+	__tctx_task_work_run(tctx, UINT_MAX, &count);
+	return count;
+}
+
 void tctx_task_work(struct callback_head *cb)
 {
 	struct io_uring_task *tctx;
-	unsigned int count = 0;
 
 	tctx = container_of(cb, struct io_uring_task, task_work);
-	if (tctx_task_work_run(tctx, UINT_MAX, &count))
-		WARN_ON_ONCE(1);
+	tctx_task_work_run(tctx);
 }
 
 static inline void io_req_local_work_add(struct io_kiocb *req,
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 0b5181b128aa..2b0e7c5db30d 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -93,8 +93,9 @@ void io_req_task_queue_fail(struct io_kiocb *req, int ret);
 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
 struct io_wq_work_node *io_handle_tw_list(struct io_wq_work_node *node,
 	unsigned int *count, unsigned int max_entries);
-struct io_wq_work_node *tctx_task_work_run(struct io_uring_task *tctx,
+struct io_wq_work_node *__tctx_task_work_run(struct io_uring_task *tctx,
 	unsigned int max_entries, unsigned int *count);
+unsigned int tctx_task_work_run(struct io_uring_task *tctx);
 void tctx_task_work(struct callback_head *cb);
 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
 int io_uring_alloc_task_context(struct task_struct *task,
@@ -332,12 +333,8 @@ static inline int io_run_task_work(void)
 			resume_user_mode_work(NULL);
 		}
 		if (current->io_uring) {
-			unsigned int count = 0;
-
 			__set_current_state(TASK_RUNNING);
-			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
-			if (count)
-				ret = true;
+			ret = tctx_task_work_run(current->io_uring) != 0;
 		}
 	}
 	if (task_work_pending(current)) {
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 615707260f25..aec6c2d56910 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -232,7 +232,7 @@ static unsigned int io_sq_tw(struct io_wq_work_node **retry_list, int max_entrie
 			goto out;
 		max_entries -= count;
 	}
-	*retry_list = tctx_task_work_run(tctx, max_entries, &count);
+	*retry_list = __tctx_task_work_run(tctx, max_entries, &count);
 out:
 	if (task_work_pending(current))
 		task_work_run();
-- 
2.45.2