[PATCH 3/4] io_uring: ensure iopoll runs local task work as well

Combine the two checks we have - one for whether task_work needs running, and
one for whether we need to shuffle the mutex to run it - into a single check,
unifying how task_work is run in the iopoll loop. This helps ensure that local
task_work is run when needed, and also optimizes that path to avoid a mutex
shuffle when it isn't needed.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
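
As a rough, stand-alone illustration of the combined check the hunk below
introduces (not kernel-buildable code; the struct and helper names here are
simplified stand-ins for the real io_uring types and locking), the per-iteration
flow after this patch looks roughly like this:

/*
 * User-space sketch of the unified task_work check in the iopoll loop.
 * All fields and helpers are simplified stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	bool iopoll_list_empty;  /* stand-in for wq_list_empty(&ctx->iopoll_list) */
	bool local_work_pending; /* stand-in for !llist_empty(&ctx->work_llist) */
	bool notify_signal;      /* stand-in for test_thread_flag(TIF_NOTIFY_SIGNAL) */
};

/* mirrors the new io_task_work_pending() helper */
static bool ctx_task_work_pending(struct ctx *c)
{
	return c->notify_signal || c->local_work_pending;
}

static void run_local_work(struct ctx *c)
{
	/* deferred (local) work can run while still holding the ring lock */
	c->local_work_pending = false;
	puts("ran local task_work under uring_lock");
}

static void run_generic_task_work(struct ctx *c)
{
	/* generic task_work still needs the mutex dropped around it */
	puts("unlock -> run task_work -> lock");
	c->notify_signal = false;
}

static void iopoll_check_iteration(struct ctx *c)
{
	/* single unified check replacing the two separate ones */
	if (c->iopoll_list_empty || ctx_task_work_pending(c)) {
		if (c->local_work_pending)
			run_local_work(c);
		if (c->notify_signal || c->iopoll_list_empty)
			run_generic_task_work(c);
	}
	puts("io_do_iopoll()");
}

int main(void)
{
	struct ctx c = { .iopoll_list_empty = false, .local_work_pending = true };

	iopoll_check_iteration(&c);
	return 0;
}

The point being that local (DEFER_TASKRUN) work is handled without dropping
uring_lock at all, so the unlock/lock shuffle is only paid when generic
task_work actually needs running or the iopoll list is empty.
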
 io_uring/io_uring.c | 39 ++++++++++++++++++++-------------------
 io_uring/io_uring.h |  6 ++++++
 2 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f841f0e126bc..118db2264189 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1428,25 +1428,26 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 		 * forever, while the workqueue is stuck trying to acquire the
 		 * very same mutex.
 		 */
-		if (wq_list_empty(&ctx->iopoll_list)) {
-			u32 tail = ctx->cached_cq_tail;
-
-			mutex_unlock(&ctx->uring_lock);
-			ret = io_run_task_work_ctx(ctx);
-			mutex_lock(&ctx->uring_lock);
-			if (ret < 0)
-				break;
-
-			/* some requests don't go through iopoll_list */
-			if (tail != ctx->cached_cq_tail ||
-			    wq_list_empty(&ctx->iopoll_list))
-				break;
-		}
-
-		if (task_work_pending(current)) {
-			mutex_unlock(&ctx->uring_lock);
-			io_run_task_work();
-			mutex_lock(&ctx->uring_lock);
+		if (wq_list_empty(&ctx->iopoll_list) ||
+		    io_task_work_pending(ctx)) {
+			if (!llist_empty(&ctx->work_llist))
+				__io_run_local_work(ctx, true);
+			if (task_work_pending(current) ||
+			    wq_list_empty(&ctx->iopoll_list)) {
+				u32 tail = ctx->cached_cq_tail;
+
+				mutex_unlock(&ctx->uring_lock);
+				ret = io_run_task_work();
+				mutex_lock(&ctx->uring_lock);
+
+				if (ret < 0)
+					break;
+
+				/* some requests don't go through iopoll_list */
+				if (tail != ctx->cached_cq_tail ||
+				    wq_list_empty(&ctx->iopoll_list))
+					break;
+			}
 		}
 		ret = io_do_iopoll(ctx, !min);
 		if (ret < 0)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 0f90d1dfa42b..9d89425292b7 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -236,6 +236,12 @@ static inline int io_run_task_work(void)
 	return 0;
 }
 
+static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+{
+	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
+		!llist_empty(&ctx->work_llist);
+}
+
 static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
 {
 	int ret = 0;
-- 
2.35.1



