[PATCH 2/5] io_uring: serialize ctx->rings->sq_flags with cmpxchg()

Rather than rely on ctx->completion_lock to ensure that we don't
clobber the flags, use try_cmpxchg() instead. This removes the need
to grab the completion_lock, in preparation for having to set or
clear sq_flags in contexts where the state of that lock isn't known.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 fs/io_uring.c | 54 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 32 insertions(+), 22 deletions(-)
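
Not part of the patch -- for illustration only, a minimal userspace
sketch of the same lock-free flag update, using C11
atomic_compare_exchange_weak() in place of the kernel's try_cmpxchg().
All names below (sq_flags, flag_set, flag_clear) are made up for this
sketch and are not the helpers added by the diff:

	/*
	 * Illustrative userspace analogue of the set/clear helpers in the
	 * diff below. On failure, atomic_compare_exchange_weak() reloads
	 * oldf with the current value, so the loop simply retries.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic unsigned int sq_flags;

	static void flag_set(unsigned int flag)
	{
		unsigned int oldf = atomic_load(&sq_flags);

		do {
			if (oldf & flag)	/* already set, nothing to do */
				return;
		} while (!atomic_compare_exchange_weak(&sq_flags, &oldf, oldf | flag));
	}

	static void flag_clear(unsigned int flag)
	{
		unsigned int oldf = atomic_load(&sq_flags);

		do {
			if (!(oldf & flag))	/* already clear, nothing to do */
				return;
		} while (!atomic_compare_exchange_weak(&sq_flags, &oldf, oldf & ~flag));
	}

	int main(void)
	{
		flag_set(1U << 1);
		flag_clear(1U << 1);
		printf("sq_flags=%u\n", (unsigned int)atomic_load(&sq_flags));
		return 0;
	}

As in the helpers added by the diff, the early return when the flag is
already in the desired state avoids a needless atomic write to the
shared word.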

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 626bf840bed2..38e58fe4963d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1999,6 +1999,34 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 		io_cqring_wake(ctx);
 }
 
+static void io_ring_sq_flag_clear(struct io_ring_ctx *ctx, unsigned int flag)
+{
+	struct io_rings *rings = ctx->rings;
+	unsigned int oldf, newf;
+
+	do {
+		oldf = READ_ONCE(rings->sq_flags);
+
+		if (!(oldf & flag))
+			break;
+		newf = oldf & ~flag;
+	} while (!try_cmpxchg(&rings->sq_flags, &oldf, newf));
+}
+
+static void io_ring_sq_flag_set(struct io_ring_ctx *ctx, unsigned int flag)
+{
+	struct io_rings *rings = ctx->rings;
+	unsigned int oldf, newf;
+
+	do {
+		oldf = READ_ONCE(rings->sq_flags);
+
+		if (oldf & flag)
+			break;
+		newf = oldf | flag;
+	} while (!try_cmpxchg(&rings->sq_flags, &oldf, newf));
+}
+
 /* Returns true if there are no backlogged entries after the flush */
 static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 {
@@ -2030,8 +2058,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 	all_flushed = list_empty(&ctx->cq_overflow_list);
 	if (all_flushed) {
 		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
-		WRITE_ONCE(ctx->rings->sq_flags,
-			   ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
+		io_ring_sq_flag_clear(ctx, IORING_SQ_CQ_OVERFLOW);
 	}
 
 	io_commit_cqring(ctx);
@@ -8105,23 +8132,6 @@ static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
 	return READ_ONCE(sqd->state);
 }
 
-static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
-{
-	/* Tell userspace we may need a wakeup call */
-	spin_lock(&ctx->completion_lock);
-	WRITE_ONCE(ctx->rings->sq_flags,
-		   ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
-	spin_unlock(&ctx->completion_lock);
-}
-
-static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
-{
-	spin_lock(&ctx->completion_lock);
-	WRITE_ONCE(ctx->rings->sq_flags,
-		   ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
-	spin_unlock(&ctx->completion_lock);
-}
-
 static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 {
 	unsigned int to_submit;
@@ -8237,7 +8247,7 @@ static int io_sq_thread(void *data)
 			bool needs_sched = true;
 
 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
-				io_ring_set_wakeup_flag(ctx);
+				io_ring_sq_flag_set(ctx, IORING_SQ_NEED_WAKEUP);
 
 				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
 				    !wq_list_empty(&ctx->iopoll_list)) {
@@ -8263,7 +8273,7 @@ static int io_sq_thread(void *data)
 				mutex_lock(&sqd->lock);
 			}
 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
-				io_ring_clear_wakeup_flag(ctx);
+				io_ring_sq_flag_clear(ctx, IORING_SQ_NEED_WAKEUP);
 		}
 
 		finish_wait(&sqd->wait, &wait);
@@ -8273,7 +8283,7 @@ static int io_sq_thread(void *data)
 	io_uring_cancel_generic(true, sqd);
 	sqd->thread = NULL;
 	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
-		io_ring_set_wakeup_flag(ctx);
+		io_ring_sq_flag_set(ctx, IORING_SQ_NEED_WAKEUP);
 	io_run_task_work();
 	mutex_unlock(&sqd->lock);
 
-- 
2.35.1



