[PATCH] tools/io_uring: Use <asm/barrier.h> instead of tools/io_uring/barrier.h

This patch avoids having multiple definitions of the barrier primitives in
the tools directory. It does not change the behavior of the code on x86,
since tools/arch/x86/include/asm/barrier.h defines smp_rmb() and smp_wmb()
as follows:

 #define barrier() __asm__ __volatile__("": : :"memory")
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
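
For illustration only (not part of the patch): a minimal standalone program
showing that on x86 these macros, like the read_barrier()/write_barrier()
definitions they replace, are compiler-only barriers and emit no fence
instructions. The file name and variable names below are made up for the
example:

 /* barrier_demo.c - illustration only; build with e.g. gcc -O2 barrier_demo.c */
 #include <stdio.h>

 #define barrier() __asm__ __volatile__("": : :"memory")
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()

 static volatile unsigned int head, tail;

 int main(void)
 {
 	tail = 1;
 	smp_wmb();	/* orders the store above for the compiler; no CPU fence on x86 */
 	smp_rmb();	/* likewise a pure compiler barrier on x86 */
 	head = tail;
 	printf("head=%u tail=%u\n", head, tail);
 	return 0;
 }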

Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 tools/io_uring/barrier.h        | 16 ----------------
 tools/io_uring/io_uring-bench.c | 12 ++++++------
 tools/io_uring/liburing.h       |  4 ++--
 tools/io_uring/queue.c          | 14 +++++++-------
 4 files changed, 15 insertions(+), 31 deletions(-)
 delete mode 100644 tools/io_uring/barrier.h
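
For reference, the ordering protocol behind the converted barriers is the
classic lockless single-producer/single-consumer ring. Roughly (a simplified
pseudo-C sketch, not taken verbatim from the sources; ring_tail, sqes, cqes
and mask stand in for the real ring fields):

 /* submission side (user space produces, cf. io_uring_submit() below) */
 sqes[tail & mask] = sqe;	/* fill in the SQE */
 smp_wmb();			/* SQE stores must be visible before the tail store */
 *ring_tail = tail + 1;		/* the kernel's read barrier pairs with this */

 /* completion side (user space consumes, cf. __io_uring_get_cqe() below) */
 smp_rmb();			/* pairs with the kernel's store barrier */
 if (head != *ring_tail)
 	cqe = &cqes[head & mask];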

diff --git a/tools/io_uring/barrier.h b/tools/io_uring/barrier.h
deleted file mode 100644
index ef00f6722ba9..000000000000
--- a/tools/io_uring/barrier.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef LIBURING_BARRIER_H
-#define LIBURING_BARRIER_H
-
-#if defined(__x86_64) || defined(__i386__)
-#define read_barrier()	__asm__ __volatile__("":::"memory")
-#define write_barrier()	__asm__ __volatile__("":::"memory")
-#else
-/*
- * Add arch appropriate definitions. Be safe and use full barriers for
- * archs we don't have support for.
- */
-#define read_barrier()	__sync_synchronize()
-#define write_barrier()	__sync_synchronize()
-#endif
-
-#endif
diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
index 0f257139b003..3ce715247c5e 100644
--- a/tools/io_uring/io_uring-bench.c
+++ b/tools/io_uring/io_uring-bench.c
@@ -28,9 +28,9 @@
 #include <string.h>
 #include <pthread.h>
 #include <sched.h>
+#include <asm/barrier.h>
 
 #include "liburing.h"
-#include "barrier.h"
 
 #define min(a, b)		((a < b) ? (a) : (b))
 
@@ -199,7 +199,7 @@ static int prep_more_ios(struct submitter *s, unsigned max_ios)
 	next_tail = tail = *ring->tail;
 	do {
 		next_tail++;
-		read_barrier();
+		smp_rmb();
 		if (next_tail == *ring->head)
 			break;
 
@@ -212,9 +212,9 @@ static int prep_more_ios(struct submitter *s, unsigned max_ios)
 
 	if (*ring->tail != tail) {
 		/* order tail store with writes to sqes above */
-		write_barrier();
+		smp_wmb();
 		*ring->tail = tail;
-		write_barrier();
+		smp_wmb();
 	}
 	return prepped;
 }
@@ -251,7 +251,7 @@ static int reap_events(struct submitter *s)
 	do {
 		struct file *f;
 
-		read_barrier();
+		smp_rmb();
 		if (head == *ring->tail)
 			break;
 		cqe = &ring->cqes[head & cq_ring_mask];
@@ -271,7 +271,7 @@ static int reap_events(struct submitter *s)
 
 	s->inflight -= reaped;
 	*ring->head = head;
-	write_barrier();
+	smp_wmb();
 	return reaped;
 }
 
diff --git a/tools/io_uring/liburing.h b/tools/io_uring/liburing.h
index 5f305c86b892..3670a08101c7 100644
--- a/tools/io_uring/liburing.h
+++ b/tools/io_uring/liburing.h
@@ -10,7 +10,7 @@ extern "C" {
 #include <string.h>
 #include "../../include/uapi/linux/io_uring.h"
 #include <inttypes.h>
-#include "barrier.h"
+#include <asm/barrier.h>
 
 /*
  * Library interface to io_uring
@@ -87,7 +87,7 @@ static inline void io_uring_cqe_seen(struct io_uring *ring,
 		 * Ensure that the kernel sees our new head, the kernel has
 		 * the matching read barrier.
 		 */
-		write_barrier();
+		smp_wmb();
 	}
 }
 
diff --git a/tools/io_uring/queue.c b/tools/io_uring/queue.c
index 321819c132c7..aadf4d926c8e 100644
--- a/tools/io_uring/queue.c
+++ b/tools/io_uring/queue.c
@@ -4,9 +4,9 @@
 #include <unistd.h>
 #include <errno.h>
 #include <string.h>
+#include <asm/barrier.h>
 
 #include "liburing.h"
-#include "barrier.h"
 
 static int __io_uring_get_cqe(struct io_uring *ring,
 			      struct io_uring_cqe **cqe_ptr, int wait)
@@ -20,13 +20,13 @@ static int __io_uring_get_cqe(struct io_uring *ring,
 	head = *cq->khead;
 	do {
 		/*
-		 * It's necessary to use a read_barrier() before reading
+		 * It's necessary to use a smp_rmb() before reading
 		 * the CQ tail, since the kernel updates it locklessly. The
 		 * kernel has the matching store barrier for the update. The
 		 * kernel also ensures that previous stores to CQEs are ordered
 		 * with the tail update.
 		 */
-		read_barrier();
+		smp_rmb();
 		if (head != *cq->ktail) {
 			*cqe_ptr = &cq->cqes[head & mask];
 			break;
@@ -77,7 +77,7 @@ int io_uring_submit(struct io_uring *ring)
 	 * read barrier here to match the kernel's store barrier when updating
 	 * the SQ head.
 	 */
-	read_barrier();
+	smp_rmb();
 	if (*sq->khead != *sq->ktail) {
 		submitted = *sq->kring_entries;
 		goto submit;
@@ -94,7 +94,7 @@ int io_uring_submit(struct io_uring *ring)
 	to_submit = sq->sqe_tail - sq->sqe_head;
 	while (to_submit--) {
 		ktail_next++;
-		read_barrier();
+		smp_rmb();
 
 		sq->array[ktail & mask] = sq->sqe_head & mask;
 		ktail = ktail_next;
@@ -113,13 +113,13 @@ int io_uring_submit(struct io_uring *ring)
 		 * will never see a tail update without the preceding SQE
 		 * stores being done.
 		 */
-		write_barrier();
+		smp_wmb();
 		*sq->ktail = ktail;
 		/*
 		 * The kernel has the matching read barrier for reading the
 		 * SQ tail.
 		 */
-		write_barrier();
+		smp_wmb();
 	}
 
 submit:
-- 
2.22.0.410.gd8fdbe21b5-goog



