[PATCH liburing 4/4] Optimize i386 memory barriers

Use identical memory barrier implementations on 32-bit and 64-bit x86 CPUs.
In the past the Linux kernel supported 32-bit CPUs that violate the x86
memory-ordering model. Since the kernels that supported these CPUs predate
io_uring, do not support these CPUs in liburing either. See also Linux
kernel commit 5927145efd5d ("x86/cpu: Remove the CONFIG_X86_PPRO_FENCE=y
quirk") # v4.16.
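
For context, below is a minimal, self-contained sketch (not part of the
patch) of how the now-unified release/acquire macros might be exercised by
a single-producer/single-consumer ring update, in the spirit of how liburing
publishes ring tail updates. The ring_publish()/ring_consume() helpers and
the local macro copies are illustrative assumptions, not code taken from
src/barrier.h:

	#define barrier()	asm volatile("" ::: "memory")

	/*
	 * Identical on i386 and x86_64 after this patch: plain accesses
	 * plus a compiler barrier are enough for release/acquire on x86.
	 */
	#define smp_store_release(p, v)				\
	do {							\
		barrier();					\
		*(volatile __typeof__(*(p)) *)(p) = (v);	\
	} while (0)

	#define smp_load_acquire(p)				\
	({							\
		__typeof__(*(p)) ___p1 =			\
			*(volatile __typeof__(*(p)) *)(p);	\
		barrier();					\
		___p1;						\
	})

	static unsigned int ring[16];
	static unsigned int tail;	/* producer writes, consumer reads */

	/* Hypothetical producer: store the entry, then publish the tail. */
	static void ring_publish(unsigned int value)
	{
		ring[tail & 15] = value;
		smp_store_release(&tail, tail + 1);
	}

	/* Hypothetical consumer: wait until the tail moves past 'head'. */
	static unsigned int ring_consume(unsigned int head)
	{
		while (smp_load_acquire(&tail) == head)
			;	/* pairs with smp_store_release() above */
		return ring[head & 15];
	}

Because x86 keeps stores ordered with stores and loads ordered with loads,
no fence instruction is needed in the fast path above; the lock-prefixed
smp_mb() is only needed for the store-before-load orderings that the
hardware does not provide by itself.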

Cc: Roman Penyaev <rpenyaev@xxxxxxx>
Suggested-by: Roman Penyaev <rpenyaev@xxxxxxx>
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 src/barrier.h | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/src/barrier.h b/src/barrier.h
index eb8ee1ec9d34..e079cf609f26 100644
--- a/src/barrier.h
+++ b/src/barrier.h
@@ -32,25 +32,18 @@ after the acquire operation executes. This is implemented using
 
 
 #if defined(__x86_64__) || defined(__i386__)
-/* From tools/arch/x86/include/asm/barrier.h */
-#if defined(__i386__)
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define rmb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define wmb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#elif defined(__x86_64__)
+/* Adapted from arch/x86/include/asm/barrier.h */
 #define mb()	asm volatile("mfence" ::: "memory")
 #define rmb()	asm volatile("lfence" ::: "memory")
 #define wmb()	asm volatile("sfence" ::: "memory")
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#if defined(__i386__)
+#define smp_mb()  asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc")
+#else
 #define smp_mb()  asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
 #endif
 
-#if defined(__x86_64__)
 #define smp_store_release(p, v)			\
 do {						\
 	barrier();				\
@@ -63,7 +56,6 @@ do {						\
 	barrier();				\
 	___p1;					\
 })
-#endif /* defined(__x86_64__) */
 #else /* defined(__x86_64__) || defined(__i386__) */
 /*
  * Add arch appropriate definitions. Be safe and use full barriers for
-- 
2.22.0.410.gd8fdbe21b5-goog