[kvm-unit-tests PATCH 1/8] s390x: lib: Extend bitops

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Add helpers for setting and clearing bits, in both atomic (interlocked) and non-atomic variants, including the MSB-0 "inv" bit-numbering forms.

Signed-off-by: Janosch Frank <frankja@xxxxxxxxxxxxx>
---
 lib/s390x/asm/bitops.h | 102 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 102 insertions(+)

diff --git a/lib/s390x/asm/bitops.h b/lib/s390x/asm/bitops.h
index 792881ec..f5612855 100644
--- a/lib/s390x/asm/bitops.h
+++ b/lib/s390x/asm/bitops.h
@@ -17,6 +17,78 @@
 
 #define BITS_PER_LONG	64
 
+static inline unsigned long *bitops_word(unsigned long nr,
+					 const volatile unsigned long *ptr)
+{
+	unsigned long addr;
+
+	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
+	return (unsigned long *)addr;
+}
+
+static inline unsigned long bitops_mask(unsigned long nr)
+{
+	return 1UL << (nr & (BITS_PER_LONG - 1));
+}
+
/*
 * Atomically OR @mask into the 64-bit word at @ptr and return the
 * word's previous value, using the LAOG (Load And Or 64-bit)
 * instruction.
 * NOTE(review): LAOG belongs to the interlocked-access facility --
 * confirm the minimum machine level assumed by the test suite.
 */
static inline uint64_t laog(volatile unsigned long *ptr, uint64_t mask)
{
	uint64_t old;

	/* load and or 64bit concurrent and interlocked */
	asm volatile(
		"	laog	%[old],%[mask],%[ptr]\n"
		: [old] "=d" (old), [ptr] "+Q" (*ptr)
		: [mask] "d" (mask)
		: "memory", "cc" );
	return old;
}
+
/*
 * Atomically AND @mask into the 64-bit word at @ptr and return the
 * word's previous value, using the LANG (Load And And 64-bit)
 * instruction. Callers clear bits by passing the complement of the
 * bit mask.
 * NOTE(review): LANG belongs to the interlocked-access facility --
 * confirm the minimum machine level assumed by the test suite.
 */
static inline uint64_t lang(volatile unsigned long *ptr, uint64_t mask)
{
	uint64_t old;

	/* load and and 64bit concurrent and interlocked */
	asm volatile(
		"	lang	%[old],%[mask],%[ptr]\n"
		: [old] "=d" (old), [ptr] "+Q" (*ptr)
		: [mask] "d" (mask)
		: "memory", "cc" );
	return old;
}
+
/* Atomically set bit @nr in the bitmap at @ptr. */
static inline void set_bit(unsigned long nr,
			   const volatile unsigned long *ptr)
{
	laog(bitops_word(nr, ptr), bitops_mask(nr));
}
+
+static inline void set_bit_inv(unsigned long nr,
+			       const volatile unsigned long *ptr)
+{
+	return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+}
+
+static inline void clear_bit(unsigned long nr,
+			     const volatile unsigned long *ptr)
+{
+	uint64_t mask = bitops_mask(nr);
+	uint64_t *addr = bitops_word(nr, ptr);
+
+	lang(addr, ~mask);
+}
+
+static inline void clear_bit_inv(unsigned long nr,
+				 const volatile unsigned long *ptr)
+{
+	return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+}
+
+/* non-atomic bit manipulation functions */
+
 static inline bool test_bit(unsigned long nr,
 			    const volatile unsigned long *ptr)
 {
@@ -33,4 +105,34 @@ static inline bool test_bit_inv(unsigned long nr,
 	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
+static inline void __set_bit(unsigned long nr,
+			     const volatile unsigned long *ptr)
+{
+	uint64_t mask = bitops_mask(nr);
+	uint64_t *addr = bitops_word(nr, ptr);
+
+	*addr |= mask;
+}
+
+static inline void __set_bit_inv(unsigned long nr,
+				 const volatile unsigned long *ptr)
+{
+	return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+}
+
+static inline void __clear_bit(unsigned long nr,
+			       const volatile unsigned long *ptr)
+{
+	uint64_t mask = bitops_mask(nr);
+	uint64_t *addr = bitops_word(nr, ptr);
+
+	*addr &= ~mask;
+}
+
+static inline void __clear_bit_inv(unsigned long nr,
+				   const volatile unsigned long *ptr)
+{
+	return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+}
+
 #endif
-- 
2.30.2




[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Index of Archives]     [Kernel Development]     [Kernel Newbies]     [IDE]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite Info]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux ATA RAID]     [Samba]     [Linux Media]     [Device Mapper]

  Powered by Linux