Commit-ID:  547571b5abe61bb33c6005d8981e86e3c61fedcc
Gitweb:     https://git.kernel.org/tip/547571b5abe61bb33c6005d8981e86e3c61fedcc
Author:     Jan Beulich <JBeulich@xxxxxxxx>
AuthorDate: Wed, 27 Mar 2019 09:15:19 -0600
Committer:  Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Wed, 10 Apr 2019 09:53:31 +0200

x86/asm: Modernize sync_bitops.h

Add missing instruction suffixes and use rmwcc.h, just as was (more or
less) recently done for bitops.h; see:

  22636f8c9511: x86/asm: Add instruction suffixes to bitops
  288e4521f0f6: x86/asm: 'Simplify' GEN_*_RMWcc() macros

No change in functionality intended.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Link: http://lkml.kernel.org/r/5C9B93870200007800222289@xxxxxxxxxxxxxxxxxxxxxxxx
[ Cleaned up the changelog a bit. ]
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 arch/x86/include/asm/sync_bitops.h | 31 +++++++++----------------------
 1 file changed, 9 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 2fe745356fb1..6d8d6bc183b7 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -14,6 +14,8 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
+#include <asm/rmwcc.h>
+
 #define ADDR (*(volatile long *)addr)
 
 /**
@@ -29,7 +31,7 @@
  */
 static inline void sync_set_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; bts %1,%0"
+	asm volatile("lock; " __ASM_SIZE(bts) " %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -47,7 +49,7 @@ static inline void sync_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; btr %1,%0"
+	asm volatile("lock; " __ASM_SIZE(btr) " %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -64,7 +66,7 @@ static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline void sync_change_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; btc %1,%0"
+	asm volatile("lock; " __ASM_SIZE(btc) " %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -78,14 +80,9 @@ static inline void sync_change_bit(long nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
+static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	unsigned char oldbit;
-
-	asm volatile("lock; bts %2,%1\n\tsetc %0"
-		     : "=qm" (oldbit), "+m" (ADDR)
-		     : "Ir" (nr) : "memory");
-	return oldbit;
+	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr);
 }
 
 /**
@@ -98,12 +95,7 @@ static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	unsigned char oldbit;
-
-	asm volatile("lock; btr %2,%1\n\tsetc %0"
-		     : "=qm" (oldbit), "+m" (ADDR)
-		     : "Ir" (nr) : "memory");
-	return oldbit;
+	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr);
 }
 
 /**
@@ -116,12 +108,7 @@ static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	unsigned char oldbit;
-
-	asm volatile("lock; btc %2,%1\n\tsetc %0"
-		     : "=qm" (oldbit), "+m" (ADDR)
-		     : "Ir" (nr) : "memory");
-	return oldbit;
+	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr);
 }
 
 #define sync_test_bit(nr, addr) test_bit(nr, addr)
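
A note on the suffix half of the change: when 'nr' is a compile-time
constant, the "I" constraint emits an immediate, so nothing in a bare
"bts %1,%0" with a memory destination tells the assembler the operand
width, and newer gas warns about (or guesses) it. __ASM_SIZE(), defined
in arch/x86/include/asm/asm.h, pastes an "l" suffix onto the mnemonic
on 32-bit builds and a "q" suffix on 64-bit ones. Here is a
stand-alone sketch of that mechanism; MY_ASM_FORM()/MY_ASM_SEL()/
MY_ASM_SIZE() and my_sync_set_bit() are hypothetical names that mirror
the kernel's macros, not kernel API:

/* Stringify with surrounding spaces so it splices into asm strings. */
#define MY_ASM_FORM(x)		" " #x " "

#ifdef __x86_64__
# define MY_ASM_SEL(a, b)	MY_ASM_FORM(b)	/* pick the "q" variant */
#else
# define MY_ASM_SEL(a, b)	MY_ASM_FORM(a)	/* pick the "l" variant */
#endif

/* MY_ASM_SIZE(bts) expands to " btsl " or " btsq ". */
#define MY_ASM_SIZE(inst)	MY_ASM_SEL(inst##l, inst##q)

static inline void my_sync_set_bit(long nr, volatile unsigned long *addr)
{
	/* On 64-bit this assembles as "lock; btsq %1,%0": unambiguous. */
	asm volatile("lock;" MY_ASM_SIZE(bts) "%1,%0"
		     : "+m" (*addr)
		     : "Ir" (nr)
		     : "memory");
}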
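
On the rmwcc.h half: GEN_BINARY_RMWcc() replaces the open-coded
bts-plus-setc pair so that, depending on compiler support, the flags
result can be consumed directly via asm goto, a GCC 6+ flag-output
constraint, or a set<cc> fallback. Below is a minimal user-space
approximation of the flag-output form, assuming x86-64 and a GCC-style
compiler; my_test_and_set_bit() is a hypothetical stand-in, not the
actual macro expansion:

#include <stdbool.h>
#include <stdio.h>

/*
 * The carry flag produced by "lock btsq" is read back directly via
 * the "=@ccc" flag-output constraint, so no setc/movzbl sequence is
 * needed to materialize the old bit value in a register.
 */
static inline bool my_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile("lock; btsq %2,%1"
		     : "=@ccc" (oldbit), "+m" (*addr)
		     : "Ir" (nr)
		     : "memory");
	return oldbit;
}

int main(void)
{
	volatile unsigned long word = 0;

	/* First set of bit 3 reports it was clear; the second, set. */
	printf("%d\n", my_test_and_set_bit(3, &word));	/* 0 */
	printf("%d\n", my_test_and_set_bit(3, &word));	/* 1 */
	printf("%#lx\n", word);				/* 0x8 */
	return 0;
}

The practical benefit is code generation: when the caller only
branches on the result, the compiler can test the flag (or jump
straight to a label, in the asm goto form) instead of round-tripping
it through a byte register as the removed setc-based code did.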