Inspired by the mips test_and_change_bit(), this will surely be more
efficient than the generic one defined in filemap.c.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 arch/mips/include/asm/bitops.h | 26 +++++++++++++++++++++++++-
 arch/mips/lib/bitops.c         | 14 ++++++++++++++
 2 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index b4bf754f7db3..d98a05c478f4 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -73,7 +73,8 @@ int __mips_test_and_clear_bit(unsigned long nr,
 		volatile unsigned long *addr);
 int __mips_test_and_change_bit(unsigned long nr,
 		volatile unsigned long *addr);
-
+bool __mips_xor_is_negative_byte(unsigned long mask,
+		volatile unsigned long *addr);
 
 /*
  * set_bit - Atomically set a bit in memory
@@ -279,6 +280,29 @@ static inline int test_and_change_bit(unsigned long nr,
 	return res;
 }
 
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+		volatile unsigned long *p)
+{
+	unsigned long orig;
+	bool res;
+
+	smp_mb__before_atomic();
+
+	if (!kernel_uses_llsc) {
+		res = __mips_xor_is_negative_byte(mask, p);
+	} else {
+		orig = __test_bit_op(*p, "%0",
+				"xor\t%1, %0, %3",
+				"ir"(mask));
+		res = (orig & BIT(7)) != 0;
+	}
+
+	smp_llsc_mb();
+
+	return res;
+}
+#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
+
 #undef __bit_op
 #undef __test_bit_op
 
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 116d0bd8b2ae..00aee98e9d54 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -146,3 +146,17 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 	return res;
 }
 EXPORT_SYMBOL(__mips_test_and_change_bit);
+
+bool __mips_xor_is_negative_byte(unsigned long mask,
+		volatile unsigned long *addr)
+{
+	unsigned long flags;
+	unsigned long data;
+
+	raw_local_irq_save(flags);
+	data = *addr;
+	*addr = data ^ mask;
+	raw_local_irq_restore(flags);
+
+	return (data & BIT(7)) != 0;
+}
-- 
2.40.1
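
For readers more interested in the semantics than in the MIPS asm: the helper
atomically XORs 'mask' into the word and reports whether bit 7 of the old
value was set, so the caller can drop a lock-style flag and learn in the same
operation whether the "negative byte" bit was up. Below is a minimal
user-space sketch of that behaviour, using C11 atomics purely as an
illustration; the function and variable names are invented for the example,
and the sequentially-consistent atomic_fetch_xor() merely stands in for the
kernel's barrier choreography, so none of this is the kernel interface.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model: atomically XOR the mask in, report old bit 7. */
static bool xor_is_negative_byte(unsigned long mask, _Atomic unsigned long *p)
{
	unsigned long old = atomic_fetch_xor(p, mask);

	return (old & (1UL << 7)) != 0;
}

int main(void)
{
	/* bit 0 stands in for the "lock" bit, bit 7 for the sign/waiter bit */
	_Atomic unsigned long word = (1UL << 7) | 1UL;

	printf("%d\n", xor_is_negative_byte(1UL, &word));	/* 1: bit 7 was set */
	printf("%#lx\n", (unsigned long)word);			/* 0x80: lock bit cleared */
	return 0;
}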