Replace smp_mb() in arch_write_unlock() and __clear_bit_unlock() with
smp_mb__before_llsc(), which provides "release" barrier semantics. It
seems this was missed in commit f252ffd50c97dae87b45f1dbad24f71358ccfbd6,
which introduced the "acquire" and "release" semantics.

Signed-off-by: Leonid Yegoshin <Leonid.Yegoshin@xxxxxxxxxx>
---
 arch/mips/include/asm/bitops.h   | 2 +-
 arch/mips/include/asm/spinlock.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 0cf29bd5dc5c..ce9666cf1499 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -469,7 +469,7 @@ static inline int test_and_change_bit(unsigned long nr,
  */
 static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
 {
-	smp_mb();
+	smp_mb__before_llsc();
 	__clear_bit(nr, addr);
 }
 
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 1fca2e0793dc..7c7f3b2bd3de 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -317,7 +317,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	smp_mb();
+	smp_mb__before_llsc();
 	__asm__ __volatile__(
 	"	# arch_write_unlock	\n"
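
For reviewers who want the ordering argument spelled out: below is a
minimal userspace C11 sketch of why an unlock path only needs release
ordering rather than a full barrier. This is an analogy, not kernel
code; lock_word, shared_data, example_lock() and example_unlock() are
made-up names, and the mapping "memory_order_release ~
smp_mb__before_llsc() followed by a plain store" is an assumption about
the MIPS correspondence, not something taken from this patch.

/*
 * Hedged sketch: a C11 analogue of lock/unlock ordering.
 * Compiles standalone as library code with a C11 compiler.
 */
#include <stdatomic.h>

static atomic_int lock_word;	/* 1 = held, 0 = free */
static int shared_data;

static void example_lock(void)
{
	/* Spin until we swap 0 -> 1; "acquire" orders the critical
	 * section after the lock-taking operation. */
	while (atomic_exchange_explicit(&lock_word, 1,
					memory_order_acquire))
		;
}

static void example_unlock(void)
{
	shared_data = 42;	/* critical-section store ... */
	/* "release": every store above must be visible before
	 * lock_word is cleared, but the clearing store need not be
	 * ordered against *later* accesses.  That one-way guarantee
	 * is all an unlock needs, which is why a full smp_mb() is
	 * stronger than necessary on these paths. */
	atomic_store_explicit(&lock_word, 0, memory_order_release);
}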