This is a backport of the upstream patch 8238b4579866b7c1bb99883cfe102a43db5506ff for the stable branch 4.9.

Signed-off-by: Mikulas Patocka <mpatocka@xxxxxxxxxx>

---
 arch/x86/include/asm/bitops.h           |   21 +++++++++++++++++++++
 include/asm-generic/bitops/non-atomic.h |   14 ++++++++++++++
 include/linux/buffer_head.h             |    2 +-
 include/linux/wait.h                    |    8 ++++----
 kernel/sched/wait.c                     |    2 +-
 5 files changed, 41 insertions(+), 6 deletions(-)

Index: linux-stable/arch/x86/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/x86/include/asm/bitops.h	2022-09-30 16:01:38.000000000 +0200
+++ linux-stable/arch/x86/include/asm/bitops.h	2022-09-30 16:01:38.000000000 +0200
@@ -314,6 +314,20 @@ static __always_inline bool constant_tes
 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
+static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
+{
+	bool oldbit;
+
+	asm volatile("testb %2,%1"
+		     CC_SET(nz)
+		     : CC_OUT(nz) (oldbit)
+		     : "m" (((unsigned char *)addr)[nr >> 3]),
+		       "i" (1 << (nr & 7))
+		     :"memory");
+
+	return oldbit;
+}
+
 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
 	bool oldbit;
@@ -340,6 +354,13 @@ static bool test_bit(int nr, const volat
 	 ? constant_test_bit((nr), (addr))	\
 	 : variable_test_bit((nr), (addr)))
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
+					  variable_test_bit(nr, addr);
+}
+
 /**
  * __ffs - find first set bit in word
  * @word: The word to search
Index: linux-stable/include/asm-generic/bitops/non-atomic.h
===================================================================
--- linux-stable.orig/include/asm-generic/bitops/non-atomic.h	2022-09-30 16:01:38.000000000 +0200
+++ linux-stable/include/asm-generic/bitops/non-atomic.h	2022-09-30 16:01:38.000000000 +0200
@@ -2,6 +2,7 @@
 #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /**
  * __set_bit - Set a bit in memory
@@ -105,4 +106,17 @@ static inline int test_bit(int nr, const
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+/**
+ * arch_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+#define test_bit_acquire arch_test_bit_acquire
+
 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
Index: linux-stable/include/linux/buffer_head.h
===================================================================
--- linux-stable.orig/include/linux/buffer_head.h	2022-09-30 16:01:38.000000000 +0200
+++ linux-stable/include/linux/buffer_head.h	2022-09-30 16:01:38.000000000 +0200
@@ -162,7 +162,7 @@ static __always_inline int buffer_uptoda
 	 * make it consistent with folio_test_uptodate
 	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
 	 */
-	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+	return test_bit_acquire(BH_Uptodate, &bh->b_state);
 }
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
Index: linux-stable/include/linux/wait.h
===================================================================
--- linux-stable.orig/include/linux/wait.h	2022-09-30 16:01:38.000000000 +0200
+++ linux-stable/include/linux/wait.h	2022-09-30 16:01:38.000000000 +0200
@@ -1066,7 +1066,7 @@ static inline int
 wait_on_bit(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait,
@@ -1091,7 +1091,7 @@ static inline int
 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait_io,
@@ -1118,7 +1118,7 @@ wait_on_bit_timeout(unsigned long *word,
 		    unsigned long timeout)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit_timeout(word, bit,
 					       bit_wait_timeout,
@@ -1146,7 +1146,7 @@ wait_on_bit_action(unsigned long *word, 
 		   unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit, action, mode);
 }
Index: linux-stable/kernel/sched/wait.c
===================================================================
--- linux-stable.orig/kernel/sched/wait.c	2022-09-30 16:01:38.000000000 +0200
+++ linux-stable/kernel/sched/wait.c	2022-09-30 16:01:58.000000000 +0200
@@ -389,7 +389,7 @@ __wait_on_bit(wait_queue_head_t *wq, str
 		prepare_to_wait(wq, &q->wait, mode);
 		if (test_bit(q->key.bit_nr, q->key.flags))
 			ret = (*action)(&q->key, mode);
-	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
+	} while (test_bit_acquire(q->key.bit_nr, q->key.flags) && !ret);
 	finish_wait(wq, &q->wait);
 	return ret;
 }
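As a side note for reviewers who want to see the guarantee in isolation: below is a minimal userspace sketch, not part of the patch, of the acquire-ordered bit test that the generic arch_test_bit_acquire() above provides, written with C11 atomics instead of the kernel's smp_load_acquire(). All names in it (test_bit_acquire_demo, publish, READY_BIT, payload) are illustrative only.

/*
 * Demo only: a consumer that tests a "ready" bit with an acquire load
 * may safely read data that was published before the bit was set with
 * release ordering.  A plain, unordered test (like the old test_bit()
 * path in wait_on_bit()) gives no such guarantee on weakly ordered CPUs.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define READY_BIT 0

static _Atomic unsigned long flags;
static int payload;

/* userspace analogue of the generic arch_test_bit_acquire() */
static bool test_bit_acquire_demo(unsigned long nr, _Atomic unsigned long *addr)
{
	return 1UL & (atomic_load_explicit(addr, memory_order_acquire) >> nr);
}

/* producer: store the data, then set the bit with release ordering */
static void publish(void)
{
	payload = 42;
	atomic_fetch_or_explicit(&flags, 1UL << READY_BIT, memory_order_release);
}

int main(void)
{
	publish();	/* in real code this would run on another CPU */
	if (test_bit_acquire_demo(READY_BIT, &flags))
		printf("payload = %d\n", payload);	/* ordered after the bit test */
	return 0;
}

This mirrors the pairing the patch relies on in buffer_head.h: smp_mb__before_atomic() + set_bit() in set_buffer_uptodate() on the producer side, test_bit_acquire() in buffer_uptodate() on the consumer side.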