Simply remove the ifdef.  The assembly is identical to that in the
non-optimised case of test_and_clear_bits() on PPC32, and it's not clear
to me how the PPC32 optimisation works, nor whether it would work for
arch_xor_unlock_is_negative_byte().  If that optimisation would work,
someone can implement it later, but this is more efficient than the
implementation in filemap.c.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 arch/powerpc/include/asm/bitops.h | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 40cc3ded60cb..671ecc6711e3 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -233,7 +233,6 @@ static inline int arch_test_and_change_bit(unsigned long nr,
 	return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
-#ifdef CONFIG_PPC64
 static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
 		volatile unsigned long *p)
 {
@@ -251,11 +250,8 @@ static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
 
 	return (old & BIT_MASK(7)) != 0;
 }
-
 #define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
-#endif /* CONFIG_PPC64 */
-
 #include <asm-generic/bitops/non-atomic.h>
 
 static inline void arch___clear_bit_unlock(int nr, volatile unsigned long *addr)
-- 
2.40.1
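
For anyone unfamiliar with the helper being made unconditional above:
its contract is "atomically XOR @mask into the word with release
ordering, then report whether bit 7 of the old value was set" (bit 7
being the sign bit of the low byte, which is where PG_waiters sits for
the page unlock path).  Below is a minimal sketch of those semantics,
assuming GCC/Clang __atomic builtins; the _sketch name is invented for
illustration and this is not the kernel's generic implementation, nor
the ll/sc loop the powerpc asm uses:

	#include <stdbool.h>

	/* Sketch only: shows the contract, not the larx/stcx. loop. */
	static inline bool xor_unlock_is_negative_byte_sketch(
			unsigned long mask, volatile unsigned long *p)
	{
		/* Release ordering: this is the "unlock" side. */
		unsigned long old = __atomic_fetch_xor(p, mask,
						       __ATOMIC_RELEASE);

		/* Was the sign bit of the low byte already set? */
		return (old & (1UL << 7)) != 0;
	}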