Use the new cmpxchg_emu_u8() to emulate one-byte cmpxchg() on arc.

[ paulmck: Drop two-byte support per Arnd Bergmann feedback. ]
[ paulmck: Apply feedback from Naresh Kamboju. ]
[ paulmck: Apply kernel test robot feedback. ]

Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxx>
Cc: Vineet Gupta <vgupta@xxxxxxxxxx>
Cc: Andi Shyti <andi.shyti@xxxxxxxxxxxxxxx>
Cc: Andrzej Hajda <andrzej.hajda@xxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxxxx>
Cc: <linux-snps-arc@xxxxxxxxxxxxxxxxxxx>
---
 arch/arc/Kconfig               |  1 +
 arch/arc/include/asm/cmpxchg.h | 32 +++++++++++++++++++++++---------
 2 files changed, 24 insertions(+), 9 deletions(-)

diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index fd0b0a0d4686a..163608fd49d18 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -13,6 +13,7 @@ config ARC
 	select ARCH_HAS_SETUP_DMA_OPS
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+	select ARCH_NEED_CMPXCHG_1_EMU
 	select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
 	select ARCH_32BIT_OFF_T
 	select BUILDTIME_TABLE_SORT
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index e138fde067dea..2102ce076f28b 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -8,6 +8,7 @@
 
 #include <linux/build_bug.h>
 #include <linux/types.h>
+#include <linux/cmpxchg-emu.h>
 
 #include <asm/barrier.h>
 #include <asm/smp.h>
@@ -46,6 +47,9 @@
 	__typeof__(*(ptr)) _prev_;				\
 								\
 	switch(sizeof((_p_))) {					\
+	case 1:							\
+		_prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
+		break;						\
 	case 4:							\
 		_prev_ = __cmpxchg(_p_, _o_, _n_);		\
 		break;						\
@@ -65,16 +69,26 @@
 	__typeof__(*(ptr)) _prev_;				\
 	unsigned long __flags;					\
 								\
-	BUILD_BUG_ON(sizeof(_p_) != 4);				\
+	switch(sizeof((_p_))) {					\
+	case 1:							\
+		__flags = cmpxchg_emu_u8((volatile u8 *)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
+		_prev_ = (__typeof__(*(ptr)))__flags;		\
+		break;						\
+	case 4:							\
+		/*						\
+		 * spin lock/unlock provide the needed smp_mb()	\
+		 * before/after					\
+		 */						\
+		atomic_ops_lock(__flags);			\
+		_prev_ = *_p_;					\
+		if (_prev_ == _o_)				\
+			*_p_ = _n_;				\
+		atomic_ops_unlock(__flags);			\
+		break;						\
+	default:						\
+		BUILD_BUG();					\
+	}							\
 								\
-	/*							\
-	 * spin lock/unlock provide the needed smp_mb() before/after \
-	 */							\
-	atomic_ops_lock(__flags);				\
-	_prev_ = *_p_;						\
-	if (_prev_ == _o_)					\
-		*_p_ = _n_;					\
-	atomic_ops_unlock(__flags);				\
	_prev_;							\
 })
 
-- 
2.40.1
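
For readers unfamiliar with the helper: cmpxchg_emu_u8(), introduced earlier in
this series in lib/cmpxchg-emu.c, emulates a one-byte compare-and-exchange on
top of the architecture's native 32-bit cmpxchg() by operating on the aligned
word that contains the target byte. The sketch below illustrates the general
word-masking technique only; the function name (note the _sketch suffix) and
the little-endian byte-offset arithmetic are illustrative assumptions, not the
actual library code.

/*
 * Illustrative sketch only -- not the lib/cmpxchg-emu.c implementation.
 * Emulates a one-byte cmpxchg() via the native 32-bit cmpxchg() on the
 * aligned word containing the byte.  Shift math assumes little-endian.
 */
static uintptr_t cmpxchg_emu_u8_sketch(volatile u8 *p, uintptr_t old, uintptr_t new)
{
	volatile u32 *p32 = (volatile u32 *)((uintptr_t)p & ~0x3UL);
	int shift = ((uintptr_t)p & 0x3) * 8;	/* bit offset of target byte */
	u32 mask = 0xffU << shift;
	u32 oldword, newword, seen;

	oldword = READ_ONCE(*p32);
	for (;;) {
		/* If the target byte already differs, fail as cmpxchg() would. */
		if (((oldword & mask) >> shift) != (old & 0xff))
			return (oldword & mask) >> shift;
		/* Splice the new byte into the word, then retry at word width. */
		newword = (oldword & ~mask) | ((u32)(new & 0xff) << shift);
		seen = cmpxchg(p32, oldword, newword);
		if (seen == oldword)
			return old & 0xff;	/* word-level cmpxchg succeeded */
		oldword = seen;		/* some byte changed; re-evaluate */
	}
}

The retry loop means the emulated one-byte case can be slower than a native
byte-wide primitive under contention, but it gives architectures such as arc,
whose LLSC operations are word-sized, a correct one-byte cmpxchg() without
per-arch assembly.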