!LLSC atomics use a spinlock (SMP) or irq-disable (UP) to implement
critical regions. UP atomic_set() however was "cheating" by not doing
any of that and still being functional.

Remove this anomaly (primarily as a cleanup for future code
improvements) given that this config is not worth the hassle of
special-case code.

Signed-off-by: Vineet Gupta <vgupta@xxxxxxxxxxxx>
---
 arch/arc/include/asm/atomic-spinlock.h | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)

diff --git a/arch/arc/include/asm/atomic-spinlock.h b/arch/arc/include/asm/atomic-spinlock.h
index bdf87610b2d7..8c6fd0e651e5 100644
--- a/arch/arc/include/asm/atomic-spinlock.h
+++ b/arch/arc/include/asm/atomic-spinlock.h
@@ -3,12 +3,10 @@
 #ifndef _ASM_ARC_ATOMIC_SPLOCK_H
 #define _ASM_ARC_ATOMIC_SPLOCK_H
 
-#ifndef CONFIG_SMP
-
- /* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#else
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
 
 static inline void arch_atomic_set(atomic_t *v, int i)
 {
@@ -30,13 +28,6 @@ static inline void arch_atomic_set(atomic_t *v, int i)
 
 #define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
 
-#endif
-
-/*
- * Non hardware assisted Atomic-R-M-W
- * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
- */
-
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void arch_atomic_##op(int i, atomic_t *v)		\
 {									\
-- 
2.25.1
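
[Not part of the patch] For readers outside the ARC tree, below is a rough
userspace sketch of the reasoning in the commit message: when atomics are
emulated with a lock rather than LLSC, atomic_set() has to honour the same
locking protocol as the emulated read-modify-write ops. The emu_* names and
the pthread mutex are made-up stand-ins; the kernel uses an irq-disable (UP)
or spinlock (SMP) critical section instead.

/*
 * Illustrative userspace sketch only -- not the kernel code.
 */
#include <pthread.h>

typedef struct {
	int counter;
} emu_atomic_t;

/* one lock shared by all emulated atomic ops, as on !LLSC hardware */
static pthread_mutex_t emu_atomic_lock = PTHREAD_MUTEX_INITIALIZER;

static void emu_atomic_add(int i, emu_atomic_t *v)
{
	pthread_mutex_lock(&emu_atomic_lock);
	v->counter += i;		/* emulated read-modify-write */
	pthread_mutex_unlock(&emu_atomic_lock);
}

static void emu_atomic_set(emu_atomic_t *v, int i)
{
	/*
	 * Taking the same lock is the point of the patch: a store done
	 * outside the lock could land between the load and the store of
	 * emu_atomic_add() on another CPU and be silently overwritten.
	 */
	pthread_mutex_lock(&emu_atomic_lock);
	v->counter = i;
	pthread_mutex_unlock(&emu_atomic_lock);
}

On UP the unlocked plain store happens to be safe (a single store cannot be
split by an interrupt), which is why the old WRITE_ONCE() shortcut was
functional; the sketch only shows the general reasoning that makes the locked
version the single code path worth keeping.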