In order to replace cmpxchg_double() with the newly minted cmpxchg128()
family of functions, wire it up in this_cpu_cmpxchg().

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 arch/arm64/include/asm/percpu.h |   24 ++++++++++++++++++
 arch/s390/include/asm/percpu.h  |   20 +++++++++++++++
 arch/x86/include/asm/percpu.h   |   52 ++++++++++++++++++++++++++++++++++++++++
 include/asm-generic/percpu.h    |    8 ++++++
 include/linux/percpu-defs.h     |   20 +++++++++++++--
 5 files changed, 122 insertions(+), 2 deletions(-)

--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -140,6 +140,10 @@ PERCPU_RET_OP(add, add, ldadd)
  * re-enabling preemption for preemptible kernels, but doing that in a way
  * which builds inside a module would mean messing directly with the preempt
  * count. If you do this, peterz and tglx will hunt you down.
+ *
+ * Not to mention it'll break the actual preemption model for missing a
+ * preemption point when TIF_NEED_RESCHED gets set while preemption is
+ * disabled.
  */
 #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
 ({									\
@@ -240,6 +244,26 @@ PERCPU_RET_OP(add, add, ldadd)
 #define this_cpu_cmpxchg_8(pcp, o, n)	\
 	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
 
+#define __pcpu_cast_128(_exp, _val)					\
+	_Generic((_exp),						\
+		 u128: (_val),						\
+		 s128: (_val),						\
+		 default: (unsigned long)(_val))
+
+#define this_cpu_cmpxchg_16(pcp, o, n)					\
+({									\
+	u128 old__ = __pcpu_cast_128((o), (o));				\
+	u128 new__ = __pcpu_cast_128((n), (n));				\
+	typedef typeof(pcp) pcp_op_T__;					\
+	pcp_op_T__ *ptr__;						\
+	u128 ret__;							\
+	preempt_disable_notrace();					\
+	ptr__ = raw_cpu_ptr(&(pcp));					\
+	ret__ = cmpxchg128_local((void *)ptr__, old__, new__);		\
+	preempt_enable_notrace();					\
+	(typeof(pcp))__pcpu_cast_128(*ptr__, ret__);			\
+})
+
 #ifdef __KVM_NVHE_HYPERVISOR__
 extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
 #define __per_cpu_offset
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -148,6 +148,26 @@
 #define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
 
+#define __pcpu_cast_128(_exp, _val)					\
+	_Generic((_exp),						\
+		 u128: (_val),						\
+		 s128: (_val),						\
+		 default: (unsigned long)(_val))
+
+#define this_cpu_cmpxchg_16(pcp, oval, nval)				\
+({									\
+	u128 old__ = __pcpu_cast_128((oval), (oval));			\
+	u128 new__ = __pcpu_cast_128((nval), (nval));			\
+	typedef typeof(pcp) pcp_op_T__;					\
+	pcp_op_T__ *ptr__;						\
+	u128 ret__;							\
+	preempt_disable_notrace();					\
+	ptr__ = raw_cpu_ptr(&(pcp));					\
+	ret__ = cmpxchg128((void *)ptr__, old__, new__);		\
+	preempt_enable_notrace();					\
+	(typeof(pcp))__pcpu_cast_128(*ptr__, ret__);			\
+})
+
 #define arch_this_cpu_xchg(pcp, nval)					\
 ({									\
 	typeof(pcp) *ptr__;						\
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -210,6 +210,58 @@ do {									\
 	(typeof(_var))(unsigned long) pco_old__;			\
 })
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_CMPXCHG64)
+#define __pcpu_cast_64(_exp, _val)					\
+	_Generic((_exp),						\
+		 u64: (_val),						\
+		 s64: (_val),						\
+		 default: (unsigned long)(_val))
+
+#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval)		\
+({									\
+	__pcpu_type_##size pco_old__ = __pcpu_cast_64((_oval), (_oval)); \
+	__pcpu_type_##size pco_new__ = __pcpu_cast_64((_nval), (_nval)); \
+	asm qual ("cmpxchg8b " __percpu_arg([var])			\
+		  : [var] "+m" (_var),					\
+		    "+A" (pco_old__)					\
+		  : "b" ((u32)pco_new__), "c" ((u32)(pco_new__ >> 32))	\
+		  : "memory");						\
+	(typeof(_var))__pcpu_cast_64(_var, pco_old__);			\
+})
+
+#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg64_op(8,         , pcp, oval, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)
+#endif
+
+#ifdef CONFIG_X86_64
+#define __pcpu_cast_128(_exp, _val)					\
+	_Generic((_exp),						\
+		 u128: (_val),						\
+		 s128: (_val),						\
+		 default: (unsigned long)(_val))
+
+#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval)		\
+({									\
+	union __u128_halves pco_old__ = {				\
+		.full = __pcpu_cast_128((_oval), (_oval))		\
+	};								\
+	union __u128_halves pco_new__ = {				\
+		.full = __pcpu_cast_128((_nval), (_nval))		\
+	};								\
+	asm qual ("cmpxchg16b " __percpu_arg([var])			\
+		  : [var] "+m" (_var),					\
+		    "+a" (pco_old__.low),				\
+		    "+d" (pco_old__.high)				\
+		  : "b" (pco_new__.low),				\
+		    "c" (pco_new__.high)				\
+		  : "memory");						\
+	(typeof(_var))__pcpu_cast_128(_var, pco_old__.full);		\
+})
+
+#define raw_cpu_cmpxchg_16(pcp, oval, nval)	percpu_cmpxchg128_op(16,         , pcp, oval, nval)
+#define this_cpu_cmpxchg_16(pcp, oval, nval)	percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)
+#endif
+
 /*
  * this_cpu_read() makes gcc load the percpu variable every time it is
  * accessed while this_cpu_read_stable() allows the value to be cached.
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -298,6 +298,10 @@ do {									\
 #define raw_cpu_cmpxchg_8(pcp, oval, nval) \
 	raw_cpu_generic_cmpxchg(pcp, oval, nval)
 #endif
+#ifndef raw_cpu_cmpxchg_16
+#define raw_cpu_cmpxchg_16(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
 
 #ifndef raw_cpu_cmpxchg_double_1
 #define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
@@ -423,6 +427,10 @@ do {									\
 #define this_cpu_cmpxchg_8(pcp, oval, nval) \
 	this_cpu_generic_cmpxchg(pcp, oval, nval)
 #endif
+#ifndef this_cpu_cmpxchg_16
+#define this_cpu_cmpxchg_16(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
 
 #ifndef this_cpu_cmpxchg_double_1
 #define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -343,6 +343,22 @@ static inline void __this_cpu_preempt_ch
 	pscr2_ret__;							\
 })
 
+#define __pcpu_size16_call_return2(stem, variable, ...)			\
+({									\
+	typeof(variable) pscr2_ret__;					\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
+	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
+	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
+	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
+	case 16: pscr2_ret__ = stem##16(variable, __VA_ARGS__); break;	\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pscr2_ret__;							\
+})
+
 /*
  * Special handling for cmpxchg_double.  cmpxchg_double is passed two
  * percpu variables.  The first has to be aligned to a double word
@@ -425,7 +441,7 @@ do {									\
 #define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
 #define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
 #define raw_cpu_cmpxchg(pcp, oval, nval) \
-	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
+	__pcpu_size16_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
 #define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
 	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
@@ -512,7 +528,7 @@ do {									\
 #define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
 #define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
 #define this_cpu_cmpxchg(pcp, oval, nval) \
-	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+	__pcpu_size16_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
 #define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
 	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
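
For illustration only (not part of the patch): a minimal sketch of how a
cmpxchg_double() user could move to the new 16-byte this_cpu_cmpxchg()
size class.  The names demo_state, demo_pack() and demo_advance() are
hypothetical; the sketch assumes an architecture that provides u128 (so the
per-cpu variable is naturally 16-byte aligned) and packs a pointer plus a
sequence count into the two halves:

	#include <linux/percpu.h>
	#include <linux/types.h>

	/* hypothetical 16-byte per-cpu state: low half pointer, high half sequence */
	static DEFINE_PER_CPU(u128, demo_state);

	static inline u128 demo_pack(unsigned long ptr, unsigned long seq)
	{
		return ((u128)seq << 64) | ptr;
	}

	static bool demo_advance(unsigned long old_ptr, unsigned long old_seq,
				 unsigned long new_ptr, unsigned long new_seq)
	{
		u128 old = demo_pack(old_ptr, old_seq);
		u128 new = demo_pack(new_ptr, new_seq);

		/*
		 * sizeof(demo_state) == 16, so __pcpu_size16_call_return2()
		 * dispatches to this_cpu_cmpxchg_16(): cmpxchg16b on x86-64,
		 * cmpxchg128()/cmpxchg128_local() on s390/arm64, or the
		 * IRQ-disabling generic fallback elsewhere.  Unlike
		 * this_cpu_cmpxchg_double(), the return value is the old
		 * 128-bit contents rather than a bool, so success is detected
		 * by comparing against the expected value.
		 */
		return this_cpu_cmpxchg(demo_state, old, new) == old;
	}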