NAK. This destroys this_cpu functionality. The this_cpu functions are there
to *avoid* the overhead of full atomic RMW operations. Leave this_cpu alone
and use cmpxchg directly if you want cmpxchg with full atomic semantics.
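
To make the objection concrete, here is a minimal sketch of the two
primitives that already exist today. The per-CPU variable demo_counter and
both helpers are hypothetical, for illustration only, and are not taken from
the patch quoted below.

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/atomic.h>

/* Hypothetical per-CPU counter, for illustration only. */
static DEFINE_PER_CPU(int, demo_counter);

/*
 * CPU-local fast path: this_cpu_cmpxchg() is cheap precisely because it
 * emits no LOCK prefix; it is only safe against code running on the same
 * CPU (including interrupts).
 */
static bool demo_update_local(int old, int new)
{
	return this_cpu_cmpxchg(demo_counter, old, new) == old;
}

/*
 * If a counter must instead be updated atomically from any CPU, every
 * updater (pass smp_processor_id() for the local case) can operate on the
 * variable's address with the fully atomic cmpxchg(), leaving the
 * this_cpu_*() family untouched.
 */
static bool demo_update_any(int cpu, int old, int new)
{
	return cmpxchg(per_cpu_ptr(&demo_counter, cpu), old, new) == old;
}

That split -- a cheap local primitive versus an explicit atomic cmpxchg for
cross-CPU access -- is the behaviour the NAK wants to preserve.
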
On Fri, 3 Mar 2023, Andrew Morton wrote:

>
> The patch titled
>      Subject: this_cpu_cmpxchg: x86: switch this_cpu_cmpxchg to locked, add _local function
> has been added to the -mm mm-unstable branch.  Its filename is
>      this_cpu_cmpxchg-x86-switch-this_cpu_cmpxchg-to-locked-add-_local-function.patch
>
> This patch will shortly appear at
>      https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/this_cpu_cmpxchg-x86-switch-this_cpu_cmpxchg-to-locked-add-_local-function.patch
>
> This patch will later appear in the mm-unstable branch at
>      git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
>
> Before you just go and hit "reply", please:
>    a) Consider who else should be cc'ed
>    b) Prefer to cc a suitable mailing list as well
>    c) Ideally: find the original patch on the mailing list and do a
>       reply-to-all to that, adding suitable additional cc's
>
> *** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
>
> The -mm tree is included into linux-next via the mm-everything
> branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
> and is updated there every 2-3 working days
>
> ------------------------------------------------------
> From: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
> Subject: this_cpu_cmpxchg: x86: switch this_cpu_cmpxchg to locked, add _local function
> Date: Fri, 03 Mar 2023 16:58:46 -0300
>
> The goal is to have vmstat_shepherd transfer from per-CPU counters to
> global counters remotely.  For this, an atomic this_cpu_cmpxchg is
> necessary.
>
> Following the kernel convention for cmpxchg/cmpxchg_local, change x86's
> this_cpu_cmpxchg_ helpers to be atomic, and add this_cpu_cmpxchg_local_
> helpers which are not atomic.
>
> Link: https://lkml.kernel.org/r/20230303195908.877083312@xxxxxxxxxx
> Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
> Cc: Aaron Tomlin <atomlin@xxxxxxxxxxx>
> Cc: Christoph Lameter <cl@xxxxxxxxx>
> Cc: David Hildenbrand <david@xxxxxxxxxx>
> Cc: Frederic Weisbecker <frederic@xxxxxxxxxx>
> Cc: Heiko Carstens <hca@xxxxxxxxxxxxx>
> Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
> Cc: Huacai Chen <chenhuacai@xxxxxxxxxx>
> Cc: Ingo Molnar <mingo@xxxxxxx>
> Cc: Peter Xu <peterx@xxxxxxxxxx>
> Cc: "Russell King (Oracle)" <linux@xxxxxxxxxxxxxxx>
> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> ---
>
>
> --- a/arch/x86/include/asm/percpu.h~this_cpu_cmpxchg-x86-switch-this_cpu_cmpxchg-to-locked-add-_local-function
> +++ a/arch/x86/include/asm/percpu.h
> @@ -197,11 +197,11 @@ do { \
>   * cmpxchg has no such implied lock semantics as a result it is much
>   * more efficient for cpu local operations.
>   */
> -#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \
> +#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval, lockp) \
>  ({ \
>  	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval); \
>  	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
> -	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \
> +	asm qual (__pcpu_op2_##size(lockp "cmpxchg", "%[nval]", \
>  				    __percpu_arg([var])) \
>  		  : [oval] "+a" (pco_old__), \
>  		    [var] "+m" (_var) \
> @@ -279,16 +279,20 @@ do { \
>  #define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, , pcp, val)
>  #define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, , pcp, val)
>  #define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, , pcp, val)
> -#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, , pcp, oval, nval)
> -#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, , pcp, oval, nval)
> -#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, , pcp, oval, nval)
> +#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, , pcp, oval, nval, "")
> +#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, , pcp, oval, nval, "")
> +#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, , pcp, oval, nval, "")
>
>  #define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, volatile, pcp, val)
>  #define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, volatile, pcp, val)
>  #define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, volatile, pcp, val)
> -#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
> -#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
> -#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
> +#define this_cpu_cmpxchg_local_1(pcp, oval, nval)	percpu_cmpxchg_op(1, volatile, pcp, oval, nval, "")
> +#define this_cpu_cmpxchg_local_2(pcp, oval, nval)	percpu_cmpxchg_op(2, volatile, pcp, oval, nval, "")
> +#define this_cpu_cmpxchg_local_4(pcp, oval, nval)	percpu_cmpxchg_op(4, volatile, pcp, oval, nval, "")
> +
> +#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, volatile, pcp, oval, nval, LOCK_PREFIX)
> +#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, volatile, pcp, oval, nval, LOCK_PREFIX)
> +#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, volatile, pcp, oval, nval, LOCK_PREFIX)
>
>  #ifdef CONFIG_X86_CMPXCHG64
>  #define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \
> @@ -319,16 +323,17 @@ do { \
>  #define raw_cpu_or_8(pcp, val)			percpu_to_op(8, , "or", (pcp), val)
>  #define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, , pcp, val)
>  #define raw_cpu_xchg_8(pcp, nval)		raw_percpu_xchg_op(pcp, nval)
> -#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, , pcp, oval, nval)
> +#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, , pcp, oval, nval, "")
>
> -#define this_cpu_read_8(pcp)			percpu_from_op(8, volatile, "mov", pcp)
> -#define this_cpu_write_8(pcp, val)		percpu_to_op(8, volatile, "mov", (pcp), val)
> -#define this_cpu_add_8(pcp, val)		percpu_add_op(8, volatile, (pcp), val)
> -#define this_cpu_and_8(pcp, val)		percpu_to_op(8, volatile, "and", (pcp), val)
> -#define this_cpu_or_8(pcp, val)			percpu_to_op(8, volatile, "or", (pcp), val)
> -#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, volatile, pcp, val)
> -#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(8, volatile, pcp, nval)
> -#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
> +#define this_cpu_read_8(pcp)			percpu_from_op(8, volatile, "mov", pcp)
> +#define this_cpu_write_8(pcp, val)		percpu_to_op(8, volatile, "mov", (pcp), val)
> +#define this_cpu_add_8(pcp, val)		percpu_add_op(8, volatile, (pcp), val)
> +#define this_cpu_and_8(pcp, val)		percpu_to_op(8, volatile, "and", (pcp), val)
> +#define this_cpu_or_8(pcp, val)			percpu_to_op(8, volatile, "or", (pcp), val)
> +#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, volatile, pcp, val)
> +#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(8, volatile, pcp, nval)
> +#define this_cpu_cmpxchg_local_8(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval, "")
> +#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval, LOCK_PREFIX)
>
>  /*
>   * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
> _
>
> Patches currently in -mm which might be from mtosatti@xxxxxxxxxx are
>
> mm-vmstat-remove-remote-node-draining.patch
> this_cpu_cmpxchg-arm64-switch-this_cpu_cmpxchg-to-locked-add-_local-function.patch
> this_cpu_cmpxchg-loongarch-switch-this_cpu_cmpxchg-to-locked-add-_local-function.patch
> this_cpu_cmpxchg-s390-switch-this_cpu_cmpxchg-to-locked-add-_local-function.patch
> this_cpu_cmpxchg-x86-switch-this_cpu_cmpxchg-to-locked-add-_local-function.patch
> add-this_cpu_cmpxchg_local-and-asm-generic-definitions.patch
> convert-this_cpu_cmpxchg-users-to-this_cpu_cmpxchg_local.patch
> mm-vmstat-switch-counter-modification-to-cmpxchg.patch
> mm-vmstat-use-xchg-in-cpu_vm_stats_fold.patch
> mm-vmstat-switch-vmstat-shepherd-to-flush-per-cpu-counters-remotely.patch
> mm-vmstat-refresh-stats-remotely-instead-of-via-work-item.patch
>
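
For readers trying to follow the quoted changelog, here is a minimal sketch
of the split the series is aiming at, with the patch applied. The counter
demo_stat and both functions are hypothetical, not code from the series;
they only illustrate which variant would carry the LOCK prefix.

#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/compiler.h>

/* Hypothetical per-CPU statistic, for illustration only. */
static DEFINE_PER_CPU(long, demo_stat);

/*
 * Owning-CPU update that must also be safe against the remote drain below:
 * with this patch, this_cpu_cmpxchg() emits a LOCKed cmpxchg.
 */
static void demo_add(long delta)
{
	long old;

	do {
		old = this_cpu_read(demo_stat);
	} while (this_cpu_cmpxchg(demo_stat, old, old + delta) != old);
}

/*
 * Housekeeping CPU (the vmstat shepherd in the changelog) draining another
 * CPU's counter with an ordinary, fully atomic cmpxchg().
 */
static long demo_drain_remote(int cpu)
{
	long *p = per_cpu_ptr(&demo_stat, cpu);
	long val;

	do {
		val = READ_ONCE(*p);
	} while (cmpxchg(p, val, 0) != val);

	return val;
}

Callers that never race with a remote CPU would switch to the new
this_cpu_cmpxchg_local_*() variants and keep the old, unLOCKed cost; making
the LOCKed behaviour the default for this_cpu_cmpxchg() is exactly the
trade-off the NAK above objects to.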