The following commit has been merged into the x86/percpu branch of tip:

Commit-ID:     61d73e4f7d538f3907d954a531169e8164aef56b
Gitweb:        https://git.kernel.org/tip/61d73e4f7d538f3907d954a531169e8164aef56b
Author:        Ingo Molnar <mingo@xxxxxxxxxx>
AuthorDate:    Mon, 20 May 2024 10:22:39 +02:00
Committer:     Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Mon, 20 May 2024 10:56:23 +02:00

x86/percpu: Clean up <asm/percpu.h> a bit

- Fix misc typos

- There are four variants of the same spelling right now: 'per-CPU',
  'per CPU', 'percpu' and 'per-cpu'. Standardize on 'per-CPU' only.

- s/makes gcc load/makes the compiler load/

- Instead of:

	#ifdef CONFIG_XXXX
	#define YYYY FOO
	#else
	#define YYYY BAR
	#endif

  use the slightly more readable form of:

	#ifdef CONFIG_XXXX
	# define YYYY FOO
	#else
	# define YYYY BAR
	#endif

  (A minimal standalone sketch of this style follows the patch below.)

- Standardize & expand '#else' and '#endif' comments

- Fix comment style

- Capitalize x86 instruction names in comments

No change in code.

Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Uros Bizjak <ubizjak@xxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
 arch/x86/include/asm/percpu.h | 91 ++++++++++++++++++----------------
 1 file changed, 50 insertions(+), 41 deletions(-)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0f0d897..b424cb1 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -3,30 +3,30 @@
 #define _ASM_X86_PERCPU_H
 
 #ifdef CONFIG_X86_64
-#define __percpu_seg		gs
-#define __percpu_rel		(%rip)
+# define __percpu_seg		gs
+# define __percpu_rel		(%rip)
 #else
-#define __percpu_seg		fs
-#define __percpu_rel
+# define __percpu_seg		fs
+# define __percpu_rel
 #endif
 
 #ifdef __ASSEMBLY__
 
 #ifdef CONFIG_SMP
-#define __percpu		%__percpu_seg:
+# define __percpu		%__percpu_seg:
 #else
-#define __percpu
+# define __percpu
 #endif
 
 #define PER_CPU_VAR(var)	__percpu(var)__percpu_rel
 
 #ifdef CONFIG_X86_64_SMP
-#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
+# define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
 #else
-#define INIT_PER_CPU_VAR(var)	var
+# define INIT_PER_CPU_VAR(var)	var
 #endif
 
-#else /* ...!ASSEMBLY */
+#else /* !__ASSEMBLY__: */
 
 #include <linux/build_bug.h>
 #include <linux/stringify.h>
@@ -37,19 +37,19 @@
 #ifdef CONFIG_CC_HAS_NAMED_AS
 
 #ifdef __CHECKER__
-#define __seg_gs		__attribute__((address_space(__seg_gs)))
-#define __seg_fs		__attribute__((address_space(__seg_fs)))
+# define __seg_gs		__attribute__((address_space(__seg_gs)))
+# define __seg_fs		__attribute__((address_space(__seg_fs)))
 #endif
 
 #ifdef CONFIG_X86_64
-#define __percpu_seg_override	__seg_gs
+# define __percpu_seg_override	__seg_gs
 #else
-#define __percpu_seg_override	__seg_fs
+# define __percpu_seg_override	__seg_fs
 #endif
 
 #define __percpu_prefix		""
 
-#else /* CONFIG_CC_HAS_NAMED_AS */
+#else /* !CONFIG_CC_HAS_NAMED_AS: */
 
 #define __percpu_seg_override
 #define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
@@ -80,7 +80,8 @@
 
 #define PER_CPU_VAR(var)	%__percpu_seg:(var)__percpu_rel
 
-#else /* CONFIG_SMP */
+#else /* !CONFIG_SMP: */
+
 #define __percpu_seg_override
 #define __percpu_prefix		""
 #define __force_percpu_prefix	""
@@ -96,7 +97,7 @@
 #define __force_percpu_arg(x)	__force_percpu_prefix "%" #x
 
 /*
- * Initialized pointers to per-cpu variables needed for the boot
+ * Initialized pointers to per-CPU variables needed for the boot
  * processor need to use these macros to get the proper address
  * offset from __per_cpu_load on SMP.
 *
@@ -106,13 +107,15 @@ extern typeof(var) init_per_cpu_var(var)
 
 #ifdef CONFIG_X86_64_SMP
-#define init_per_cpu_var(var)	init_per_cpu__##var
+# define init_per_cpu_var(var)	init_per_cpu__##var
 #else
-#define init_per_cpu_var(var)	var
+# define init_per_cpu_var(var)	var
 #endif
 
-/* For arch-specific code, we can use direct single-insn ops (they
- * don't give an lvalue though). */
+/*
+ * For arch-specific code, we can use direct single-insn ops (they
+ * don't give an lvalue though).
+ */
 
 #define __pcpu_type_1		u8
 #define __pcpu_type_2		u16
@@ -158,7 +161,7 @@ do {									\
 
 #define __raw_cpu_read_const(pcp)	__raw_cpu_read(, , pcp)
 
-#else /* CONFIG_USE_X86_SEG_SUPPORT */
+#else /* !CONFIG_USE_X86_SEG_SUPPORT: */
 
 #define __raw_cpu_read(size, qual, _var)				\
 ({									\
@@ -183,7 +186,7 @@ do {									\
 } while (0)
 
 /*
- * The generic per-cpu infrastrucutre is not suitable for
+ * The generic per-CPU infrastrucutre is not suitable for
  * reading const-qualified variables.
  */
 #define __raw_cpu_read_const(pcp)	({ BUILD_BUG(); (typeof(pcp))0; })
@@ -219,7 +222,7 @@ do {									\
 } while (0)
 
 /*
- * Generate a percpu add to memory instruction and optimize code
+ * Generate a per-CPU add to memory instruction and optimize code
  * if one is added or subtracted.
  */
 #define percpu_add_op(size, qual, var, val)				\
@@ -266,9 +269,9 @@ do {									\
 })
 
 /*
- * this_cpu_xchg() is implemented using cmpxchg without a lock prefix.
- * xchg is expensive due to the implied lock prefix. The processor
- * cannot prefetch cachelines if xchg is used.
+ * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix.
+ * XCHG is expensive due to the implied LOCK prefix. The processor
+ * cannot prefetch cachelines if XCHG is used.
  */
 #define this_percpu_xchg_op(_var, _nval)				\
 ({									\
@@ -278,8 +281,8 @@ do {									\
 })
 
 /*
- * cmpxchg has no such implied lock semantics as a result it is much
- * more efficient for cpu local operations.
+ * CMPXCHG has no such implied lock semantics as a result it is much
+ * more efficient for CPU-local operations.
  */
 #define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)		\
 ({									\
@@ -314,6 +317,7 @@ do {									\
 })
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
+
 #define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval)		\
 ({									\
 	union {								\
@@ -374,7 +378,8 @@ do {									\
 
 #define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg64_op(8, , pcp, ovalp, nval)
 #define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)
-#endif
+
+#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */
 
 #ifdef CONFIG_X86_64
 #define raw_cpu_cmpxchg64(pcp, oval, nval)	percpu_cmpxchg_op(8, , pcp, oval, nval);
@@ -443,7 +448,8 @@ do {									\
 
 #define raw_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16, , pcp, ovalp, nval)
 #define this_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
-#endif
+
+#endif /* CONFIG_X86_64 */
 
 #define raw_cpu_read_1(pcp)		__raw_cpu_read(1, , pcp)
 #define raw_cpu_read_2(pcp)		__raw_cpu_read(2, , pcp)
@@ -510,8 +516,8 @@ do {									\
 #define this_cpu_try_cmpxchg_4(pcp, ovalp, nval)	percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)
 
 /*
- * Per cpu atomic 64 bit operations are only available under 64 bit.
- * 32 bit must fall back to generic operations.
+ * Per-CPU atomic 64-bit operations are only available under 64-bit kernels.
+ * 32-bit kernels must fall back to generic operations.
  */
 #ifdef CONFIG_X86_64
 #define raw_cpu_read_8(pcp)		__raw_cpu_read(8, , pcp)
@@ -539,20 +545,23 @@ do {									\
 #define this_cpu_try_cmpxchg_8(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)
 
 #define raw_cpu_read_long(pcp)		raw_cpu_read_8(pcp)
-#else
-/* There is no generic 64 bit read stable operation for 32 bit targets. */
+
+#else /* !CONFIG_X86_64: */
+
+/* There is no generic 64-bit read stable operation for 32-bit targets. */
 #define this_cpu_read_stable_8(pcp)	({ BUILD_BUG(); (typeof(pcp))0; })
 
 #define raw_cpu_read_long(pcp)		raw_cpu_read_4(pcp)
-#endif
+
+#endif /* CONFIG_X86_64 */
 
 #define this_cpu_read_const(pcp)	__raw_cpu_read_const(pcp)
 
 /*
- * this_cpu_read() makes gcc load the percpu variable every time it is
- * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read() makes the compiler load the per-CPU variable every time
+ * it is accessed while this_cpu_read_stable() allows the value to be cached.
  * this_cpu_read_stable() is more efficient and can be used if its value
- * is guaranteed to be valid across cpus. The current users include
+ * is guaranteed to be valid across CPUs. The current users include
  * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
  * actually per-thread variables implemented as per-CPU variables and
  * thus stable for the duration of the respective task.
@@ -626,12 +635,12 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 
 #define early_per_cpu_ptr(_name)	(_name##_early_ptr)
 #define early_per_cpu_map(_name, _idx)	(_name##_early_map[_idx])
-#define early_per_cpu(_name, _cpu) \
+#define early_per_cpu(_name, _cpu)					\
 	*(early_per_cpu_ptr(_name) ?					\
 		&early_per_cpu_ptr(_name)[_cpu] :			\
 		&per_cpu(_name, _cpu))
 
-#else /* !CONFIG_SMP */
+#else /* !CONFIG_SMP: */
 
 #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
 	DEFINE_PER_CPU(_type, _name) = _initvalue
@@ -651,6 +660,6 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 #define early_per_cpu_ptr(_name)	NULL
 /* no early_per_cpu_map() */
 
-#endif /* !CONFIG_SMP */
+#endif /* CONFIG_SMP */
 
 #endif /* _ASM_X86_PERCPU_H */
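
As referenced in the changelog above, here is a minimal, self-contained
sketch of the indented-directive style the patch standardizes on. This is
an illustration only: CONFIG_EXAMPLE_64BIT is an invented symbol, not a
real Kconfig option, and the program has nothing to do with the kernel's
actual per-CPU machinery.

  /*
   * Nested preprocessor directives get one space of indentation after
   * the '#', and #else/#endif carry a comment naming their condition.
   * CONFIG_EXAMPLE_64BIT is hypothetical; build with or without
   * -DCONFIG_EXAMPLE_64BIT to exercise both branches.
   */
  #include <stdio.h>

  #ifdef CONFIG_EXAMPLE_64BIT
  # define SEG_NAME	"gs"
  # define ADDR_MODE	"RIP-relative"
  #else /* !CONFIG_EXAMPLE_64BIT: */
  # define SEG_NAME	"fs"
  # define ADDR_MODE	"absolute"
  #endif /* CONFIG_EXAMPLE_64BIT */

  int main(void)
  {
  	printf("per-CPU segment: %%%s, addressing: %s\n", SEG_NAME, ADDR_MODE);
  	return 0;
  }

The indentation makes the nesting depth of each directive visible at a
glance, which is the readability gain the changelog is after.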
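The this_cpu_read() vs. this_cpu_read_stable() comment touched by one of
the later hunks can also be pictured with a rough userspace analogy: a
volatile-qualified access forces the compiler to reload the variable on
every use, while a plain access may be cached in a register. This is only
an analogy under stated assumptions (GCC/Clang typeof; invented macro and
variable names), not the kernel implementation:

  /*
   * Userspace analogy, not kernel code. READ_EVERY_TIME() reloads from
   * memory on each access, like this_cpu_read(); READ_STABLE() lets the
   * compiler cache the value, like this_cpu_read_stable(). The names
   * here are made up for illustration.
   */
  #include <stdio.h>

  static unsigned long example_counter = 42;

  /* Forces a fresh load on every access. */
  #define READ_EVERY_TIME(var)	(*(volatile typeof(var) *)&(var))

  /* Plain access: the compiler may keep the value in a register. */
  #define READ_STABLE(var)	(var)

  int main(void)
  {
  	printf("%lu %lu\n", READ_EVERY_TIME(example_counter),
  	       READ_STABLE(example_counter));
  	return 0;
  }

As the comment in the patch notes, the "stable" form is only safe when the
value genuinely cannot change underneath the caller, e.g. per-thread state
such as pcpu_hot.current_task.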