The following commit has been merged into the x86/percpu branch of tip:

Commit-ID:     b90169b42a6f49ff2fe2e4d4ed0bbcf17fb8f1bf
Gitweb:        https://git.kernel.org/tip/b90169b42a6f49ff2fe2e4d4ed0bbcf17fb8f1bf
Author:        Uros Bizjak <ubizjak@xxxxxxxxx>
AuthorDate:    Sun, 24 Mar 2024 22:19:45 +01:00
Committer:     Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Mon, 25 Mar 2024 12:03:45 +01:00

x86/percpu: Do not use this_cpu_read_stable_8() for 32-bit targets

The this_cpu_read_stable() macro uses __pcpu_size_call_return(), which
unconditionally calls this_cpu_read_stable_8(), also for 32-bit targets.
This usage is invalid, as it results in the generation of a 64-bit MOVQ
instruction on 32-bit targets via the percpu_stable_op() macro.

Since there is no generic support for this_cpu_read_stable_8() on 32-bit
targets, define this_cpu_read_stable_8() to BUILD_BUG() when CONFIG_X86_64
is not defined. This way, we can be sure that this_cpu_read_stable_8() is
never actually used for 32-bit targets, while it remains defined to
prevent a build failure.

Signed-off-by: Uros Bizjak <ubizjak@xxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Ard Biesheuvel <ardb@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20240324212014.310189-1-ubizjak@xxxxxxxxx
---
 arch/x86/include/asm/percpu.h | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index f6ddbaa..1f6404e 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -423,10 +423,6 @@ do { \
  * actually per-thread variables implemented as per-CPU variables and
  * thus stable for the duration of the respective task.
  */
-#define this_cpu_read_stable_1(pcp)	percpu_stable_op(1, "mov", pcp)
-#define this_cpu_read_stable_2(pcp)	percpu_stable_op(2, "mov", pcp)
-#define this_cpu_read_stable_4(pcp)	percpu_stable_op(4, "mov", pcp)
-#define this_cpu_read_stable_8(pcp)	percpu_stable_op(8, "mov", pcp)
 #define this_cpu_read_stable(pcp)	__pcpu_size_call_return(this_cpu_read_stable_, pcp)
 
 #ifdef CONFIG_USE_X86_SEG_SUPPORT
@@ -495,6 +491,10 @@ do { \
 #define this_cpu_read_const(pcp)	({ BUILD_BUG(); (typeof(pcp))0; })
 #endif /* CONFIG_USE_X86_SEG_SUPPORT */
 
+#define this_cpu_read_stable_1(pcp)	percpu_stable_op(1, "mov", pcp)
+#define this_cpu_read_stable_2(pcp)	percpu_stable_op(2, "mov", pcp)
+#define this_cpu_read_stable_4(pcp)	percpu_stable_op(4, "mov", pcp)
+
 #define raw_cpu_add_1(pcp, val)		percpu_add_op(1, , (pcp), val)
 #define raw_cpu_add_2(pcp, val)		percpu_add_op(2, , (pcp), val)
 #define raw_cpu_add_4(pcp, val)		percpu_add_op(4, , (pcp), val)
@@ -546,6 +546,8 @@ do { \
  * 32 bit must fall back to generic operations.
  */
 #ifdef CONFIG_X86_64
+#define this_cpu_read_stable_8(pcp)	percpu_stable_op(8, "mov", pcp)
+
 #define raw_cpu_add_8(pcp, val)			percpu_add_op(8, , (pcp), val)
 #define raw_cpu_and_8(pcp, val)			percpu_to_op(8, , "and", (pcp), val)
 #define raw_cpu_or_8(pcp, val)			percpu_to_op(8, , "or", (pcp), val)
@@ -561,6 +563,9 @@ do { \
 #define this_cpu_xchg_8(pcp, nval)		this_percpu_xchg_op(pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
 #define this_cpu_try_cmpxchg_8(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)
+#else
+/* There is no generic 64 bit read stable operation for 32 bit targets. */
+#define this_cpu_read_stable_8(pcp)	({ BUILD_BUG(); (typeof(pcp))0; })
 #endif
 
 static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
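
A note for readers not familiar with the per-cpu accessors: the reason
this_cpu_read_stable_8() must stay defined even though it can never be
selected on 32-bit is the size dispatcher behind this_cpu_read_stable().
__pcpu_size_call_return() pastes the _1/_2/_4/_8 variants into a switch on
sizeof() at every call site, so all four names have to expand to something
that compiles. The fragment below is a minimal, self-contained userspace
sketch of that pattern, not the kernel's actual macros; the mini_read_*()
names are made up for illustration, and the __attribute__((error)) stub
merely plays the role BUILD_BUG() plays in the kernel.

/* mini_read_stable.c - illustrative sketch only, not kernel code.
 *
 * Build with: gcc -O2 -Wall mini_read_stable.c
 * Like BUILD_BUG(), the error-attribute stub relies on the dead switch
 * case being optimized away; at -O0 the stub call may survive and the
 * build then fails, which is the intended behaviour of the pattern.
 */
#include <stdio.h>

static unsigned int four_byte_var = 42;	/* stands in for a per-CPU variable */

/* Supported size: a plain read is enough for this demo. */
#define mini_read_stable_4(var)	(var)

/* Unsupported size: any call that survives optimization breaks the build. */
extern unsigned long long mini_read_stable_8_bad(void)
	__attribute__((error("no 8-byte stable read in this 32-bit sketch")));

#define mini_read_stable_8(var)	((typeof(var))mini_read_stable_8_bad())

/*
 * Size dispatcher, loosely modelled on __pcpu_size_call_return(): it
 * textually references every size variant, which is why the 8-byte one
 * must at least expand to something that compiles.
 */
#define mini_read_stable(var)						\
({									\
	typeof(var) ret__ = 0;						\
	switch (sizeof(var)) {						\
	case 4: ret__ = mini_read_stable_4(var); break;			\
	case 8: ret__ = mini_read_stable_8(var); break;			\
	}								\
	ret__;								\
})

int main(void)
{
	/* Only the 4-byte case is reachable here, but the 8-byte case is
	 * still parsed and compiled; it just must not end up in the final
	 * code. */
	printf("%u\n", mini_read_stable(four_byte_var));
	return 0;
}

The kernel's real dispatcher lives in include/linux/percpu-defs.h; the
patch above only changes which size variants are defined under
CONFIG_X86_64 and makes the unreachable 8-byte variant expand to
BUILD_BUG() on 32-bit.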