The following commit has been merged into the x86/core branch of tip:

Commit-ID:     bacde25dbbad6180b434332ad829b9e2f8064a56
Gitweb:        https://git.kernel.org/tip/bacde25dbbad6180b434332ad829b9e2f8064a56
Author:        Brian Gerst <brgerst@xxxxxxxxx>
AuthorDate:    Mon, 03 Mar 2025 11:52:40 -05:00
Committer:     Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Tue, 04 Mar 2025 11:24:28 +01:00

x86/retbleed: Move call depth to percpu hot section

No functional change.

Signed-off-by: Brian Gerst <brgerst@xxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Acked-by: Uros Bizjak <ubizjak@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20250303165246.2175811-6-brgerst@xxxxxxxxx
---
 arch/x86/include/asm/current.h       |  3 ---
 arch/x86/include/asm/nospec-branch.h | 11 ++++++-----
 arch/x86/kernel/asm-offsets.c        |  3 ---
 arch/x86/kernel/cpu/common.c         |  8 ++++++++
 arch/x86/lib/retpoline.S             |  2 +-
 5 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index f988462..8ba2c0f 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -14,9 +14,6 @@ struct task_struct;
 
 struct pcpu_hot {
 	struct task_struct	*current_task;
-#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
-	u64			call_depth;
-#endif
 	unsigned long		top_of_stack;
 	void			*hardirq_stack_ptr;
 	u16			softirq_pending;
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index aee26bb..44c6076 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -12,7 +12,6 @@
 #include <asm/msr-index.h>
 #include <asm/unwind_hints.h>
 #include <asm/percpu.h>
-#include <asm/current.h>
 
 /*
  * Call depth tracking for Intel SKL CPUs to address the RSB underflow
@@ -78,21 +77,21 @@
 #include <asm/asm-offsets.h>
 
 #define CREDIT_CALL_DEPTH					\
-	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);
+	movq	$-1, PER_CPU_VAR(__x86_call_depth);
 
 #define RESET_CALL_DEPTH					\
 	xor	%eax, %eax;					\
 	bts	$63, %rax;					\
-	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);
+	movq	%rax, PER_CPU_VAR(__x86_call_depth);
 
 #define RESET_CALL_DEPTH_FROM_CALL				\
 	movb	$0xfc, %al;					\
 	shl	$56, %rax;					\
-	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
+	movq	%rax, PER_CPU_VAR(__x86_call_depth);		\
 	CALL_THUNKS_DEBUG_INC_CALLS
 
 #define INCREMENT_CALL_DEPTH					\
-	sarq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
+	sarq	$5, PER_CPU_VAR(__x86_call_depth);		\
 	CALL_THUNKS_DEBUG_INC_CALLS
 
 #else
@@ -387,6 +386,8 @@ extern void call_depth_return_thunk(void);
 			__stringify(INCREMENT_CALL_DEPTH),		\
 			X86_FEATURE_CALL_DEPTH)
 
+DECLARE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
+
 #ifdef CONFIG_CALL_THUNKS_DEBUG
 DECLARE_PER_CPU(u64, __x86_call_count);
 DECLARE_PER_CPU(u64, __x86_ret_count);
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index a98020b..6fae88f 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -109,9 +109,6 @@ static void __used common(void)
 	OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
 	OFFSET(X86_top_of_stack, pcpu_hot, top_of_stack);
 	OFFSET(X86_current_task, pcpu_hot, current_task);
-#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
-	OFFSET(X86_call_depth, pcpu_hot, call_depth);
-#endif
 #if IS_ENABLED(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64)
 	/* Offset for fields in aria_ctx */
 	BLANK();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a9d6153..fd224ae 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2075,6 +2075,14 @@ DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
 #ifdef CONFIG_X86_64
+/*
+ * Note: Do not make this dependant on CONFIG_MITIGATION_CALL_DEPTH_TRACKING
+ * so that this space is reserved in the hot cache section even when the
+ * mitigation is disabled.
+ */
+DEFINE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
+EXPORT_PER_CPU_SYMBOL(__x86_call_depth);
+
 static void wrmsrl_cstar(unsigned long val)
 {
 	/*
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 038f49a..a26c43a 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -343,7 +343,7 @@ SYM_FUNC_START(call_depth_return_thunk)
 	 * case.
 	 */
 	CALL_THUNKS_DEBUG_INC_RETS
-	shlq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth)
+	shlq	$5, PER_CPU_VAR(__x86_call_depth)
 	jz	1f
 	ANNOTATE_UNRET_SAFE
 	ret
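
For reference, the value these macros maintain is a shift-based depth counter:
RESET_CALL_DEPTH leaves only bit 63 set, each call does an arithmetic right
shift by 5 (dragging more sign bits in, saturating at the all-ones
CREDIT_CALL_DEPTH value), and the return thunk shifts left by 5 and stuffs the
RSB once the value hits zero, i.e. once returns have outrun the tracked calls.
Below is a minimal userspace C sketch of that arithmetic only; it is not part
of this patch, the depth_* helper names are made up for illustration, and it
assumes the compiler implements >> on a negative signed value as an arithmetic
shift, as GCC and Clang do.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-CPU __x86_call_depth value. */
static uint64_t call_depth;

/* Models RESET_CALL_DEPTH: only bit 63 set. */
static void depth_reset(void)
{
	call_depth = UINT64_C(1) << 63;
}

/*
 * Models INCREMENT_CALL_DEPTH (sarq $5): the arithmetic right shift drags
 * copies of the set sign bit down, so each call "deepens" the counter until
 * it saturates at all-ones.  Assumes >> on int64_t is an arithmetic shift.
 */
static void depth_call(void)
{
	call_depth = (uint64_t)((int64_t)call_depth >> 5);
}

/*
 * Models the shlq $5 + jz pair in call_depth_return_thunk: each return
 * shifts 5 bits back out; reaching zero means more returns than tracked
 * calls, i.e. the RSB is about to underflow and needs stuffing.
 */
static bool depth_return_needs_stuffing(void)
{
	call_depth <<= 5;
	return call_depth == 0;
}

int main(void)
{
	depth_reset();
	for (int i = 0; i < 3; i++)
		depth_call();			/* three calls */
	for (int i = 1; i <= 5; i++)		/* then five returns */
		printf("return %d: stuff RSB? %s\n", i,
		       depth_return_needs_stuffing() ? "yes" : "no");
	return 0;
}

With three calls followed by five returns, the sketch reports stuffing on the
fourth return, mirroring the jz path taken by call_depth_return_thunk above.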