This is a note to let you know that I've just added the patch titled

    x86/alternative: Make custom return thunk unconditional

to the 6.4-stable tree which can be found at:

http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-alternative-make-custom-return-thunk-unconditional.patch
and it can be found in the queue-6.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


>From 095b8303f3835c68ac4a8b6d754ca1c3b6230711 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Date: Mon, 14 Aug 2023 13:44:30 +0200
Subject: x86/alternative: Make custom return thunk unconditional

From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

commit 095b8303f3835c68ac4a8b6d754ca1c3b6230711 upstream.

There is infrastructure to rewrite return thunks to point to any random
thunk one desires, unwrap that from CALL_THUNKS, which up to now was the
sole user of that.

  [ bp: Make the thunks visible on 32-bit and add ifdeffery for the
    32-bit builds. ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Link: https://lore.kernel.org/r/20230814121148.775293785@xxxxxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/nospec-branch.h |    9 +++++----
 arch/x86/kernel/alternative.c        |    4 ----
 arch/x86/kernel/cpu/bugs.c           |    2 ++
 3 files changed, 7 insertions(+), 8 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -337,17 +337,18 @@ extern retpoline_thunk_t __x86_indirect_
 extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
 extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
 
+#ifdef CONFIG_RETHUNK
 extern void __x86_return_thunk(void);
+#else
+static inline void __x86_return_thunk(void) {}
+#endif
+
 extern void zen_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_untrain_ret_alias(void);
 extern void entry_ibpb(void);
 
-#ifdef CONFIG_CALL_THUNKS
 extern void (*x86_return_thunk)(void);
-#else
-#define x86_return_thunk	(&__x86_return_thunk)
-#endif
 
 #ifdef CONFIG_CALL_DEPTH_TRACKING
 extern void __x86_return_skl(void);
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -571,10 +571,6 @@ void __init_or_module noinline apply_ret
 
 #ifdef CONFIG_RETHUNK
 
-#ifdef CONFIG_CALL_THUNKS
-void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
-#endif
-
 /*
  * Rewrite the compiler generated return thunk tail-calls.
  *
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -63,6 +63,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
+void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
 {


Patches currently in stable-queue which might be from peterz@xxxxxxxxxxxxx are

queue-6.4/x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
queue-6.4/x86-static_call-fix-__static_call_fixup.patch
queue-6.4/x86-cpu-cleanup-the-untrain-mess.patch
queue-6.4/iopoll-call-cpu_relax-in-busy-loops.patch
queue-6.4/objtool-x86-fixup-frame-pointer-vs-rethunk.patch
queue-6.4/x86-retpoline-kprobes-skip-optprobe-check-for-indirect-jumps-with-retpolines-and-ibt.patch
queue-6.4/x86-retpoline-kprobes-fix-position-of-thunk-sections-with-config_lto_clang.patch
queue-6.4/x86-cpu-kvm-provide-untrain_ret_vm.patch
queue-6.4/x86-cpu-rename-srso_-.-_alias-to-srso_alias_-1.patch
queue-6.4/x86-alternative-make-custom-return-thunk-unconditional.patch
queue-6.4/x86-cpu-rename-original-retbleed-methods.patch
queue-6.4/objtool-x86-fix-srso-mess.patch
queue-6.4/x86-cpu-clean-up-srso-return-thunk-mess.patch
queue-6.4/x86-cpu-fix-__x86_return_thunk-symbol-type.patch
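For readers following the backport: the change above makes x86_return_thunk an
always-present function pointer (now defined in bugs.c, defaulting to
__x86_return_thunk) instead of one gated behind CONFIG_CALL_THUNKS, so
mitigation-selection code can retarget it regardless of that config option.
The snippet below is a minimal userspace C sketch of that indirection only; it
is not kernel code, and the thunk bodies, pick_mitigation() helper and main()
scaffolding are invented purely for illustration.

/*
 * Userspace sketch (not kernel code) of the indirection this patch makes
 * unconditional: a default return thunk plus a function pointer that
 * mitigation-selection code can retarget once at "boot".
 */
#include <stdio.h>
#include <stdbool.h>

/* Stand-in for __x86_return_thunk, the default thunk. */
static void default_return_thunk(void)
{
	puts("default return thunk");
}

/* Stand-in for a custom thunk a mitigation might install. */
static void custom_return_thunk(void)
{
	puts("custom return thunk");
}

/*
 * Analogous to the patch's
 *   void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
 * but as plain userspace state.
 */
static void (*return_thunk)(void) = default_return_thunk;

/* Hypothetical selection step, loosely mirroring what bugs.c does at boot. */
static void pick_mitigation(bool needs_custom_thunk)
{
	if (needs_custom_thunk)
		return_thunk = custom_return_thunk;
}

int main(void)
{
	return_thunk();        /* default thunk in use */
	pick_mitigation(true); /* one-time retarget, akin to boot-time setup */
	return_thunk();        /* custom thunk in use */
	return 0;
}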