From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

commit 42be649dd1f2eee6b1fb185f1a231b9494cf095f upstream.

For a more consistent namespace.

  [ bp: Fixup names in the doc too. ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Link: https://lore.kernel.org/r/20230814121148.976236447@xxxxxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 Documentation/admin-guide/hw-vuln/srso.rst |    4 ++--
 arch/x86/include/asm/nospec-branch.h       |    4 ++--
 arch/x86/kernel/vmlinux.lds.S              |    8 ++++----
 arch/x86/lib/retpoline.S                   |   26 +++++++++++++-------------
 4 files changed, 21 insertions(+), 21 deletions(-)

--- a/Documentation/admin-guide/hw-vuln/srso.rst
+++ b/Documentation/admin-guide/hw-vuln/srso.rst
@@ -124,8 +124,8 @@ sequence.
 To ensure the safety of this mitigation, the kernel must ensure that the
 safe return sequence is itself free from attacker interference. In Zen3
 and Zen4, this is accomplished by creating a BTB alias between the
-untraining function srso_untrain_ret_alias() and the safe return
-function srso_safe_ret_alias() which results in evicting a potentially
+untraining function srso_alias_untrain_ret() and the safe return
+function srso_alias_safe_ret() which results in evicting a potentially
 poisoned BTB entry and using that safe one for all function returns.
 
 In older Zen1 and Zen2, this is accomplished using a reinterpretation
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -195,7 +195,7 @@
 
 #ifdef CONFIG_CPU_SRSO
 	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-		      "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+		      "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
 #endif
 .endm
 
@@ -222,7 +222,7 @@ extern void srso_alias_return_thunk(void
 
 extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
-extern void srso_untrain_ret_alias(void);
+extern void srso_alias_untrain_ret(void);
 
 extern void entry_ibpb(void);
 
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -141,10 +141,10 @@ SECTIONS
 
 #ifdef CONFIG_CPU_SRSO
 		/*
-		 * See the comment above srso_untrain_ret_alias()'s
+		 * See the comment above srso_alias_untrain_ret()'s
 		 * definition.
 		 */
-		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
 		*(.text.__x86.rethunk_safe)
 #endif
 		ALIGN_ENTRY_TEXT_END
@@ -523,8 +523,8 @@ INIT_PER_CPU(irq_stack_backing_store);
  * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
-	    (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
+	    (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
 	    "SRSO function pair won't alias");
 #endif
 
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -76,56 +76,56 @@ SYM_CODE_END(__x86_indirect_thunk_array)
 #ifdef CONFIG_RETHUNK
 
 /*
- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
+ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
  * special addresses:
  *
- * - srso_untrain_ret_alias() is 2M aligned
- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
+ * - srso_alias_untrain_ret() is 2M aligned
+ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
  * and 20 in its virtual address are set (while those bits in the
- * srso_untrain_ret_alias() function are cleared).
+ * srso_alias_untrain_ret() function are cleared).
  *
  * This guarantees that those two addresses will alias in the branch
  * target buffer of Zen3/4 generations, leading to any potential
  * poisoned entries at that BTB slot to get evicted.
  *
- * As a result, srso_safe_ret_alias() becomes a safe return.
+ * As a result, srso_alias_safe_ret() becomes a safe return.
  */
 #ifdef CONFIG_CPU_SRSO
 	.section .text.__x86.rethunk_untrain
 
-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
 	ASM_NOP2
 	lfence
 	jmp srso_alias_return_thunk
-SYM_FUNC_END(srso_untrain_ret_alias)
-__EXPORT_THUNK(srso_untrain_ret_alias)
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
 
 	.section .text.__x86.rethunk_safe
 #else
 /* dummy definition for alternatives */
-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
-SYM_FUNC_END(srso_untrain_ret_alias)
+SYM_FUNC_END(srso_alias_untrain_ret)
 #endif
 
-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	add $8, %_ASM_SP
 	UNWIND_HINT_FUNC
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
-SYM_FUNC_END(srso_safe_ret_alias)
+SYM_FUNC_END(srso_alias_safe_ret)
 
 	.section .text.__x86.return_thunk
 
 SYM_CODE_START(srso_alias_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
-	call srso_safe_ret_alias
+	call srso_alias_safe_ret
 	ud2
 SYM_CODE_END(srso_alias_return_thunk)
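
Illustration only, not part of the patch: a minimal userspace sketch of the
aliasing property the vmlinux.lds.S ASSERT above enforces. Linker scripts have
no XOR operator, so the script computes (A | B) - (A & B), which equals A ^ B,
and checks that srso_alias_untrain_ret() and srso_alias_safe_ret() differ only
in bits 2, 8, 14 and 20. The addresses below are hypothetical stand-ins; the
real ones are assigned by the linker.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical 2M-aligned address standing in for srso_alias_untrain_ret */
	uint64_t untrain = 0xffffffff82200000ULL;
	/*
	 * same 2M page, but with bits 2, 8, 14 and 20 set, mirroring what
	 * vmlinux.lds.S does when placing srso_alias_safe_ret
	 */
	uint64_t safe = untrain | (1ULL << 2) | (1ULL << 8) |
				  (1ULL << 14) | (1ULL << 20);

	/* linker scripts lack ^, hence the (A | B) - (A & B) form */
	uint64_t diff = (untrain | safe) - (untrain & safe);

	assert(diff == (untrain ^ safe));
	assert(diff == ((1ULL << 2) | (1ULL << 8) | (1ULL << 14) | (1ULL << 20)));

	printf("XOR of the pair: 0x%llx\n", (unsigned long long)diff);
	return 0;
}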