This is a note to let you know that I've just added the patch titled

    x86/bhi: Add support for clearing branch history at syscall entry

to the 5.15-stable tree which can be found at:

	http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-bhi-add-support-for-clearing-branch-history-at-syscall-entry.patch
and it can be found in the queue-5.15 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From b3eadeb8ecaacb8181b7fc263ce4a7d60d3fa6ca Mon Sep 17 00:00:00 2001
From: Pawan Gupta <pawan.kumar.gupta@xxxxxxxxxxxxxxx>
Date: Mon, 11 Mar 2024 08:56:58 -0700
Subject: x86/bhi: Add support for clearing branch history at syscall entry

From: Pawan Gupta <pawan.kumar.gupta@xxxxxxxxxxxxxxx>

commit 7390db8aea0d64e9deb28b8e1ce716f5020c7ee5 upstream.

Branch History Injection (BHI) attacks may allow a malicious application to
influence indirect branch prediction in the kernel by poisoning the branch
history. eIBRS isolates indirect branch targets in ring0. The BHB can still
influence the choice of indirect branch predictor entry, and although branch
predictor entries are isolated between modes when eIBRS is enabled, the BHB
itself is not isolated between modes.

Alder Lake and newer processors support a hardware control, BHI_DIS_S, to
mitigate BHI. For older parts that don't support BHI_DIS_S, Intel has
released a software sequence to clear the branch history. Add support to
execute this software sequence at syscall entry and VMexit to overwrite the
branch history.

For now, branch history is not cleared at interrupt entry, as malicious
applications are not believed to have sufficient control over the registers,
since previous register state is cleared at interrupt entry. Researchers
continue to poke at this area and it may become necessary to clear at
interrupt entry as well in the future.

This mitigation is only defined here. It is enabled later.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@xxxxxxxxxxxxxxx>
Co-developed-by: Daniel Sneddon <daniel.sneddon@xxxxxxxxxxxxxxx>
Signed-off-by: Daniel Sneddon <daniel.sneddon@xxxxxxxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Reviewed-by: Alexandre Chartre <alexandre.chartre@xxxxxxxxxx>
Reviewed-by: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Signed-off-by: Daniel Sneddon <daniel.sneddon@xxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/entry/entry_64.S            |   61 +++++++++++++++++++++++++++++++++++
 arch/x86/entry/entry_64_compat.S     |    3 +
 arch/x86/include/asm/cpufeatures.h   |    8 ++++
 arch/x86/include/asm/nospec-branch.h |   12 ++++++
 arch/x86/kvm/vmx/vmenter.S           |    2 +
 5 files changed, 86 insertions(+)

--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -114,6 +114,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_h
 	/* clobbers %rax, make sure it is after saving the syscall nr */
 	IBRS_ENTER
 	UNTRAIN_RET
+	CLEAR_BRANCH_HISTORY
 
 	call	do_syscall_64		/* returns with IRQs disabled */
 
@@ -1510,3 +1511,63 @@ SYM_CODE_START(rewind_stack_and_make_dea
 	call	make_task_dead
 SYM_CODE_END(rewind_stack_and_make_dead)
 .popsection
+
+/*
+ * This sequence executes branches in order to remove user branch information
+ * from the branch history tracker in the Branch Predictor, therefore removing
+ * user influence on subsequent BTB lookups.
+ *
+ * It should be used on parts prior to Alder Lake. Newer parts should use the
+ * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being
+ * virtualized on newer hardware the VMM should protect against BHI attacks by
+ * setting BHI_DIS_S for the guests.
+ *
+ * CALLs/RETs are necessary to prevent Loop Stream Detector(LSD) from engaging
+ * and not clearing the branch history. The call tree looks like:
+ *
+ * call 1
+ *   call 2
+ *     call 2
+ *       call 2
+ *         call 2
+ *           call 2
+ *           ret
+ *         ret
+ *       ret
+ *     ret
+ *   ret
+ * ret
+ *
+ * This means that the stack is non-constant and ORC can't unwind it with %rsp
+ * alone. Therefore we unconditionally set up the frame pointer, which allows
+ * ORC to unwind properly.
+ *
+ * The alignment is for performance and not for safety, and may be safely
+ * refactored in the future if needed.
+ */
+SYM_FUNC_START(clear_bhb_loop)
+	push	%rbp
+	mov	%rsp, %rbp
+	movl	$5, %ecx
+	ANNOTATE_INTRA_FUNCTION_CALL
+	call	1f
+	jmp	5f
+	.align 64, 0xcc
+	ANNOTATE_INTRA_FUNCTION_CALL
+1:	call	2f
+	RET
+	.align 64, 0xcc
+2:	movl	$5, %eax
+3:	jmp	4f
+	nop
+4:	sub	$1, %eax
+	jnz	3b
+	sub	$1, %ecx
+	jnz	1b
+	RET
+5:	lfence
+	pop	%rbp
+	RET
+SYM_FUNC_END(clear_bhb_loop)
+EXPORT_SYMBOL_GPL(clear_bhb_loop)
+STACK_FRAME_NON_STANDARD(clear_bhb_loop)
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -116,6 +116,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_af
 
 	IBRS_ENTER
 	UNTRAIN_RET
+	CLEAR_BRANCH_HISTORY
 
 	/*
 	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
@@ -259,6 +260,7 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_aft
 
 	IBRS_ENTER
 	UNTRAIN_RET
+	CLEAR_BRANCH_HISTORY
 
 	movq	%rsp, %rdi
 	call	do_fast_syscall_32
@@ -422,6 +424,7 @@ SYM_CODE_START(entry_INT80_compat)
 
 	IBRS_ENTER
 	UNTRAIN_RET
+	CLEAR_BRANCH_HISTORY
 
 	movq	%rsp, %rdi
 	call	do_int80_syscall_32
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -422,6 +422,14 @@
 #define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */
 
 /*
+ * Extended auxiliary flags: Linux defined - for features scattered in various
+ * CPUID levels like 0x80000022, etc and Linux defined features.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+#define X86_FEATURE_CLEAR_BHB_LOOP	(21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */
+
+/*
  * BUG word(s)
  */
 #define X86_BUG(x)			(NCAPINTS*32 + (x))
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -203,6 +203,14 @@
 .Lskip_verw_\@:
 .endm
 
+#ifdef CONFIG_X86_64
+.macro CLEAR_BRANCH_HISTORY
+	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
+.endm
+#else
+#define CLEAR_BRANCH_HISTORY
+#endif
+
 #else /* __ASSEMBLY__ */
 
 #define ANNOTATE_RETPOLINE_SAFE					\
@@ -228,6 +236,10 @@ extern void srso_alias_untrain_ret(void)
 extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);
 
+#ifdef CONFIG_X86_64
+extern void clear_bhb_loop(void);
+#endif
+
 extern void (*x86_return_thunk)(void);
 
 #ifdef CONFIG_RETPOLINE
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -213,6 +213,8 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL
 
 	call vmx_spec_ctrl_restore_host
 
+	CLEAR_BRANCH_HISTORY
+
 	/* Put return value in AX */
 	mov	%_ASM_BX, %_ASM_AX
 


Patches currently in stable-queue which might be from pawan.kumar.gupta@xxxxxxxxxxxxxxx are

queue-5.15/x86-rfds-mitigate-register-file-data-sampling-rfds.patch
queue-5.15/x86-bhi-define-spec_ctrl_bhi_dis_s.patch
queue-5.15/x86-entry_32-add-verw-just-before-userspace-transition.patch
queue-5.15/x86-bugs-add-asm-helpers-for-executing-verw.patch
queue-5.15/x86-bhi-add-support-for-clearing-branch-history-at-syscall-entry.patch
queue-5.15/x86-bhi-mitigate-kvm-by-default.patch
queue-5.15/kvm-x86-export-rfds_no-and-rfds_clear-to-guests.patch
queue-5.15/x86-asm-add-_asm_rip-macro-for-x86-64-rip-suffix.patch
queue-5.15/x86-bhi-enumerate-branch-history-injection-bhi-bug.patch
queue-5.15/x86-entry_64-add-verw-just-before-userspace-transition.patch
queue-5.15/x86-mmio-disable-kvm-mitigation-when-x86_feature_clear_cpu_buf-is-set.patch
queue-5.15/x86-bugs-use-alternative-instead-of-mds_user_clear-static-key.patch
queue-5.15/documentation-hw-vuln-add-documentation-for-rfds.patch
queue-5.15/kvm-vmx-use-bt-jnc-i.e.-eflags.cf-to-select-vmresume-vs.-vmlaunch.patch
queue-5.15/kvm-vmx-move-verw-closer-to-vmentry-for-mds-mitigation.patch
queue-5.15/x86-bhi-add-bhi-mitigation-knob.patch
queue-5.15/kvm-x86-add-bhi_no.patch