This is a note to let you know that I've just added the patch titled

    x86/entry: Add kernel IBRS implementation

to the 4.14-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-entry-add-kernel-ibrs-implementation.patch
and it can be found in the queue-4.14 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


>From foo@baz Mon Oct 31 07:55:50 AM CET 2022
From: Suraj Jitindar Singh <surajjs@xxxxxxxxxx>
Date: Thu, 27 Oct 2022 13:54:52 -0700
Subject: x86/entry: Add kernel IBRS implementation
To: <stable@xxxxxxxxxxxxxxx>
Cc: <surajjs@xxxxxxxxxx>, <sjitindarsingh@xxxxxxxxx>, <cascardo@xxxxxxxxxxxxx>, <kvm@xxxxxxxxxxxxxxx>, <pbonzini@xxxxxxxxxx>, <jpoimboe@xxxxxxxxxx>, <peterz@xxxxxxxxxxxxx>, <x86@xxxxxxxxxx>
Message-ID: <20221027205452.17271-4-surajjs@xxxxxxxxxx>

From: Thadeu Lima de Souza Cascardo <cascardo@xxxxxxxxxxxxx>

commit 2dbb887e875b1de3ca8f40ddf26bcfe55798c609 upstream.

Implement Kernel IBRS - currently the only known option to mitigate RSB
underflow speculation issues on Skylake hardware.

Note: since IBRS_ENTER requires fuller context established than
UNTRAIN_RET, it must be placed after it. However, since UNTRAIN_RET
itself implies a RET, it must come after IBRS_ENTER. This means
IBRS_ENTER needs to also move UNTRAIN_RET.

Note 2: KERNEL_IBRS is sub-optimal for XenPV.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Reviewed-by: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
[cascardo: conflict at arch/x86/entry/entry_64_compat.S]
[cascardo: conflict fixups, no ANNOTATE_NOENDBR]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@xxxxxxxxxxxxx>
[ bp: Adjust context ]
Signed-off-by: Suraj Jitindar Singh <surajjs@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
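For readers unfamiliar with the mechanism: in rough C terms, the
IBRS_ENTER/IBRS_EXIT assembly macros added below behave like the sketch
that follows. This is illustrative only and not part of the patch;
wrmsrl() and this_cpu_read() are the kernel's usual helpers, while
kernel_ibrs_enter()/kernel_ibrs_exit() are hypothetical names invented
here. A second sketch covering the save_reg form used by the paranoid
paths appears at the end of this mail.

	#include <linux/percpu.h>	/* this_cpu_read() */
	#include <asm/msr.h>		/* wrmsrl(), MSR_IA32_SPEC_CTRL */
	#include <asm/nospec-branch.h>	/* SPEC_CTRL_IBRS, x86_spec_ctrl_current */

	/* Kernel entry: enable IBRS. The per-CPU x86_spec_ctrl_current
	 * already has SPEC_CTRL_IBRS set when the mitigation is enabled. */
	static void kernel_ibrs_enter(void)	/* hypothetical name */
	{
		wrmsrl(MSR_IA32_SPEC_CTRL,
		       this_cpu_read(x86_spec_ctrl_current));
	}

	/* Exit to user space: write the value back with the IBRS bit
	 * cleared, so user code runs without the restriction. */
	static void kernel_ibrs_exit(void)	/* hypothetical name */
	{
		wrmsrl(MSR_IA32_SPEC_CTRL,
		       this_cpu_read(x86_spec_ctrl_current) & ~SPEC_CTRL_IBRS);
	}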
 arch/x86/entry/calling.h           |   58 +++++++++++++++++++++++++++++++++++++
 arch/x86/entry/entry_64.S          |   33 +++++++++++++++++++++
 arch/x86/entry/entry_64_compat.S   |   12 +++++++
 arch/x86/include/asm/cpufeatures.h |    2 -
 4 files changed, 103 insertions(+), 2 deletions(-)

--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -6,6 +6,8 @@
 #include <asm/percpu.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
+#include <asm/msr.h>
+#include <asm/nospec-branch.h>
 
 /*
 
@@ -329,6 +331,62 @@ For 32-bit we have the following convent
 #endif
 
 /*
+ * IBRS kernel mitigation for Spectre_v2.
+ *
+ * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
+ * the regs it uses (AX, CX, DX). Must be called before the first RET
+ * instruction (NOTE! UNTRAIN_RET includes a RET instruction)
+ *
+ * The optional argument is used to save/restore the current value,
+ * which is used on the paranoid paths.
+ *
+ * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
+ */
+.macro IBRS_ENTER save_reg
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+	movl	$MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+	rdmsr
+	shl	$32, %rdx
+	or	%rdx, %rax
+	mov	%rax, \save_reg
+	test	$SPEC_CTRL_IBRS, %eax
+	jz	.Ldo_wrmsr_\@
+	lfence
+	jmp	.Lend_\@
+.Ldo_wrmsr_\@:
+.endif
+
+	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	movl	%edx, %eax
+	shr	$32, %rdx
+	wrmsr
+.Lend_\@:
+.endm
+
+/*
+ * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
+ * regs. Must be called after the last RET.
+ */
+.macro IBRS_EXIT save_reg
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+	movl	$MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+	mov	\save_reg, %rdx
+.else
+	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	andl	$(~SPEC_CTRL_IBRS), %edx
+.endif
+
+	movl	%edx, %eax
+	shr	$32, %rdx
+	wrmsr
+.Lend_\@:
+.endm
+
+/*
  * Mitigate Spectre v1 for conditional swapgs code paths.
  *
  * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -230,6 +230,10 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 
 	/* IRQs are off. */
 	movq	%rsp, %rdi
+
+	/* clobbers %rax, make sure it is after saving the syscall nr */
+	IBRS_ENTER
+
 	call	do_syscall_64		/* returns with IRQs disabled */
 
 	TRACE_IRQS_IRETQ		/* we're about to change IF */
@@ -301,6 +305,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 	 * perf profiles. Nothing jumps here.
 	 */
 syscall_return_via_sysret:
+	IBRS_EXIT
 	POP_REGS pop_rdi=0
 
 	/*
@@ -590,6 +595,7 @@ GLOBAL(retint_user)
 	TRACE_IRQS_IRETQ
 
 GLOBAL(swapgs_restore_regs_and_return_to_usermode)
+	IBRS_EXIT
 #ifdef CONFIG_DEBUG_ENTRY
 	/* Assert that pt_regs indicates user mode. */
 	testb	$3, CS(%rsp)
@@ -1133,6 +1139,9 @@ idtentry machine_check do_mce has_err
  * Save all registers in pt_regs, and switch gs if needed.
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
+ *
+ * R14 - old CR3
+ * R15 - old SPEC_CTRL
  */
 ENTRY(paranoid_entry)
 	UNWIND_HINT_FUNC
@@ -1156,6 +1165,12 @@ ENTRY(paranoid_entry)
 	 */
 	FENCE_SWAPGS_KERNEL_ENTRY
 
+	/*
+	 * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like
+	 * CR3 above, keep the old value in a callee saved register.
+	 */
+	IBRS_ENTER save_reg=%r15
+
 	ret
 END(paranoid_entry)
 
@@ -1170,9 +1185,19 @@ END(paranoid_entry)
  * to try to handle preemption here.
  *
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
+ *
+ * R14 - old CR3
+ * R15 - old SPEC_CTRL
  */
 ENTRY(paranoid_exit)
 	UNWIND_HINT_REGS
+
+	/*
+	 * Must restore IBRS state before both CR3 and %GS since we need access
+	 * to the per-CPU x86_spec_ctrl_shadow variable.
+	 */
+	IBRS_EXIT save_reg=%r15
+
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF_DEBUG
 	testl	%ebx, %ebx			/* swapgs needed? */
@@ -1207,8 +1232,10 @@ ENTRY(error_entry)
 	FENCE_SWAPGS_USER_ENTRY
 	/* We have user CR3. Change to kernel CR3. */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	IBRS_ENTER
 
 .Lerror_entry_from_usermode_after_swapgs:
+
 	/* Put us onto the real thread stack. */
 	popq	%r12				/* save return addr in %12 */
 	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
@@ -1271,6 +1298,7 @@ ENTRY(error_entry)
 	SWAPGS
 	FENCE_SWAPGS_USER_ENTRY
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	IBRS_ENTER
 
 	/*
 	 * Pretend that the exception came from user mode: set up pt_regs
@@ -1376,6 +1404,8 @@ ENTRY(nmi)
 	PUSH_AND_CLEAR_REGS rdx=(%rdx)
 	ENCODE_FRAME_POINTER
 
+	IBRS_ENTER
+
 	/*
 	 * At this point we no longer need to worry about stack damage
 	 * due to nesting -- we're on the normal thread stack and we're
@@ -1599,6 +1629,9 @@ end_repeat_nmi:
 	movq	$-1, %rsi
 	call	do_nmi
 
+	/* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
+	IBRS_EXIT save_reg=%r15
+
 	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
 
 	testl	%ebx, %ebx			/* swapgs needed? */
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -4,7 +4,6 @@
  *
  * Copyright 2000-2002 Andi Kleen, SuSE Labs.
  */
-#include "calling.h"
 #include <asm/asm-offsets.h>
 #include <asm/current.h>
 #include <asm/errno.h>
@@ -17,6 +16,8 @@
 #include <linux/linkage.h>
 #include <linux/err.h>
 
+#include "calling.h"
+
 .section .entry.text, "ax"
 
 /*
@@ -106,6 +107,8 @@ ENTRY(entry_SYSENTER_compat)
 	xorl	%r15d, %r15d		/* nospec   r15 */
 	cld
 
+	IBRS_ENTER
+
 	/*
 	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
 	 * ourselves. To save a few cycles, we can check whether
@@ -250,6 +253,8 @@ GLOBAL(entry_SYSCALL_compat_after_hwfram
 	 */
 	TRACE_IRQS_OFF
 
+	IBRS_ENTER
+
 	movq	%rsp, %rdi
 	call	do_fast_syscall_32
 	/* XEN PV guests always use IRET path */
@@ -259,6 +264,9 @@ GLOBAL(entry_SYSCALL_compat_after_hwfram
 	/* Opportunistic SYSRET */
 sysret32_from_system_call:
 	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
+
+	IBRS_EXIT
+
 	movq	RBX(%rsp), %rbx		/* pt_regs->rbx */
 	movq	RBP(%rsp), %rbp		/* pt_regs->rbp */
 	movq	EFLAGS(%rsp), %r11	/* pt_regs->flags (in r11) */
@@ -385,6 +393,8 @@ ENTRY(entry_INT80_compat)
 	 */
 	TRACE_IRQS_OFF
 
+	IBRS_ENTER
+
 	movq	%rsp, %rdi
 	call	do_int80_syscall_32
 .Lsyscall_32_done:
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -202,7 +202,7 @@
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
 #define X86_FEATURE_PTI			( 7*32+11) /* Kernel Page Table Isolation enabled */
-/* FREE!				( 7*32+12) */
+#define X86_FEATURE_KERNEL_IBRS		( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
 /* FREE!				( 7*32+13) */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */


Patches currently in stable-queue which might be from surajjs@xxxxxxxxxx are

queue-4.14/x86-speculation-disable-rrsba-behavior.patch
queue-4.14/x86-bugs-report-intel-retbleed-vulnerability.patch
queue-4.14/x86-entry-add-kernel-ibrs-implementation.patch
queue-4.14/x86-bugs-warn-when-ibrs-mitigation-is-selected-on-enhanced-ibrs-parts.patch
queue-4.14/x86-cpu-add-a-steppings-field-to-struct-x86_cpu_id.patch
queue-4.14/x86-speculation-fix-spec_ctrl-write-on-smt-state-change.patch
queue-4.14/entel_idle-disable-ibrs-during-long-idle.patch
queue-4.14/x86-speculation-use-cached-host-spec_ctrl-value-for-guest-entry-exit.patch
queue-4.14/x86-speculation-change-fill_return_buffer-to-work-with-objtool.patch
queue-4.14/x86-speculation-add-lfence-to-rsb-fill-sequence.patch
queue-4.14/x86-bugs-report-amd-retbleed-vulnerability.patch
queue-4.14/x86-speculation-add-spectre_v2-ibrs-option-to-support-kernel-ibrs.patch
queue-4.14/x86-bugs-keep-a-per-cpu-ia32_spec_ctrl-value.patch
queue-4.14/x86-speculation-fill-rsb-on-vmexit-for-ibrs.patch
queue-4.14/x86-speculation-fix-rsb-filling-with-config_retpoline-n.patch
queue-4.14/x86-speculation-add-rsb-vm-exit-protections.patch
queue-4.14/x86-bugs-optimize-spec_ctrl-msr-writes.patch
queue-4.14/kvm-vmx-fix-ibrs-handling-after-vmexit.patch
queue-4.14/kvm-vmx-prevent-guest-rsb-poisoning-attacks-with-eibrs.patch
queue-4.14/x86-speculation-fix-firmware-entry-spec_ctrl-handling.patch
queue-4.14/x86-bugs-add-amd-retbleed-boot-parameter.patch
queue-4.14/x86-cpu-add-consistent-cpu-match-macros.patch
queue-4.14/x86-speculation-remove-x86_spec_ctrl_mask.patch
queue-4.14/x86-speculation-use-declare_per_cpu-for-x86_spec_ctrl_current.patch
queue-4.14/x86-cpufeature-fix-various-quality-problems-in-the-asm-cpu_device_hd.h-header.patch
queue-4.14/x86-entry-remove-skip_r11rcx.patch
queue-4.14/revert-x86-cpu-add-a-steppings-field-to-struct-x86_cpu_id.patch
queue-4.14/x86-common-stamp-out-the-stepping-madness.patch
queue-4.14/x86-cpu-amd-enumerate-btc_no.patch
queue-4.14/x86-bugs-add-cannon-lake-to-retbleed-affected-cpu-list.patch
queue-4.14/x86-devicetable-move-x86-specific-macro-out-of-generic-code.patch
queue-4.14/x86-bugs-split-spectre_v2_select_mitigation-and-spectre_v2_user_select_mitigation.patch
queue-4.14/x86-cpufeatures-move-retpoline-flags-to-word-11.patch
queue-4.14/x86-cpufeature-add-facility-to-check-for-min-microcode-revisions.patch
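
As a footnote on the save_reg form used by paranoid_entry/paranoid_exit
and the NMI path in the patch above: in the same rough C terms as the
sketch near the top of this mail (again illustrative only, same includes
assumed, helper names hypothetical), the save/restore pairing looks like:

	/* Paranoid entry: stash the live MSR value, then enable IBRS if
	 * it is not already set. The assembly keeps "prev" in %r15 and
	 * only issues an LFENCE when the bit was already set. */
	static u64 paranoid_ibrs_enter(void)	/* hypothetical name */
	{
		u64 prev = native_read_msr(MSR_IA32_SPEC_CTRL);

		if (!(prev & SPEC_CTRL_IBRS))
			wrmsrl(MSR_IA32_SPEC_CTRL,
			       this_cpu_read(x86_spec_ctrl_current));
		return prev;
	}

	/* Paranoid exit: restore exactly the value seen on entry, since
	 * a paranoid entry can interrupt any context, kernel or user,
	 * including a window where IBRS was not yet enabled. */
	static void paranoid_ibrs_exit(u64 prev)	/* hypothetical name */
	{
		wrmsrl(MSR_IA32_SPEC_CTRL, prev);
	}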