4.9-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Paolo Bonzini <pbonzini@xxxxxxxxxx>

commit 8e0b2b916662e09dd4d09e5271cdf214c6b80e62 upstream

Bit 3 of ARCH_CAPABILITIES tells a hypervisor that L1D flush on vmentry is
not needed.  Add a new value to enum vmx_l1d_flush_state, which is used
either if there is no L1TF bug at all, or if bit 3 is set in
ARCH_CAPABILITIES.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/msr-index.h |    1 +
 arch/x86/include/asm/vmx.h       |    1 +
 arch/x86/kernel/cpu/bugs.c       |    1 +
 arch/x86/kvm/vmx.c               |   10 ++++++++++
 4 files changed, 13 insertions(+)

--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -63,6 +63,7 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	(1 << 3)   /* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO			(1 << 4)   /*
						    * Not susceptible to Speculative Store Bypass
						    * attack, so no Speculative Store Bypass
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -505,6 +505,7 @@ enum vmx_l1d_flush_state {
 	VMENTER_L1D_FLUSH_COND,
 	VMENTER_L1D_FLUSH_ALWAYS,
 	VMENTER_L1D_FLUSH_EPT_DISABLED,
+	VMENTER_L1D_FLUSH_NOT_REQUIRED,
 };
 
 extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -730,6 +730,7 @@ static const char *l1tf_vmx_states[] = {
 	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
 	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
 	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
+	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
 };
 
 static ssize_t l1tf_show_state(char *buf)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -219,6 +219,16 @@ static int vmx_setup_l1d_flush(enum vmx_
 		return 0;
 	}
 
+	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
+		u64 msr;
+
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
+		if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+			l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+			return 0;
+		}
+	}
+
 	/* If set to auto use the default l1tf mitigation method */
 	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
 		switch (l1tf_mitigation) {
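
For anyone who wants to check the new bit on their own hardware while
reviewing: below is a minimal userspace sketch (not part of the patch)
that reads IA32_ARCH_CAPABILITIES (0x10a) through the msr driver and
tests bit 3, mirroring the check vmx_setup_l1d_flush() now performs at
module load.  It assumes root and a loaded msr module (modprobe msr);
the bit position follows the ARCH_CAP_SKIP_VMENTRY_L1DFLUSH define
added above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* pread() at the MSR address is the msr driver's rdmsr interface */
	if (fd < 0 || pread(fd, &val, sizeof(val), 0x10a) != sizeof(val)) {
		perror("rdmsr 0x10a");	/* CPU may not enumerate ARCH_CAPABILITIES */
		return 1;
	}
	printf("ARCH_CAPABILITIES=%#llx, skip L1D flush on vmentry: %s\n",
	       (unsigned long long)val, (val & (1ULL << 3)) ? "yes" : "no");
	close(fd);
	return 0;
}

On a guest whose hypervisor sets the bit, the l1tf entry in
/sys/devices/system/cpu/vulnerabilities should then report the new
"flush not necessary" VMX state added in bugs.c above.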