This is a note to let you know that I've just added the patch titled

    KVM/x86: Remove indirect MSR op calls from SPEC_CTRL

to the 4.15-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     kvm-x86-remove-indirect-msr-op-calls-from-spec_ctrl.patch
and it can be found in the queue-4.15 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


>From ecb586bd29c99fb4de599dec388658e74388daad Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Date: Thu, 22 Feb 2018 16:43:17 +0100
Subject: KVM/x86: Remove indirect MSR op calls from SPEC_CTRL
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: Paolo Bonzini <pbonzini@xxxxxxxxxx>

commit ecb586bd29c99fb4de599dec388658e74388daad upstream.

Having a paravirt indirect call in the IBRS restore path is not a
good idea, since we are trying to protect from speculative execution
of bogus indirect branch targets.  It is also slower, so use
native_wrmsrl() on the vmentry path too.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Reviewed-by: Jim Mattson <jmattson@xxxxxxxxxx>
Cc: David Woodhouse <dwmw@xxxxxxxxxxxx>
Cc: KarimAllah Ahmed <karahmed@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Radim Krčmář <rkrcmar@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: kvm@xxxxxxxxxxxxxxx
Cc: stable@xxxxxxxxxxxxxxx
Fixes: d28b387fb74da95d69d2615732f50cceb38e9a4d
Link: http://lkml.kernel.org/r/20180222154318.20361-2-pbonzini@xxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 arch/x86/kvm/svm.c |    7 ++++---
 arch/x86/kvm/vmx.c |    7 ++++---
 2 files changed, 8 insertions(+), 6 deletions(-)

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>

 #include <asm/virtext.h>
@@ -5029,7 +5030,7 @@ static void svm_vcpu_run(struct kvm_vcpu
         * being speculatively taken.
         */
        if (svm->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

        asm volatile (
                "push %%" _ASM_BP "; \n\t"
@@ -5139,10 +5140,10 @@ static void svm_vcpu_run(struct kvm_vcpu
         * save it.
         */
        if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-               rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+               svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

        if (svm->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>

 #include "trace.h"
@@ -9443,7 +9444,7 @@ static void __noclone vmx_vcpu_run(struc
         * being speculatively taken.
         */
        if (vmx->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);

        vmx->__launched = vmx->loaded_vmcs->launched;
        asm(
@@ -9579,10 +9580,10 @@ static void __noclone vmx_vcpu_run(struc
         * save it.
         */
        if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-               rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+               vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

        if (vmx->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();


Patches currently in stable-queue which might be from pbonzini@xxxxxxxxxx are

queue-4.15/kvm-x86-fix-smram-accessing-even-if-vm-is-shutdown.patch
queue-4.15/kvm-x86-remove-indirect-msr-op-calls-from-spec_ctrl.patch
queue-4.15/kvm-x86-move-lapic-initialization-after-vmcs-creation.patch
queue-4.15/kvm-mmu-fix-overlap-between-public-and-private-memslots.patch
queue-4.15/kvm-vmx-optimize-vmx_vcpu_run-and-svm_vcpu_run-by-marking-the-rdmsr-path-as-unlikely.patch
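
For readers unfamiliar with the distinction the commit message draws, the
following is a minimal, self-contained sketch (not kernel source; the type
and function names here are made up for illustration) of why a paravirt-style
wrmsrl() involves an indirect branch while native_wrmsrl() does not: the
former dispatches through a function-pointer table, the latter inlines the
WRMSR instruction directly.

/*
 * Illustrative sketch only -- not kernel code. It shows the code-generation
 * difference the patch relies on: an ops-table MSR write is an indirect
 * call (a potential branch-target-injection target, plus call overhead),
 * while a "native" write emits WRMSR inline with no branch at all.
 * WRMSR is a privileged instruction, so this compiles (x86 gcc/clang) but
 * is not meant to be executed from user space.
 */
#include <stdint.h>

struct msr_write_ops {
        void (*write_msr)(uint32_t msr, uint64_t val);  /* indirect target */
};

static struct msr_write_ops pv_ops;     /* filled in at boot in a real kernel */

/* Paravirt-style wrapper: the write goes through a pointer -> indirect call. */
static inline void pv_style_wrmsrl(uint32_t msr, uint64_t val)
{
        pv_ops.write_msr(msr, val);
}

/* Native-style wrapper: the WRMSR instruction is emitted inline, no branch. */
static inline void native_style_wrmsrl(uint32_t msr, uint64_t val)
{
        uint32_t lo = (uint32_t)val;
        uint32_t hi = (uint32_t)(val >> 32);

        asm volatile("wrmsr" : : "c"(msr), "a"(lo), "d"(hi) : "memory");
}

This is the same trade-off the patch makes on the vmentry/vmexit paths by
switching the MSR_IA32_SPEC_CTRL accesses from wrmsrl()/rdmsrl() to
native_wrmsrl()/native_read_msr().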