----- pbonzini@xxxxxxxxxx wrote:
> Direct access to MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD is important
> for performance. Allow load/store of MSR_IA32_SPEC_CTRL, restore guest
> IBRS on VM entry and set it to 0 on VM exit (because Linux does not use
> it yet).
>
> Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> ---
>  arch/x86/kvm/svm.c | 42 ++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 42 insertions(+)
>
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 31ace8d7774a..934a21e02e03 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -183,6 +183,8 @@ struct vcpu_svm {
>  		u64 gs_base;
>  	} host;
>
> +	u64 spec_ctrl;
> +
>  	u32 *msrpm;
>
>  	ulong nmi_iret_rip;
> @@ -248,6 +250,8 @@ struct amd_svm_iommu_ir {
>  	{ .index = MSR_CSTAR,			.always = true  },
>  	{ .index = MSR_SYSCALL_MASK,		.always = true  },
>  #endif
> +	{ .index = MSR_IA32_SPEC_CTRL,		.always = true  },
> +	{ .index = MSR_IA32_PRED_CMD,		.always = true  },
>  	{ .index = MSR_IA32_LASTBRANCHFROMIP,	.always = false },
>  	{ .index = MSR_IA32_LASTBRANCHTOIP,	.always = false },
>  	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
> @@ -283,6 +287,8 @@ struct amd_svm_iommu_ir {
>  /* enable/disable Virtual GIF */
>  static int vgif = true;
>  module_param(vgif, int, 0444);
> +
> +static bool __read_mostly have_spec_ctrl;
>
>  static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
>  static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
> @@ -1135,6 +1141,17 @@ static __init int svm_hardware_setup(void)
>  			pr_info("Virtual GIF supported\n");
>  	}
>
> +	/*
> +	 * FIXME: this is only needed until SPEC_CTRL is supported
> +	 * by upstream Linux in cpufeatures, then it can be replaced
> +	 * with static_cpu_has.
> +	 */
> +	have_spec_ctrl = cpu_has_spec_ctrl();
> +	if (have_spec_ctrl)
> +		pr_info("kvm: SPEC_CTRL available\n");
> +	else
> +		pr_info("kvm: SPEC_CTRL not available\n");
> +
>  	return 0;
>
>  err:
> @@ -3599,6 +3616,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  	case MSR_VM_CR:
>  		msr_info->data = svm->nested.vm_cr_msr;
>  		break;
> +	case MSR_IA32_SPEC_CTRL:
> +		msr_info->data = svm->spec_ctrl;
> +		break;
>  	case MSR_IA32_UCODE_REV:
>  		msr_info->data = 0x01000065;
>  		break;
> @@ -3754,6 +3774,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  	case MSR_VM_IGNNE:
>  		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
>  		break;
> +	case MSR_IA32_SPEC_CTRL:
> +		svm->spec_ctrl = data;
> +		break;
>  	case MSR_IA32_APICBASE:
>  		if (kvm_vcpu_apicv_active(vcpu))
>  			avic_update_vapic_bar(to_svm(vcpu), data);
> @@ -4942,6 +4965,13 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>
>  	local_irq_enable();
>
> +	/*
> +	 * MSR_IA32_SPEC_CTRL is restored after the last indirect branch
> +	 * before vmentry.
> +	 */
> +	if (have_spec_ctrl && svm->spec_ctrl != 0)
> +		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
> +
>  	asm volatile (
>  		"push %%" _ASM_BP "; \n\t"
>  		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
> @@ -5015,6 +5045,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  #endif
>  		);
>
> +	if (have_spec_ctrl) {
> +		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
> +		if (svm->spec_ctrl != 0)
> +			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
> +	}
> +	/*
> +	 * Speculative execution past the above wrmsrl might encounter
> +	 * an indirect branch and use guest-controlled contents of the
> +	 * indirect branch predictor; block it.
> +	 */
> +	asm("lfence");
> +
>  #ifdef CONFIG_X86_64
>  	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
>  #else
> --
> 1.8.3.1

Reviewed-by: Liran Alon <liran.alon@xxxxxxxxxx>
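For anyone reading the diff in isolation, the vmentry/vmexit changes boil
down to a small save/restore pattern wrapped around the inline-assembly
guest switch. A condensed sketch of that flow follows; the helper names
are only for illustration, the patch open-codes the same logic directly
in svm_vcpu_run():

/*
 * Illustrative sketch of the SPEC_CTRL handling added to svm_vcpu_run();
 * the helpers are hypothetical, the patch inlines this logic.
 */
static void spec_ctrl_enter_guest(struct vcpu_svm *svm)
{
	/*
	 * Restore the guest's IBRS value after the last host indirect
	 * branch, immediately before the vmentry asm block.
	 */
	if (have_spec_ctrl && svm->spec_ctrl != 0)
		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
}

static void spec_ctrl_exit_guest(struct vcpu_svm *svm)
{
	/*
	 * Right after vmexit: remember what the guest left in the MSR
	 * (so svm_get_msr() can report it) and clear it for the host.
	 */
	if (have_spec_ctrl) {
		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
		if (svm->spec_ctrl != 0)
			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
	}

	/*
	 * Keep speculation from reaching a host indirect branch while
	 * guest-controlled predictor contents may still be in use.
	 */
	asm("lfence");
}

The lfence is the subtle part: clearing SPEC_CTRL with wrmsrl is only an
architectural write, so without the fence speculation could still run host
indirect branches using guest-trained predictor state, as the patch's own
comment notes.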