Current cpuid faulting of the guest is purely emulated in KVM: the
CPUID VM exit is used to inject #GP into the guest. However, if the
host CPU has X86_FEATURE_CPUID_FAULT, we can simply use hardware cpuid
faulting for the guest and avoid the VM exit overhead.

Note: cpuid faulting takes priority over the CPUID instruction VM exit
(Intel SDM vol. 3, 25.1.1).

Since cpuid faulting only exists on some Intel CPUs, apply this
optimization to VMX only.

Signed-off-by: Xiaoyao Li <xiaoyao.li@xxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/vmx/vmx.c          | 19 +++++++++++++++----
 arch/x86/kvm/x86.c              | 15 ++++++++++++---
 3 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ce79d7bfe1fd..14cad587b804 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1339,6 +1339,8 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
+int kvm_supported_msr_misc_features_enables(struct kvm_vcpu *vcpu, u64 data);
+
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2c59e0209e36..6b413e471dca 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1037,7 +1037,7 @@ static void pt_guest_exit(struct vcpu_vmx *vmx)
 
 static void vmx_save_host_cpuid_fault(struct vcpu_vmx *vmx)
 {
-        u64 host_val;
+        u64 host_val, guest_val;
 
         if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
                 return;
@@ -1045,10 +1045,12 @@ static void vmx_save_host_cpuid_fault(struct vcpu_vmx *vmx)
         rdmsrl(MSR_MISC_FEATURES_ENABLES, host_val);
         vmx->host_msr_misc_features_enables = host_val;
 
-        /* clear cpuid fault bit to avoid it leak to guest */
-        if (host_val & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT) {
+        guest_val = vmx->vcpu.arch.msr_misc_features_enables;
+
+        /* we can use the hardware cpuid faulting to avoid emulation overhead */
+        if ((host_val ^ guest_val) & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT) {
                 wrmsrl(MSR_MISC_FEATURES_ENABLES,
-                       host_val & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT);
+                       host_val ^ MSR_MISC_FEATURES_ENABLES_CPUID_FAULT);
         }
 }
 
@@ -2057,6 +2059,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 else
                         vmx->pt_desc.guest.addr_a[index / 2] = data;
                 break;
+        case MSR_MISC_FEATURES_ENABLES:
+                if (!kvm_supported_msr_misc_features_enables(vcpu, data))
+                        return 1;
+                if (boot_cpu_has(X86_FEATURE_CPUID_FAULT)) {
+                        if (vmx->loaded_cpu_state)
+                                wrmsrl(MSR_MISC_FEATURES_ENABLES, data);
+                }
+                vcpu->arch.msr_misc_features_enables = data;
+                break;
         case MSR_TSC_AUX:
                 if (!msr_info->host_initiated &&
                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 434ec113cc79..33a8c95b2f2e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2449,6 +2449,17 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
+int kvm_supported_msr_misc_features_enables(struct kvm_vcpu *vcpu, u64 data)
+{
+        if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
+            (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
+             !supports_cpuid_fault(vcpu)))
+                return 0;
+        else
+                return 1;
+}
+EXPORT_SYMBOL_GPL(kvm_supported_msr_misc_features_enables);
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
         bool pr = false;
@@ -2669,9 +2680,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 vcpu->arch.msr_platform_info = data;
                 break;
         case MSR_MISC_FEATURES_ENABLES:
-                if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
-                    (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
-                     !supports_cpuid_fault(vcpu)))
+                if (!kvm_supported_msr_misc_features_enables(vcpu, data))
                         return 1;
                 vcpu->arch.msr_misc_features_enables = data;
                 break;
-- 
2.19.1
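
For context, below is a minimal guest-side sketch (a hypothetical test
program, not part of the patch) of the behaviour this change accelerates.
A guest task can ask its kernel to set MSR_MISC_FEATURES_ENABLES.CPUID_FAULT
through the existing arch_prctl(ARCH_SET_CPUID, 0) interface; every CPUID it
executes afterwards takes #GP, which the guest kernel reports as SIGSEGV.
Without this patch, each such CPUID still causes a CPUID VM exit followed by
an emulated #GP injection; with it, hardware cpuid faulting delivers the #GP
inside the guest directly.

/* cpuid_fault_test.c - hypothetical guest-side check, build with gcc -O2 */
#include <stdio.h>
#include <signal.h>
#include <setjmp.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>          /* ARCH_GET_CPUID / ARCH_SET_CPUID */

static sigjmp_buf env;

static void on_sigsegv(int sig)
{
        /* jump past the faulting CPUID instruction */
        siglongjmp(env, 1);
}

int main(void)
{
        unsigned int eax = 0, ebx, ecx, edx;

        signal(SIGSEGV, on_sigsegv);

        /* ask the guest kernel to set MSR_MISC_FEATURES_ENABLES.CPUID_FAULT */
        if (syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0)) {
                perror("ARCH_SET_CPUID (cpuid faulting not available?)");
                return 1;
        }

        if (sigsetjmp(env, 1) == 0) {
                asm volatile("cpuid"
                             : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx));
                printf("CPUID did not fault, max basic leaf: %u\n", eax);
                return 1;
        }

        printf("CPUID faulted (SIGSEGV) as expected\n");
        return 0;
}

The guest kernel's resulting write to MSR_MISC_FEATURES_ENABLES is what
lands in the new MSR_MISC_FEATURES_ENABLES case of vmx_set_msr() above.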