On 10/07/20 17:25, Vitaly Kuznetsov wrote: > state_test/smm_test selftests are failing on AMD with: > "Unexpected result from KVM_GET_MSRS, r: 51 (failed MSR was 0x345)" > > MSR_IA32_PERF_CAPABILITIES is an emulated MSR on Intel but it is not > known to AMD code, we can move the emulation to common x86 code. For > AMD, we basically just allow the host to read and write zero to the MSR. > > Fixes: 27461da31089 ("KVM: x86/pmu: Support full width counting") > Suggested-by: Jim Mattson <jmattson@xxxxxxxxxx> > Suggested-by: Paolo Bonzini <pbonzini@xxxxxxxxxx> > Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx> > --- > - This is a successor of "[PATCH v2] KVM: SVM: emulate MSR_IA32_PERF_CAPABILITIES". > --- > arch/x86/kvm/svm/svm.c | 2 ++ > arch/x86/kvm/vmx/pmu_intel.c | 17 ----------------- > arch/x86/kvm/x86.c | 20 ++++++++++++++++++++ > 3 files changed, 22 insertions(+), 17 deletions(-) > > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c > index c0da4dd78ac5..1b68cc6cd756 100644 > --- a/arch/x86/kvm/svm/svm.c > +++ b/arch/x86/kvm/svm/svm.c > @@ -2358,6 +2358,8 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr) > if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) > msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; > break; > + case MSR_IA32_PERF_CAPABILITIES: > + return 0; > default: > return 1; > } > diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c > index bdcce65c7a1d..a886a47daebd 100644 > --- a/arch/x86/kvm/vmx/pmu_intel.c > +++ b/arch/x86/kvm/vmx/pmu_intel.c > @@ -180,9 +180,6 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) > case MSR_CORE_PERF_GLOBAL_OVF_CTRL: > ret = pmu->version > 1; > break; > - case MSR_IA32_PERF_CAPABILITIES: > - ret = 1; > - break; > default: > ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || > get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || > @@ -224,12 +221,6 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > case MSR_CORE_PERF_GLOBAL_OVF_CTRL: > 
msr_info->data = pmu->global_ovf_ctrl; > return 0; > - case MSR_IA32_PERF_CAPABILITIES: > - if (!msr_info->host_initiated && > - !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) > - return 1; > - msr_info->data = vcpu->arch.perf_capabilities; > - return 0; > default: > if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || > (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { > @@ -289,14 +280,6 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > return 0; > } > break; > - case MSR_IA32_PERF_CAPABILITIES: > - if (!msr_info->host_initiated) > - return 1; > - if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) ? > - (data & ~vmx_get_perf_capabilities()) : data) > - return 1; > - vcpu->arch.perf_capabilities = data; > - return 0; > default: > if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || > (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > index 3b92db412335..a08bd66cd662 100644 > --- a/arch/x86/kvm/x86.c > +++ b/arch/x86/kvm/x86.c > @@ -2817,6 +2817,20 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > return 1; > vcpu->arch.arch_capabilities = data; > break; > + case MSR_IA32_PERF_CAPABILITIES: { > + struct kvm_msr_entry msr_ent = {.index = msr, .data = 0}; > + > + if (!msr_info->host_initiated) > + return 1; > + if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) && kvm_get_msr_feature(&msr_ent)) > + return 1; > + if (data & ~msr_ent.data) > + return 1; > + > + vcpu->arch.perf_capabilities = data; > + > + return 0; > + } > case MSR_EFER: > return set_efer(vcpu, msr_info); > case MSR_K7_HWCR: > @@ -3167,6 +3181,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > return 1; > msr_info->data = vcpu->arch.arch_capabilities; > break; > + case MSR_IA32_PERF_CAPABILITIES: > + if (!msr_info->host_initiated && > + !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) > + return 1; > + msr_info->data = vcpu->arch.perf_capabilities; > + break; > case 
MSR_IA32_POWER_CTL: > msr_info->data = vcpu->arch.msr_ia32_power_ctl; > break; > Queued, thanks. Paolo