From: Sandipan Das <sandipan.das@xxxxxxx>

Implement the AMD-specific callback for the passthrough PMU that
disables interception of PMU-related MSRs if the guest PMU counters
meet the requirements for passthrough. The PMU registers include the
following:

- PerfCntrGlobalStatus (MSR 0xc0000300)
- PerfCntrGlobalCtl (MSR 0xc0000301)
- PerfCntrGlobalStatusClr (MSR 0xc0000302)
- PerfCntrGlobalStatusSet (MSR 0xc0000303)
- PERF_CTLx and PERF_CTRx pairs (MSRs 0xc0010200..0xc001020b)

Note that passthrough/interception is configured after each CPUID
update. Since the CPUID can be set multiple times, explicitly set or
clear the interception bitmap for each counter as well as for the
global registers.

Note that even if the host is PerfCtrCore or PerfMonV2 capable, a
guest should still be able to use the four K7 legacy counters.
Disable interception of these MSRs in passthrough mode.

Signed-off-by: Sandipan Das <sandipan.das@xxxxxxx>
Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
---
 arch/x86/kvm/svm/pmu.c | 55 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 0a16f0eb2511..cc03c3e9941f 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -248,6 +248,60 @@ static bool amd_is_rdpmc_passthru_allowed(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static void amd_passthrough_pmu_msrs(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+	int msr_clear = !!(is_passthrough_pmu_enabled(vcpu));
+	int i;
+
+	for (i = 0; i < min(pmu->nr_arch_gp_counters, AMD64_NUM_COUNTERS); i++) {
+		/*
+		 * Legacy counters are always available irrespective of any
+		 * CPUID feature bits and when X86_FEATURE_PERFCTR_CORE is set,
+		 * PERF_LEGACY_CTLx and PERF_LEGACY_CTRx registers are mirrored
+		 * with PERF_CTLx and PERF_CTRx respectively.
+		 */
+		set_msr_interception(vcpu, svm->msrpm, MSR_K7_EVNTSEL0 + i, 0, 0);
+		set_msr_interception(vcpu, svm->msrpm, MSR_K7_PERFCTR0 + i, msr_clear, msr_clear);
+	}
+
+	for (i = 0; i < kvm_pmu_cap.num_counters_gp; i++) {
+		/*
+		 * PERF_CTLx registers require interception in order to clear
+		 * HostOnly bit and set GuestOnly bit. This is to prevent the
+		 * PERF_CTRx registers from counting before VM entry and after
+		 * VM exit.
+		 */
+		set_msr_interception(vcpu, svm->msrpm, MSR_F15H_PERF_CTL + 2 * i, 0, 0);
+
+		/*
+		 * Pass through counters exposed to the guest and intercept
+		 * counters that are unexposed. Do this explicitly since this
+		 * function may be set multiple times before vcpu runs.
+		 */
+		if (i >= pmu->nr_arch_gp_counters)
+			msr_clear = 0;
+		set_msr_interception(vcpu, svm->msrpm, MSR_F15H_PERF_CTR + 2 * i, msr_clear, msr_clear);
+	}
+
+	/*
+	 * In mediated passthrough vPMU, intercept global PMU MSRs when guest
+	 * PMU only owns a subset of counters provided in HW or its version is
+	 * less than 2.
+	 */
+	if (is_passthrough_pmu_enabled(vcpu) && pmu->version > 1 &&
+	    pmu->nr_arch_gp_counters == kvm_pmu_cap.num_counters_gp)
+		msr_clear = 1;
+	else
+		msr_clear = 0;
+
+	set_msr_interception(vcpu, svm->msrpm, MSR_AMD64_PERF_CNTR_GLOBAL_CTL, msr_clear, msr_clear);
+	set_msr_interception(vcpu, svm->msrpm, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, msr_clear, msr_clear);
+	set_msr_interception(vcpu, svm->msrpm, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, msr_clear, msr_clear);
+	set_msr_interception(vcpu, svm->msrpm, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET, msr_clear, msr_clear);
+}
+
 struct kvm_pmu_ops amd_pmu_ops __initdata = {
 	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
 	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
@@ -258,6 +312,7 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
 	.refresh = amd_pmu_refresh,
 	.init = amd_pmu_init,
 	.is_rdpmc_passthru_allowed = amd_is_rdpmc_passthru_allowed,
+	.passthrough_pmu_msrs = amd_passthrough_pmu_msrs,
 	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
 	.MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
 	.MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
-- 
2.46.0.rc1.232.g9752f9e123-goog
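
As a reviewer-side illustration (not part of the patch), the condition
that decides whether the global PMU MSRs can be passed through can be
modeled in plain user-space C. This is only a sketch: struct pmu_state,
global_msrs_passthrough() and the main() harness below are hypothetical
stand-ins for struct kvm_pmu and the version/ownership check in
amd_passthrough_pmu_msrs(), not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant fields of struct kvm_pmu. */
struct pmu_state {
	bool passthrough_enabled;	/* mediated passthrough vPMU active */
	unsigned int version;		/* guest PerfMon version */
	unsigned int nr_gp_counters;	/* counters exposed to the guest */
	unsigned int hw_gp_counters;	/* counters present in hardware */
};

/*
 * Mirrors the final check in amd_passthrough_pmu_msrs(): the global
 * control/status MSRs may only be passed through when the guest PMU
 * is v2+ and owns every hardware counter; otherwise they stay
 * intercepted.
 */
static bool global_msrs_passthrough(const struct pmu_state *p)
{
	return p->passthrough_enabled && p->version > 1 &&
	       p->nr_gp_counters == p->hw_gp_counters;
}

int main(void)
{
	struct pmu_state full    = { true, 2, 6, 6 };
	struct pmu_state partial = { true, 2, 4, 6 };
	struct pmu_state v1      = { true, 1, 6, 6 };

	printf("full ownership, v2: %d\n", global_msrs_passthrough(&full));	/* 1 */
	printf("partial ownership:  %d\n", global_msrs_passthrough(&partial));	/* 0 */
	printf("v1 guest PMU:       %d\n", global_msrs_passthrough(&v1));	/* 0 */
	return 0;
}

A guest that owns only a subset of the hardware counters, or whose PMU
version is below 2, keeps the global MSRs intercepted so that KVM can
emulate them instead.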