From: Like Xu <likexu@xxxxxxxxxxx>

AMD only has gp counters, whose corresponding vPMCs are initialised
and stored in pmu->gp_counters[] in order of idx, so we can access
this array directly based on any valid pmc->idx, without any help
from other interfaces at all. amd_rdpmc_ecx_to_pmc() can now reuse
this code quite naturally.

Opportunistically apply array_index_nospec() to reduce the attack
surface for speculative execution, and remove the dead code.

Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
---
v1 -> v2 Changelog:
- Remove unused helper get_msr_base();

 arch/x86/kvm/svm/pmu.c | 45 +++++++-----------------------------------
 1 file changed, 7 insertions(+), 38 deletions(-)

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index a3b78342a221..6cd8d3c2000c 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -61,21 +61,14 @@ static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
 static_assert(ARRAY_SIZE(amd_event_mapping) ==
 	      ARRAY_SIZE(amd_f17h_event_mapping));
 
-static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
+static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 {
-	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+	unsigned int num_counters = pmu->nr_arch_gp_counters;
 
-	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
-		if (type == PMU_TYPE_COUNTER)
-			return MSR_F15H_PERF_CTR;
-		else
-			return MSR_F15H_PERF_CTL;
-	} else {
-		if (type == PMU_TYPE_COUNTER)
-			return MSR_K7_PERFCTR0;
-		else
-			return MSR_K7_EVNTSEL0;
-	}
+	if (pmc_idx >= num_counters)
+		return NULL;
+
+	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
 }
 
 static enum index msr_to_index(u32 msr)
@@ -186,22 +179,6 @@ static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
 	return true;
 }
 
-static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
-{
-	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
-	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
-	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
-		/*
-		 * The idx is contiguous. The MSRs are not. The counter MSRs
-		 * are interleaved with the event select MSRs.
-		 */
-		pmc_idx *= 2;
-	}
-
-	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
-}
-
 static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -215,15 +192,7 @@ static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	unsigned int idx, u64 *mask)
 {
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	struct kvm_pmc *counters;
-
-	idx &= ~(3u << 30);
-	if (idx >= pmu->nr_arch_gp_counters)
-		return NULL;
-	counters = pmu->gp_counters;
-
-	return &counters[idx];
+	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
 }
 
 static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
-- 
2.36.1
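
For reference, the hardening pattern used in amd_pmc_idx_to_pmc() above
can be illustrated with a stand-alone user-space sketch. The helper
below mirrors the generic C fallback for array_index_mask_nospec() from
include/linux/nospec.h (architectures may override it with branchless
asm); the counter array, its contents and the out-of-range indices are
made up for the demo:

#include <stdio.h>

/*
 * All-ones when index < size, zero otherwise, computed without a
 * conditional branch that the CPU could speculate past. Like the
 * kernel, this relies on arithmetic right shift of a negative long.
 */
static unsigned long array_index_mask_nospec(unsigned long index,
					     unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >>
		(sizeof(long) * 8 - 1);
}

/* Clamp index to [0, size) even under branch misspeculation. */
#define array_index_nospec(index, size) \
	((index) & array_index_mask_nospec((index), (size)))

int main(void)
{
	unsigned long counters[4] = { 100, 101, 102, 103 };
	unsigned long nr = 4;

	for (unsigned long idx = 0; idx < 6; idx++) {
		if (idx >= nr) {
			/* Architecturally rejected, like returning NULL. */
			printf("idx %lu: out of range\n", idx);
			continue;
		}
		/*
		 * Even if the CPU speculates past the bounds check above,
		 * the masked index cannot select memory beyond the array.
		 */
		printf("idx %lu -> %lu\n", idx,
		       counters[array_index_nospec(idx, nr)]);
	}
	return 0;
}

The point of masking rather than only branching is that the bounds
check alone is not a speculation barrier: a mispredicted branch can
still issue the out-of-bounds load, which is exactly the Spectre-v1
window that array_index_nospec() closes.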