On Thu, Jun 20, 2024, Dapeng Mi wrote:
> The existing macro, KVM_INTEL_PMC_MAX_GENERIC, ambiguously represents the
> maximum supported General Purpose (GP) counter number for both Intel and
> AMD platforms. This could lead to issues if AMD begins to support more GP
> counters than Intel.
>
> To resolve this, a new platform-independent macro, KVM_PMC_MAX_GENERIC,
> is introduced to represent the maximum GP counter number across all x86
> platforms.
>
> No logic changes are introduced in this patch.
>
> Signed-off-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h | 9 +++++----
>  arch/x86/kvm/svm/pmu.c          | 2 +-
>  arch/x86/kvm/vmx/pmu_intel.c    | 2 ++
>  3 files changed, 8 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 57440bda4dc4..18137be6504a 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -534,11 +534,12 @@ struct kvm_pmc {
>
>  /* More counters may conflict with other existing Architectural MSRs */
>  #define KVM_INTEL_PMC_MAX_GENERIC 8
> -#define MSR_ARCH_PERFMON_PERFCTR_MAX (MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
> -#define MSR_ARCH_PERFMON_EVENTSEL_MAX (MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
> +#define KVM_AMD_PMC_MAX_GENERIC 6
> +#define KVM_PMC_MAX_GENERIC KVM_INTEL_PMC_MAX_GENERIC

Since we're changing the macro, maybe take the opportunity to use a better
name?  E.g. KVM_MAX_NR_GP_COUNTERS?  And then in a follow-up patch, give
fixed counters the same treatment, e.g. KVM_MAX_NR_FIXED_COUNTERS.

Or maybe KVM_MAX_NR_GP_PMCS and KVM_MAX_NR_FIXED_PMCS?
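To make that concrete, the end state in kvm_host.h could look something like
this (purely a sketch; the vendor-specific names are my extrapolation of the
same naming scheme, they aren't proposed anywhere in this patch):

  /* More counters may conflict with other existing Architectural MSRs */
  #define KVM_MAX_NR_INTEL_GP_COUNTERS  8
  #define KVM_MAX_NR_AMD_GP_COUNTERS    6
  #define KVM_MAX_NR_GP_COUNTERS        KVM_MAX_NR_INTEL_GP_COUNTERS

  /* Follow-up patch: give the fixed counters the same treatment. */
  #define KVM_MAX_NR_FIXED_COUNTERS     3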
> +#define MSR_ARCH_PERFMON_PERFCTR_MAX (MSR_ARCH_PERFMON_PERFCTR0 + KVM_PMC_MAX_GENERIC - 1)
> +#define MSR_ARCH_PERFMON_EVENTSEL_MAX (MSR_ARCH_PERFMON_EVENTSEL0 + KVM_PMC_MAX_GENERIC - 1)

And I'm very, very tempted to say we should simply delete these two, along
with MSR_ARCH_PERFMON_FIXED_CTR_MAX, and just open code the "end" MSR in the
one user.  Especially since "KVM" doesn't appear anywhere in the names, i.e.
because the names misrepresent KVM's semi-arbitrary max as the
*architectural* max.

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6ad19d913d31..547dfe40d017 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7432,17 +7432,20 @@ static void kvm_probe_msr_to_save(u32 msr_index)
                     intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2))
                        return;
                break;
-       case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
+       case MSR_ARCH_PERFMON_PERFCTR0 ...
+            MSR_ARCH_PERFMON_PERFCTR0 + KVM_MAX_NR_GP_COUNTERS - 1:
                if (msr_index - MSR_ARCH_PERFMON_PERFCTR0 >=
                    kvm_pmu_cap.num_counters_gp)
                        return;
                break;
-       case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
+       case MSR_ARCH_PERFMON_EVENTSEL0 ...
+            MSR_ARCH_PERFMON_EVENTSEL0 + KVM_MAX_NR_GP_COUNTERS - 1:
                if (msr_index - MSR_ARCH_PERFMON_EVENTSEL0 >=
                    kvm_pmu_cap.num_counters_gp)
                        return;
                break;
-       case MSR_ARCH_PERFMON_FIXED_CTR0 ... MSR_ARCH_PERFMON_FIXED_CTR_MAX:
+       case MSR_ARCH_PERFMON_FIXED_CTR0 ...
+            MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_MAX_NR_FIXED_COUNTERS - 1:
                if (msr_index - MSR_ARCH_PERFMON_FIXED_CTR0 >=
                    kvm_pmu_cap.num_counters_fixed)
                        return;

>  #define KVM_PMC_MAX_FIXED 3
>  #define MSR_ARCH_PERFMON_FIXED_CTR_MAX (MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
> -#define KVM_AMD_PMC_MAX_GENERIC 6
>
>  struct kvm_pmu {
>         u8 version;
> @@ -554,7 +555,7 @@ struct kvm_pmu {
>         u64 global_status_rsvd;
>         u64 reserved_bits;
>         u64 raw_event_mask;
> -       struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
> +       struct kvm_pmc gp_counters[KVM_PMC_MAX_GENERIC];
>         struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
>
>         /*
> diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
> index 6e908bdc3310..2fca247798eb 100644
> --- a/arch/x86/kvm/svm/pmu.c
> +++ b/arch/x86/kvm/svm/pmu.c
> @@ -218,7 +218,7 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
>         int i;
>
>         BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
> -       BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);
> +       BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > KVM_PMC_MAX_GENERIC);
>
>         for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC ; i++) {
>                 pmu->gp_counters[i].type = KVM_PMC_GP;
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index fb5cbd6cbeff..a4b0bee04596 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -570,6 +570,8 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
>         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
>         struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
>
> +       BUILD_BUG_ON(KVM_INTEL_PMC_MAX_GENERIC > KVM_PMC_MAX_GENERIC);

Rather than a BUILD_BUG_ON() in both the Intel and AMD init paths, can't we
just do this?

  #define KVM_MAX_NR_GP_COUNTERS  max(KVM_INTEL_PMC_MAX_GENERIC, KVM_AMD_PMC_MAX_GENERIC)

> +
>         for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
>                 pmu->gp_counters[i].type = KVM_PMC_GP;
>                 pmu->gp_counters[i].vcpu = vcpu;
> --
> 2.34.1
>
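Expanding on that last thought, a minimal sketch of the compile-time-max idea,
reusing the KVM_MAX_NR_*_GP_COUNTERS names sketched earlier (illustrative
names only, not part of this patch).  A plain ternary is used instead of max()
from <linux/minmax.h> so the result is unambiguously an integer constant
expression, i.e. still usable for sizing gp_counters[] in struct kvm_pmu:

  #define KVM_MAX_NR_INTEL_GP_COUNTERS  8
  #define KVM_MAX_NR_AMD_GP_COUNTERS    6

  /* Compile-time max of the vendor limits; no per-vendor BUILD_BUG_ON() needed. */
  #define KVM_MAX_NR_GP_COUNTERS                                        \
        (KVM_MAX_NR_INTEL_GP_COUNTERS > KVM_MAX_NR_AMD_GP_COUNTERS ?    \
         KVM_MAX_NR_INTEL_GP_COUNTERS : KVM_MAX_NR_AMD_GP_COUNTERS)

With that, intel_pmu_init() and amd_pmu_init() could drop their checks against
the common limit, and amd_pmu_init() would keep only the BUILD_BUG_ON() against
AMD64_NUM_COUNTERS_CORE.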