On Mon, Oct 24, 2022, Like Xu wrote: > @@ -104,11 +115,17 @@ static inline void write_gp_event_select(unsigned int i, u64 value) > > static inline u8 pmu_version(void) > { > + if (!is_intel()) > + return 0; This can be handled by adding pmu_caps.version. > + > return cpuid_10.a & 0xff; > } > > static inline bool this_cpu_has_pmu(void) > { > + if (!is_intel()) > + return true; I think it makes sense to kill off this_cpu_has_pmu(), the only usage is after an explicit is_intel() check, and practically speaking that will likely hold true since differentiating between Intel and AMD PMUs seems inevitable. > + > return !!pmu_version(); > } > > @@ -135,12 +152,18 @@ static inline void set_nr_gp_counters(u8 new_num) > > static inline u8 pmu_gp_counter_width(void) > { > - return (cpuid_10.a >> 16) & 0xff; > + if (is_intel()) Again, can be handled by utilizing pmu_caps. > + return (cpuid_10.a >> 16) & 0xff; > + else > + return PMC_DEFAULT_WIDTH; > } > > static inline u8 pmu_gp_counter_mask_length(void) > { > - return (cpuid_10.a >> 24) & 0xff; > + if (is_intel()) > + return (cpuid_10.a >> 24) & 0xff; > + else > + return pmu_nr_gp_counters(); > } > > static inline u8 pmu_nr_fixed_counters(void) > @@ -161,6 +184,9 @@ static inline u8 pmu_fixed_counter_width(void) > > static inline bool pmu_gp_counter_is_available(int i) > { > + if (!is_intel()) > + return i < pmu_nr_gp_counters(); > + > /* CPUID.0xA.EBX bit is '1 if the counter is NOT available. */ > return !(cpuid_10.b & BIT(i)); > } > @@ -268,4 +294,9 @@ static inline bool pebs_has_baseline(void) > return pmu.perf_cap & PMU_CAP_PEBS_BASELINE; > } > > +static inline bool has_amd_perfctr_core(void) Unnecessary wrappers, just use this_cpu_has() directly. > +{ > + return this_cpu_has(X86_FEATURE_PERFCTR_CORE); > +}