On Tue, Nov 01, 2011 at 09:49:19AM -0600, David Ahern wrote:
> On 10/30/2011 10:53 AM, Gleb Natapov wrote:
> > KVM needs to know the perf capability to decide which PMU it can
> > expose to a guest.
> > 
> > Signed-off-by: Gleb Natapov <gleb@xxxxxxxxxx>
> > ---
> >  arch/x86/include/asm/perf_event.h      |   11 +++++++++++
> >  arch/x86/kernel/cpu/perf_event.c       |   11 +++++++++++
> >  arch/x86/kernel/cpu/perf_event.h       |    2 ++
> >  arch/x86/kernel/cpu/perf_event_intel.c |    3 +++
> >  4 files changed, 27 insertions(+), 0 deletions(-)
> > 
> > diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
> > index f61c62f..7d7e57f 100644
> > --- a/arch/x86/include/asm/perf_event.h
> > +++ b/arch/x86/include/asm/perf_event.h
> > @@ -201,7 +201,18 @@ struct perf_guest_switch_msr {
> >  	u64 host, guest;
> >  };
> >  
> > +struct x86_pmu_capability {
> > +	int		version;
> > +	int		num_counters_gp;
> > +	int		num_counters_fixed;
> > +	int		bit_width_gp;
> > +	int		bit_width_fixed;
> > +	unsigned int	events_mask;
> > +	int		events_mask_len;
> > +};
> > +
> >  extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
> > +extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
> >  #else
> >  static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
> >  {
> 
> What about a version of perf_get_x86_pmu_capability for CONFIG_PERF_EVENTS
> not being enabled in the host kernel? The next patch for KVM assumes the
> function is defined.
> 
As far as I understand it is not possible to build x86 without
CONFIG_PERF_EVENTS right now. Actually, the KVM PMU code depends on
CONFIG_PERF_EVENTS being enabled. I can easily provide the stub if
needed though.
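Something along the lines of the following untested sketch would do; it
would sit in the #else branch of arch/x86/include/asm/perf_event.h next
to the existing perf_guest_get_msrs() stub and simply report an all-zero
capability:

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	/*
	 * perf is compiled out: report a zeroed capability structure,
	 * i.e. version == 0, no counters, no events.
	 */
	memset(cap, 0, sizeof(*cap));
}

The KVM side could then treat cap.version == 0 the same way it would
treat a CPU without an architectural PMU, e.g.:

	struct x86_pmu_capability cap;

	perf_get_x86_pmu_capability(&cap);
	if (!cap.version)
		return;	/* no PMU to expose to the guest */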
> David
> 
> > 
> > diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
> > index 6408910..94ac9ca 100644
> > --- a/arch/x86/kernel/cpu/perf_event.c
> > +++ b/arch/x86/kernel/cpu/perf_event.c
> > @@ -1570,3 +1570,14 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
> >  
> >  	return misc;
> >  }
> > +
> > +void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
> > +{
> > +	cap->version = x86_pmu.version;
> > +	cap->num_counters_gp = x86_pmu.num_counters;
> > +	cap->num_counters_fixed = x86_pmu.num_counters_fixed;
> > +	cap->bit_width_gp = cap->bit_width_fixed = x86_pmu.cntval_bits;
> > +	cap->events_mask = x86_pmu.events_mask;
> > +	cap->events_mask_len = x86_pmu.events_mask_len;
> > +}
> > +EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
> > diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
> > index b9698d4..e9ed238 100644
> > --- a/arch/x86/kernel/cpu/perf_event.h
> > +++ b/arch/x86/kernel/cpu/perf_event.h
> > @@ -259,6 +259,8 @@ struct x86_pmu {
> >  	int		num_counters_fixed;
> >  	int		cntval_bits;
> >  	u64		cntval_mask;
> > +	u32		events_mask;
> > +	int		events_mask_len;
> >  	int		apic;
> >  	u64		max_period;
> >  	struct event_constraint *
> > diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
> > index e09ca20..64e5f35 100644
> > --- a/arch/x86/kernel/cpu/perf_event_intel.c
> > +++ b/arch/x86/kernel/cpu/perf_event_intel.c
> > @@ -1580,6 +1580,8 @@ __init int intel_pmu_init(void)
> >  	x86_pmu.num_counters = eax.split.num_counters;
> >  	x86_pmu.cntval_bits = eax.split.bit_width;
> >  	x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
> > +	x86_pmu.events_mask = ebx;
> > +	x86_pmu.events_mask_len = eax.split.mask_length;
> >  
> >  	/*
> >  	 * Quirk: v2 perfmon does not report fixed-purpose events, so
> > @@ -1651,6 +1653,7 @@ __init int intel_pmu_init(void)
> >  		 * architectural event which is often completely bogus:
> >  		 */
> >  		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
> > +		x86_pmu.events_mask &= ~0x40;
> >  
> >  		pr_cont("erratum AAJ80 worked around, ");
> >  	}

-- 
			Gleb.