On Mon, Sep 30, 2019 at 03:22:56PM +0800, Like Xu wrote:
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index 46875bbd0419..74bc5c42b8b5 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -140,6 +140,35 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
>  	clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
>  }
>
> +static void pmc_pause_counter(struct kvm_pmc *pmc)
> +{
> +	if (!pmc->perf_event)
> +		return;
> +
> +	pmc->counter = pmc_read_counter(pmc);
> +
> +	perf_event_disable(pmc->perf_event);
> +
> +	/* reset count to avoid redundant accumulation */
> +	local64_set(&pmc->perf_event->count, 0);

Yuck, don't frob in data structures you don't own. Just like you
exported the IOC_PERIOD thing, so too is there an IOC_RESET.

Furthermore, why do you call pmc_read_counter() _before_ doing
perf_event_disable()? Doing it the other way around is much cheaper;
even better, you can use perf_event_count() after disable.

> +}
> +
> +static bool pmc_resume_counter(struct kvm_pmc *pmc)
> +{
> +	if (!pmc->perf_event)
> +		return false;
> +
> +	/* recalibrate sample period and check if it's accepted by perf core */
> +	if (perf_event_period(pmc->perf_event,
> +			      (-pmc->counter) & pmc_bitmask(pmc)))
> +		return false;

I'd do the reset here, but then you'd have three functions in a row
that do perf_event_ctx_lock()+perf_event_ctx_unlock(), which is rather
expensive. A sketch of folding all of this into a single helper follows
below.

> +
> +	/* reuse perf_event to serve as pmc_reprogram_counter() does */
> +	perf_event_enable(pmc->perf_event);
> +	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
> +	return true;
> +}
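
Putting the two complaints together, a minimal sketch of a
kernel-internal helper is below: disable the event, then read and
(optionally) reset its count, all under a single
perf_event_ctx_lock()/perf_event_ctx_unlock() pair. The helper name
perf_event_pause() and its reset flag are assumptions for illustration,
not something the quoted patch already exports:

/* kernel/events/core.c -- hypothetical helper, name is an assumption */
u64 perf_event_pause(struct perf_event *event, bool reset)
{
	struct perf_event_context *ctx;
	u64 count;

	ctx = perf_event_ctx_lock(event);
	/*
	 * Disable first: once the event is stopped its count is stable,
	 * so a plain local64_read() is enough -- no cross-CPU read.
	 */
	_perf_event_disable(event);
	count = local64_read(&event->count);
	if (reset)
		local64_set(&event->count, 0);
	perf_event_ctx_unlock(event, ctx);

	return count;
}
EXPORT_SYMBOL_GPL(perf_event_pause);

Taking the ctx lock once also addresses the IOC_RESET point: the reset
happens inside perf core, on data it owns, instead of KVM writing
pmc->perf_event->count directly.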
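
On the KVM side, pmc_pause_counter() would then shrink to a call into
that helper, accumulating the delta into pmc->counter (again a sketch,
assuming the hypothetical perf_event_pause() above):

static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event)
		return;

	/*
	 * Add the delta since the last reset, then zero the event so
	 * the next resume does not double-count.
	 */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
}

If the read has to stay on the KVM side instead, the cheap ordering is
perf_event_disable() first and perf_event_count() second:
perf_event_count() only sums event->count and child_count, whereas
pmc_read_counter() goes through perf_event_read_value(), which can
involve a synchronous read of a still-running event on another CPU.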