From: Wanpeng Li <wanpengli@xxxxxxxxxxx>

The vPMU depends on the in-kernel lapic to deliver the PMI interrupt, and
there is a lot of overhead in creating/maintaining perf_event objects,
locking/unlocking the perf_event_ctx, etc. for the vPMU. Currently, PMI
delivery silently fails when there is no in-kernel lapic. Let's not program
the counter for interrupt-based event sampling without in-kernel lapic
support, to avoid all of that overhead for nothing.

Signed-off-by: Wanpeng Li <wanpengli@xxxxxxxxxxx>
---
 arch/x86/kvm/pmu.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 0772bad9165c..fa5cd33af10d 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -179,6 +179,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	struct kvm_pmu_event_filter *filter;
 	int i;
 	bool allow_event = true;
+	bool intr = eventsel & ARCH_PERFMON_EVENTSEL_INT;
 
 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
 		printk_once("kvm pmu: pin control bit is ignored\n");
@@ -187,7 +188,8 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 
 	pmc_pause_counter(pmc);
 
-	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
+	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc)
+	    || (intr && !lapic_in_kernel(pmc->vcpu)))
 		return;
 
 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
@@ -233,7 +235,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	pmc_reprogram_counter(pmc, type, config,
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
+			      intr,
 			      (eventsel & HSW_IN_TX),
 			      (eventsel & HSW_IN_TX_CHECKPOINTED));
 }
@@ -248,7 +250,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 
 	pmc_pause_counter(pmc);
 
-	if (!en_field || !pmc_is_enabled(pmc))
+	if (!en_field || !pmc_is_enabled(pmc) || (pmi && !lapic_in_kernel(pmc->vcpu)))
 		return;
 
 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
-- 
2.25.1
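
P.S. For context on the "silently fails" behaviour mentioned in the changelog:
the PMI delivery path already returns quietly when there is no in-kernel lapic,
so the perf_event created for interrupt-based sampling buys nothing. A rough
sketch of that path, based on the kvm_pmu_deliver_pmi() helper in
arch/x86/kvm/pmu.c (the exact form may differ between kernel versions):

	void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
	{
		/* No in-kernel lapic: nowhere to inject the PMI, so it is dropped. */
		if (lapic_in_kernel(vcpu))
			kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}

With this patch the counter is simply not programmed for such events, so the
perf_event overhead is avoided up front instead of being paid for a PMI that
can never be delivered.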