Add an optional hook, passthrough_pmu_msrs(), to kvm_pmu_ops so that vendor
PMU code can disable interception of PMU MSRs when the passthrough PMU is
enabled, and invoke it from kvm_vcpu_after_set_cpuid() once the PMU has been
refreshed.

Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
Signed-off-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/kvm-x86-pmu-ops.h | 1 +
 arch/x86/kvm/cpuid.c                   | 4 ++++
 arch/x86/kvm/pmu.c                     | 5 +++++
 arch/x86/kvm/pmu.h                     | 2 ++
 4 files changed, 12 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
index fd986d5146e4..1b7876dcb3c3 100644
--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
+++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -24,6 +24,7 @@ KVM_X86_PMU_OP(is_rdpmc_passthru_allowed)
 KVM_X86_PMU_OP_OPTIONAL(reset)
 KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
 KVM_X86_PMU_OP_OPTIONAL(cleanup)
+KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs)
 
 #undef KVM_X86_PMU_OP
 #undef KVM_X86_PMU_OP_OPTIONAL
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 77352a4abd87..b577ba649feb 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -381,6 +381,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
 
 	kvm_pmu_refresh(vcpu);
+
+	if (is_passthrough_pmu_enabled(vcpu))
+		kvm_pmu_passthrough_pmu_msrs(vcpu);
+
 	vcpu->arch.cr4_guest_rsvd_bits =
 	    __cr4_reserved_bits(guest_cpuid_has, vcpu);
 
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 3afefe4cf6e2..bd94f2d67f5c 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -1059,3 +1059,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 	kfree(filter);
 	return r;
 }
+
+void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu)
+{
+	static_call_cond(kvm_x86_pmu_passthrough_pmu_msrs)(vcpu);
+}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index e1af6d07b191..63f876557716 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -41,6 +41,7 @@ struct kvm_pmu_ops {
 	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
 	void (*cleanup)(struct kvm_vcpu *vcpu);
 	bool (*is_rdpmc_passthru_allowed)(struct kvm_vcpu *vcpu);
+	void (*passthrough_pmu_msrs)(struct kvm_vcpu *vcpu);
 
 	const u64 EVENTSEL_EVENT;
 	const int MAX_NR_GP_COUNTERS;
@@ -292,6 +293,7 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
 void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
 bool kvm_pmu_check_rdpmc_passthrough(struct kvm_vcpu *vcpu);
+void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu);
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx);
 
--
2.45.0.rc1.225.g2a3ae87e7f-goog
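
As an illustration only, not part of this patch: a vendor PMU implementation
could wire up the new optional op roughly as sketched below. The helper name
intel_passthrough_pmu_msrs and the choice of MSR_CORE_PERF_GLOBAL_CTRL are
assumptions for the sketch (the actual vendor hook lives elsewhere in the
series); vmx_set_intercept_for_msr() and intel_pmu_ops are existing KVM/VMX
symbols.

	/*
	 * Hypothetical sketch for arch/x86/kvm/vmx/pmu_intel.c: when the op
	 * is invoked via static_call_cond(kvm_x86_pmu_passthrough_pmu_msrs),
	 * stop intercepting selected PMU MSRs so the guest accesses them
	 * directly.
	 */
	static void intel_passthrough_pmu_msrs(struct kvm_vcpu *vcpu)
	{
		/* Example MSR only; a real implementation covers more MSRs. */
		vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
					  MSR_TYPE_RW, false);
	}

	struct kvm_pmu_ops intel_pmu_ops __initdata = {
		/* ... existing ops ... */
		.passthrough_pmu_msrs = intel_passthrough_pmu_msrs,
	};

Because the op is declared with KVM_X86_PMU_OP_OPTIONAL and called through
static_call_cond(), vendors that leave it NULL (e.g. when passthrough is not
supported) incur no call at all.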