Plumb through kvm_pmu_ops with these two extra functions to allow PMU context switching. Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx> Signed-off-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx> Tested-by: Yongwei Ma <yongwei.ma@xxxxxxxxx> --- arch/x86/include/asm/kvm-x86-pmu-ops.h | 2 ++ arch/x86/kvm/pmu.c | 14 ++++++++++++++ arch/x86/kvm/pmu.h | 4 ++++ 3 files changed, 20 insertions(+) diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h index 1b7876dcb3c3..1a848ba6a7a7 100644 --- a/arch/x86/include/asm/kvm-x86-pmu-ops.h +++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h @@ -25,6 +25,8 @@ KVM_X86_PMU_OP_OPTIONAL(reset) KVM_X86_PMU_OP_OPTIONAL(deliver_pmi) KVM_X86_PMU_OP_OPTIONAL(cleanup) KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs) +KVM_X86_PMU_OP_OPTIONAL(save_pmu_context) +KVM_X86_PMU_OP_OPTIONAL(restore_pmu_context) #undef KVM_X86_PMU_OP #undef KVM_X86_PMU_OP_OPTIONAL diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index e9047051489e..782b564bdf96 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -1065,3 +1065,17 @@ void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu) { static_call_cond(kvm_x86_pmu_passthrough_pmu_msrs)(vcpu); } + +void kvm_pmu_save_pmu_context(struct kvm_vcpu *vcpu) +{ + lockdep_assert_irqs_disabled(); + + static_call_cond(kvm_x86_pmu_save_pmu_context)(vcpu); +} + +void kvm_pmu_restore_pmu_context(struct kvm_vcpu *vcpu) +{ + lockdep_assert_irqs_disabled(); + + static_call_cond(kvm_x86_pmu_restore_pmu_context)(vcpu); +} diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 63f876557716..8bd4b79e363f 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -42,6 +42,8 @@ struct kvm_pmu_ops { void (*cleanup)(struct kvm_vcpu *vcpu); bool (*is_rdpmc_passthru_allowed)(struct kvm_vcpu *vcpu); void (*passthrough_pmu_msrs)(struct kvm_vcpu *vcpu); + void (*save_pmu_context)(struct kvm_vcpu *vcpu); + void (*restore_pmu_context)(struct kvm_vcpu *vcpu); const u64 EVENTSEL_EVENT; const int 
MAX_NR_GP_COUNTERS; @@ -294,6 +296,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp); void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel); bool kvm_pmu_check_rdpmc_passthrough(struct kvm_vcpu *vcpu); void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu); +void kvm_pmu_save_pmu_context(struct kvm_vcpu *vcpu); +void kvm_pmu_restore_pmu_context(struct kvm_vcpu *vcpu); bool is_vmware_backdoor_pmc(u32 pmc_idx); -- 2.46.0.rc1.232.g9752f9e123-goog