From: Xiong Zhang <xiong.y.zhang@xxxxxxxxxxxxxxx>

In PMU passthrough mode, use the global_ctrl field in struct kvm_pmu as the
cached value of the guest's IA32_PERF_GLOBAL_CTRL. This is convenient for KVM
to set and get the value from the host side. In addition, load and save the
value across the VM enter/exit boundary in the following way:

 - At VM exit, if the processor supports VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL,
   read the guest IA32_PERF_GLOBAL_CTRL from the GUEST_IA32_PERF_GLOBAL_CTRL
   VMCS field, else read it from the VM-exit MSR-store array in the VMCS. The
   value is then assigned to global_ctrl.

 - At VM entry, if the processor supports VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
   write global_ctrl to the GUEST_IA32_PERF_GLOBAL_CTRL VMCS field, else write
   it to the VM-entry MSR-load array in the VMCS.

Implement the above logic in two helper functions and invoke them around the
VM enter/exit boundary.

Signed-off-by: Xiong Zhang <xiong.y.zhang@xxxxxxxxxxxxxxx>
Co-developed-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
Signed-off-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/vmx/vmx.c          | 49 ++++++++++++++++++++++++++++++++-
 2 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9857dda8b851..54a56eda77f4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -598,6 +598,8 @@ struct kvm_pmu {
 	u8 event_count;
 
 	bool passthrough;
+	int global_ctrl_slot_in_autoload;
+	int global_ctrl_slot_in_autostore;
 };
 
 struct kvm_pmu_ops;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 62b5913abdd6..c86b768743a9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4421,6 +4421,7 @@ static void vmx_set_perf_global_ctrl(struct vcpu_vmx *vmx)
 			}
 			m->val[i].index = MSR_CORE_PERF_GLOBAL_CTRL;
 			m->val[i].value = 0;
+			vcpu_to_pmu(&vmx->vcpu)->global_ctrl_slot_in_autoload = i;
 		}
 		/*
 		 * Setup auto clear host PERF_GLOBAL_CTRL msr at vm exit.
@@ -4448,6 +4449,7 @@ static void vmx_set_perf_global_ctrl(struct vcpu_vmx *vmx)
 				vmcs_write32(VM_EXIT_MSR_STORE_COUNT, m->nr);
 			}
 			m->val[i].index = MSR_CORE_PERF_GLOBAL_CTRL;
+			vcpu_to_pmu(&vmx->vcpu)->global_ctrl_slot_in_autostore = i;
 		}
 	} else {
 		if (!(vmentry_ctrl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)) {
@@ -4458,6 +4460,7 @@ static void vmx_set_perf_global_ctrl(struct vcpu_vmx *vmx)
 				m->val[i] = m->val[m->nr];
 				vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
 			}
+			vcpu_to_pmu(&vmx->vcpu)->global_ctrl_slot_in_autoload = -ENOENT;
 		}
 		if (!(vmexit_ctrl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)) {
 			m = &vmx->msr_autoload.host;
@@ -4476,6 +4479,7 @@ static void vmx_set_perf_global_ctrl(struct vcpu_vmx *vmx)
 				m->val[i] = m->val[m->nr];
 				vmcs_write32(VM_EXIT_MSR_STORE_COUNT, m->nr);
 			}
+			vcpu_to_pmu(&vmx->vcpu)->global_ctrl_slot_in_autostore = -ENOENT;
 		}
 	}
 
@@ -7236,7 +7240,7 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
 }
 
-static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+static void __atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 {
 	int i, nr_msrs;
 	struct perf_guest_switch_msr *msrs;
@@ -7259,6 +7263,46 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host, false);
 }
 
+static void save_perf_global_ctrl_in_passthrough_pmu(struct vcpu_vmx *vmx)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
+	int i;
+
+	if (vm_exit_controls_get(vmx) & VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL) {
+		pmu->global_ctrl = vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL);
+	} else {
+		i = pmu->global_ctrl_slot_in_autostore;
+		pmu->global_ctrl = vmx->msr_autostore.guest.val[i].value;
+	}
+}
+
+static void load_perf_global_ctrl_in_passthrough_pmu(struct vcpu_vmx *vmx)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
+	u64 global_ctrl = pmu->global_ctrl;
+	int i;
+
+	if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) {
+		vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, global_ctrl);
+	} else {
+		i = pmu->global_ctrl_slot_in_autoload;
+		vmx->msr_autoload.guest.val[i].value = global_ctrl;
+	}
+}
+
+static void __atomic_switch_perf_msrs_in_passthrough_pmu(struct vcpu_vmx *vmx)
+{
+	load_perf_global_ctrl_in_passthrough_pmu(vmx);
+}
+
+static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+{
+	if (is_passthrough_pmu_enabled(&vmx->vcpu))
+		__atomic_switch_perf_msrs_in_passthrough_pmu(vmx);
+	else
+		__atomic_switch_perf_msrs(vmx);
+}
+
 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7369,6 +7413,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 	vcpu->arch.cr2 = native_read_cr2();
 	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
 
+	if (is_passthrough_pmu_enabled(vcpu))
+		save_perf_global_ctrl_in_passthrough_pmu(vmx);
+
 	vmx->idt_vectoring_info = 0;
 
 	vmx_enable_fb_clear(vmx);
-- 
2.45.0.rc1.225.g2a3ae87e7f-goog
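
For reviewers who want the intended flow at a glance, below is a minimal
sketch of how the two helpers are meant to bracket one run of the guest.
example_run_cycle() is a hypothetical illustration only, not code from this
patch; the real call sites are atomic_switch_perf_msrs() on the entry path
and vmx_vcpu_enter_exit() on the exit path as shown above.

static void example_run_cycle(struct vcpu_vmx *vmx)
{
	/* VM entry: push the cached guest value toward hardware (VMCS field
	 * or VM-entry MSR-load slot, whichever the CPU supports). */
	load_perf_global_ctrl_in_passthrough_pmu(vmx);

	/* ... guest runs and may write IA32_PERF_GLOBAL_CTRL directly ... */

	/* VM exit: pull the current guest value back into the cache. */
	save_perf_global_ctrl_in_passthrough_pmu(vmx);

	/*
	 * Between exit and the next entry, pmu->global_ctrl is the single
	 * source of truth for the guest's IA32_PERF_GLOBAL_CTRL.
	 */
}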