Arch LBR MSRs are xsave-supported, but they are handled as an "independent"
xsave feature by the PMU code: during a thread/process context switch, the
MSRs are saved/restored by perf_event_task_sched_{in|out} rather than by the
generic kernel FPU switch code, i.e., save_fpregs_to_fpstate() and
restore_fpregs_from_fpstate(). When a vCPU guest/host FPU state swap happens,
the Arch LBR MSRs are therefore retained and can be accessed directly.

Signed-off-by: Yang Weijiang <weijiang.yang@xxxxxxxxx>
Reviewed-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
---
 arch/x86/kvm/vmx/pmu_intel.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index b57944d5e7d8..241128972776 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -410,6 +410,11 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
 		}
 		return 0;
+	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+		rdmsrl(msr_info->index, msr_info->data);
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -528,6 +533,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		    (data & ARCH_LBR_CTL_LBREN))
 			intel_pmu_create_guest_lbr_event(vcpu);
 		return 0;
+	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+		wrmsrl(msr_info->index, msr_info->data);
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
--
2.27.0
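
For readers unfamiliar with the register layout: below is a minimal sketch of
the MSR index layout the new "+ 31" case ranges rely on, assuming the Arch LBR
MSR addresses as defined in the Intel SDM and msr-index.h. The
is_arch_lbr_record_msr() helper is hypothetical and only illustrates the range
check the switch cases perform; it is not part of the patch.

	#include <linux/types.h>

	/* Each of the 32 Arch LBR records exposes FROM/TO/INFO MSRs at
	 * contiguous indices starting from these bases. */
	#define MSR_ARCH_LBR_INFO_0	0x00001200
	#define MSR_ARCH_LBR_FROM_0	0x00001500
	#define MSR_ARCH_LBR_TO_0	0x00001600
	#define ARCH_LBR_NR_ENTRIES	32

	/* Hypothetical helper: true if @index is one of the per-record
	 * FROM/TO/INFO MSRs handled by direct rdmsrl()/wrmsrl() above. */
	static inline bool is_arch_lbr_record_msr(u32 index)
	{
		return (index >= MSR_ARCH_LBR_FROM_0 &&
			index < MSR_ARCH_LBR_FROM_0 + ARCH_LBR_NR_ENTRIES) ||
		       (index >= MSR_ARCH_LBR_TO_0 &&
			index < MSR_ARCH_LBR_TO_0 + ARCH_LBR_NR_ENTRIES) ||
		       (index >= MSR_ARCH_LBR_INFO_0 &&
			index < MSR_ARCH_LBR_INFO_0 + ARCH_LBR_NR_ENTRIES);
	}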