On 1/28/2023 6:11 AM, Sean Christopherson wrote:
On Thu, Nov 24, 2022, Yang Weijiang wrote:
Per SDM 3B Chapter 18: "IA32_LBR_CTL.LBREn is saved and cleared on #SMI,
and restored on RSM". Accordingly, store the guest IA32_LBR_CTL value in
SMRAM and clear LBREn in the VMCS at SMM entry, and do the reverse at SMM exit.
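[For reference, LBREn is bit 0 of IA32_LBR_CTL, i.e. the ARCH_LBR_CTL_LBREN
mask used below matches the kernel's existing define:

	#define ARCH_LBR_CTL_LBREN	BIT(0)	/* IA32_LBR_CTL.LBREn */
]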
[...]
@@ -8006,11 +8006,21 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
vmx->nested.smm.vmxon = vmx->nested.vmxon;
vmx->nested.vmxon = false;
vmx_clear_hlt(vcpu);
+
+ if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
Uh, so this arbitrary dependency on 64-bit vCPUs needs to be factored into the
enabling. And KVM should WARN if arch LBRs get enabled for a 32-bit vCPU.
OK, will add the check when creating the event.
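[A rough sketch of such a guard in the event-creation path; the exact
placement inside intel_pmu_create_guest_lbr_event() is an assumption, not
part of this patch:

	/* Sketch: arch LBRs are architecturally 64-bit only, so warn and
	 * bail if they somehow get enabled for a 32-bit vCPU. */
	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
	    WARN_ON_ONCE(!guest_cpuid_has(vcpu, X86_FEATURE_LM)))
		return;
]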
+ u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);
+
+ smram->smram64.arch_lbr_ctl = ctl;
+ vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ~ARCH_LBR_CTL_LBREN);
+ }
+
return 0;
}
static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
{
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
int ret;
@@ -8027,6 +8037,18 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
vmx->nested.nested_run_pending = 1;
vmx->nested.smm.guest_mode = false;
}
+
+ if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ u64 ctl = smram->smram64.arch_lbr_ctl;
+
+ vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ARCH_LBR_CTL_LBREN);
IIUC, this should set only LBREn and preserve all other bits, not clobber the
entire MSR.
Oops, it's a typo, thanks!
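[Presumably the corrected restore ORs only LBREn back in, something like:

	/* Sketch of the fix per the review: set only LBREn, preserving
	 * the other control bits rather than clobbering the whole MSR. */
	if (ctl & ARCH_LBR_CTL_LBREN)
		vmcs_write64(GUEST_IA32_LBR_CTL,
			     vmcs_read64(GUEST_IA32_LBR_CTL) |
			     ARCH_LBR_CTL_LBREN);
]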
+
+ if (intel_pmu_lbr_is_enabled(vcpu) &&
+ (ctl & ARCH_LBR_CTL_LBREN) && !lbr_desc->event)
+ intel_pmu_create_guest_lbr_event(vcpu);
+ }
+
return 0;
}
--
2.27.0