From ceba1527fd87cdc789b38fce454058fca6582b0a Mon Sep 17 00:00:00 2001
From: Yang Weijiang <weijiang.yang@xxxxxxxxx>
Date: Thu, 5 Aug 2021 20:48:39 +0800
Subject: [PATCH] KVM: x86/vmx: Flip Arch LBREn bit on guest state change
Per the spec: "IA32_LBR_CTL.LBREn is saved and cleared on #SMI, and
restored on RSM. On a warm reset, all LBR MSRs, including
IA32_LBR_DEPTH, have their values preserved. However,
IA32_LBR_CTL.LBREn is cleared to 0, disabling LBRs."

To support live migration while the guest is in SMM, use a reserved
bit (63) of the MSR to mirror the LBREn bit. This facilitates Arch LBR
specific handling during live migration, because user space checks
LBREn to decide whether the Arch LBR related MSRs need to be migrated.
When both the mirrored bit and LBREn are set, Arch LBR was active when
the guest entered SMM, so only create the perf event and defer
restoring the LBREn bit to the leave_smm callback.

Also clear LBREn on warm reset.
Signed-off-by: Yang Weijiang <weijiang.yang@xxxxxxxxx>
---
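Illustrative note, not part of the patch: from user space's point of
view, reading MSR_ARCH_LBR_CTL (e.g. via KVM_GET_MSRS) on a vCPU that
entered SMM with LBRs active returns both LBREn (bit 0) and the mirror
bit (63); writing that value back on the destination makes KVM recreate
the guest LBR perf event while keeping LBREn clear in the VMCS until
RSM. A minimal sketch of how a VMM could use the mirrored bit follows
(the helper name and flow are hypothetical, not actual QEMU code):

/* Illustrative sketch only -- not part of this patch. */
#include <stdbool.h>
#include <stdint.h>

/* Constants mirror the kernel-side definitions. */
#define ARCH_LBR_CTL_LBREN	(1ULL << 0)	/* IA32_LBR_CTL.LBREn */
#define ARCH_LBR_IN_SMM		(1ULL << 63)	/* reserved bit mirroring LBREn */

/*
 * Hypothetical helper: the Arch LBR MSRs are worth migrating if LBRs
 * are enabled, either normally (LBREn set) or at the time the guest
 * entered SMM (mirror bit set by KVM).
 */
static bool arch_lbr_msrs_need_migration(uint64_t lbr_ctl)
{
	return lbr_ctl & (ARCH_LBR_CTL_LBREN | ARCH_LBR_IN_SMM);
}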
 arch/x86/kvm/vmx/pmu_intel.c | 16 +++++++++++++---
 arch/x86/kvm/vmx/vmx.c       | 29 +++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.h       |  1 +
 3 files changed, 43 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 038fdb788ccd..652601ad99ea 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -373,6 +373,8 @@ static bool arch_lbr_depth_is_valid(struct kvm_vcpu *vcpu, u64 depth)
 	return (depth == pmu->kvm_arch_lbr_depth);
 }
 
+#define ARCH_LBR_IN_SMM BIT(63)
+
 static bool arch_lbr_ctl_is_valid(struct kvm_vcpu *vcpu, u64 ctl)
 {
 	struct kvm_cpuid_entry2 *entry;
@@ -380,7 +382,7 @@ static bool arch_lbr_ctl_is_valid(struct kvm_vcpu *vcpu, u64 ctl)
 	if (!kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
 		return false;
 
-	if (ctl & ~KVM_ARCH_LBR_CTL_MASK)
+	if (ctl & ~(KVM_ARCH_LBR_CTL_MASK | ARCH_LBR_IN_SMM))
 		goto warn;
 
 	entry = kvm_find_cpuid_entry(vcpu, 0x1c, 0);
@@ -425,6 +427,10 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		return 0;
 	case MSR_ARCH_LBR_CTL:
 		msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
+		if (to_vmx(vcpu)->lbr_in_smm) {
+			msr_info->data |= ARCH_LBR_CTL_LBREN;
+			msr_info->data |= ARCH_LBR_IN_SMM;
+		}
 		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
@@ -501,11 +507,15 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!arch_lbr_ctl_is_valid(vcpu, data))
 			break;
 
-		vmcs_write64(GUEST_IA32_LBR_CTL, data);
-
 		if (intel_pmu_lbr_is_enabled(vcpu) && !lbr_desc->event &&
 		    (data & ARCH_LBR_CTL_LBREN))
 			intel_pmu_create_guest_lbr_event(vcpu);
+
+		if (data & ARCH_LBR_IN_SMM) {
+			data &= ~ARCH_LBR_CTL_LBREN;
+			data &= ~ARCH_LBR_IN_SMM;
+		}
+		vmcs_write64(GUEST_IA32_LBR_CTL, data);
 		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6d6ee9cf82f5..f754b9400151 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4543,6 +4543,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 	vmx->rmode.vm86_active = 0;
 	vmx->spec_ctrl = 0;
+	vmx->lbr_in_smm = false;
 
 	vmx->msr_ia32_umwait_control = 0;
@@ -4593,6 +4594,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 	if (!init_event) {
 		if (static_cpu_has(X86_FEATURE_ARCH_LBR))
 			vmcs_write64(GUEST_IA32_LBR_CTL, 0);
+	} else {
+		flip_arch_lbr_ctl(vcpu, false);
 	}
 }
@@ -7695,6 +7698,8 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 
 static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
@@ -7704,12 +7709,26 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	vmx->nested.smm.vmxon = vmx->nested.vmxon;
 	vmx->nested.vmxon = false;
 	vmx_clear_hlt(vcpu);
+
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+	    test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use) &&
+	    lbr_desc->event && guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+		u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);
+
+		put_smstate(u64, smstate, 0x7f10, ctl);
+		vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ~ARCH_LBR_CTL_LBREN);
+		vmx->lbr_in_smm = true;
+	}
+
 	return 0;
 }
 
 static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
 	int ret;
 
 	if (vmx->nested.smm.vmxon) {
@@ -7725,6 +7744,16 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 		vmx->nested.nested_run_pending = 1;
 		vmx->nested.smm.guest_mode = false;
 	}
+
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+	    test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use) &&
+	    lbr_desc->event && guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+		u64 ctl = GET_SMSTATE(u64, smstate, 0x7f10);
+
+		vmcs_write64(GUEST_IA32_LBR_CTL, ctl | ARCH_LBR_CTL_LBREN);
+		vmx->lbr_in_smm = false;
+	}
+
 	return 0;
 }
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index b98c7e96697a..a227fe8bf288 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -351,6 +351,7 @@ struct vcpu_vmx {
 
 	struct pt_desc pt_desc;
 	struct lbr_desc lbr_desc;
+	bool lbr_in_smm;
 
 	/* Save desired MSR intercept (read: pass-through) state */
 #define MAX_POSSIBLE_PASSTHROUGH_MSRS 15