[RFC PATCH 18/18] KVM: x86: clear SMM flags before loading state while leaving SMM

RSM emulation is currently broken on VMX when the interrupted guest has
CR4.VMXE=1.  Stop dancing around the issue of HF_SMM_MASK being set when
loading SMSTATE into architectural state, e.g. by toggling it for
problematic flows, and simply clear the SMM flags prior to loading
architectural state from the SMRAM save state area.

Reported-by: Jon Doron <arilou@xxxxxxxxx>
Cc: Jim Mattson <jmattson@xxxxxxxxxx>
Cc: Liran Alon <liran.alon@xxxxxxxxxx>
Cc: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
Fixes: 5bea5123cbf0 ("KVM: VMX: check nested state and CR4.VMXE against SMM")
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/x86/kvm/svm.c     | 12 ++++--------
 arch/x86/kvm/vmx/vmx.c |  2 --
 arch/x86/kvm/x86.c     | 10 +++++-----
 3 files changed, 9 insertions(+), 15 deletions(-)
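
For reference, a stand-alone userspace sketch of the ordering problem
being fixed.  The names and values below (set_cr4(), rsm(), the flag
bits) are illustrative stand-ins for the KVM internals, not kernel
code; set_cr4() mimics the CR4.VMXE-vs-SMM check added by commit
5bea5123cbf0, and rsm() uses the ordering introduced by this patch:

	#include <stdbool.h>
	#include <stdio.h>

	#define HF_SMM_MASK	(1UL << 6)	/* illustrative flag bit */
	#define X86_CR4_VMXE	(1UL << 13)	/* CR4.VMXE is bit 13 */

	static unsigned long hflags;
	static unsigned long cr4;

	/* Stand-in for the check from commit 5bea5123cbf0:
	 * CR4.VMXE may not be set while the vCPU is in SMM.
	 */
	static int set_cr4(unsigned long val)
	{
		if ((val & X86_CR4_VMXE) && (hflags & HF_SMM_MASK))
			return 1;	/* rejected -> RSM unhandleable */
		cr4 = val;
		return 0;
	}

	/* RSM emulation with the ordering from this patch: clear the
	 * SMM flags first, then load architectural state from the
	 * SMRAM save state area.
	 */
	static int rsm(unsigned long saved_cr4)
	{
		hflags &= ~HF_SMM_MASK;
		return set_cr4(saved_cr4);
	}

	int main(void)
	{
		hflags = HF_SMM_MASK;	/* vCPU starts in SMM */
		printf("RSM with CR4.VMXE=1: %s\n",
		       rsm(X86_CR4_VMXE) ? "fail" : "ok");
		return 0;
	}

With the pre-patch ordering, i.e. the SMM flags cleared only after the
saved state has been loaded, set_cr4() above rejects CR4.VMXE and the
emulated RSM fails.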

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 33975e2aa486..b76c5d34a814 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6222,21 +6222,17 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	struct page *page;
 	u64 guest;
 	u64 vmcb;
-	int ret;
 
 	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
 	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
 	if (guest) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		nested_vmcb = nested_svm_map(svm, vmcb, &page);
-		if (nested_vmcb)
-			enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
-		else
-			ret = 1;
-		vcpu->arch.hflags |= HF_SMM_MASK;
+		if (!nested_vmcb)
+			return 1;
+		enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
 	}
-	return ret;
+	return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4612a1e7585e..eeed91a1efc6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7379,9 +7379,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	}
 
 	if (vmx->nested.smm.guest_mode) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
-		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1e510ca8a8e4..ef8ba625fea5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7788,6 +7788,11 @@ static int leave_smm(struct kvm_vcpu *vcpu)
 				sizeof(buf)))
 		return X86EMUL_UNHANDLEABLE;
 
+	if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
+		kvm_x86_ops->set_nmi_mask(vcpu, false);
+
+	vcpu->arch.hflags &= ~(HF_SMM_INSIDE_NMI_MASK | HF_SMM_MASK);
+
 	/*
 	 * Get back to real mode, to prepare a safe state in which to load
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
@@ -7841,11 +7846,6 @@ static int leave_smm(struct kvm_vcpu *vcpu)
 		return X86EMUL_UNHANDLEABLE;
 	}
 
-	if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
-		kvm_x86_ops->set_nmi_mask(vcpu, false);
-
-	vcpu->arch.hflags &= ~(HF_SMM_INSIDE_NMI_MASK | HF_SMM_MASK);
-
 	kvm_smm_changed(vcpu);
 
 	return X86EMUL_CONTINUE;
-- 
2.21.0