Otherwise guest entry code might see incorrect L1 state (e.g. paging state).

Fixes: 37be407b2ce8 ("KVM: nSVM: Fix L1 state corruption upon return from SMM")
Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
 arch/x86/kvm/svm/svm.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a75dafbfa4a6..e3b092a3a2e1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4358,10 +4358,6 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	if (svm_allocate_nested(svm))
 		goto unmap_save;
 
-	vmcb12 = map.hva;
-	nested_load_control_from_vmcb12(svm, &vmcb12->control);
-	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
-
 	/*
 	 * Restore L1 host state from L1 HSAVE area as VMCB01 was
 	 * used during SMM (see svm_enter_smm())
@@ -4369,6 +4365,14 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
 			     map_save.hva + 0x400);
 
+	/*
+	 * Enter the nested guest now
+	 */
+
+	vmcb12 = map.hva;
+	nested_load_control_from_vmcb12(svm, &vmcb12->control);
+	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
+
 unmap_save:
 	kvm_vcpu_unmap(vcpu, &map_save, true);
 unmap_map:
-- 
2.26.3
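
[Editor's note, not part of the patch: a minimal, self-contained toy model of why the ordering matters. The names below (vmcb01, hsave, enter_guest_mode) are illustrative stand-ins, not the real KVM symbols. The point is that "entering the nested guest" consumes whatever L1 state currently sits in VMCB01, so VMCB01 must be refilled from the HSAVE area before that step, not after it.]

/* toy_ordering.c - illustrative only, not KVM code */
#include <assert.h>
#include <stdio.h>

struct l1_state { unsigned long cr3; };                  /* stand-in for L1 paging state */

static struct l1_state vmcb01;                           /* stand-in for svm->vmcb01->save */
static const struct l1_state hsave = { .cr3 = 0x1000 };  /* stand-in for the L1 HSAVE area */

/* Stand-in for enter_svm_guest_mode(): consults L1 state while switching to L2. */
static unsigned long enter_guest_mode(void)
{
	return vmcb01.cr3;       /* guest entry sees whatever vmcb01 holds right now */
}

int main(void)
{
	/* VMCB01 still holds the state that was used while running in SMM. */
	vmcb01.cr3 = 0;

	/* Buggy order: enter guest mode first, restore L1 state afterwards. */
	unsigned long seen_buggy = enter_guest_mode();
	vmcb01 = hsave;

	/* Fixed order: restore L1 state from HSAVE first, then enter guest mode. */
	vmcb01.cr3 = 0;
	vmcb01 = hsave;
	unsigned long seen_fixed = enter_guest_mode();

	printf("buggy order saw cr3=%#lx, fixed order saw cr3=%#lx\n",
	       seen_buggy, seen_fixed);
	assert(seen_buggy != hsave.cr3 && seen_fixed == hsave.cr3);
	return 0;
}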