If the nested guest is entered prior to restoring the L1 host state from
the host save (HSAVE) area, the guest entry code might see incorrect L1
state (e.g. paging state).

Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
 arch/x86/kvm/svm/svm.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1a70e11f0487..ea7a4dacd42f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4347,27 +4347,30 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 			 gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
 			return 1;

-		if (svm_allocate_nested(svm))
+		if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+				 &map_save) == -EINVAL)
 			return 1;

-		vmcb12 = map.hva;
-
-		nested_load_control_from_vmcb12(svm, &vmcb12->control);
-
-		ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
-		kvm_vcpu_unmap(vcpu, &map, true);
+		if (svm_allocate_nested(svm))
+			return 1;

 		/*
 		 * Restore L1 host state from L1 HSAVE area as VMCB01 was
 		 * used during SMM (see svm_enter_smm())
 		 */
-		if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-				 &map_save) == -EINVAL)
-			return 1;

 		svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
 				     map_save.hva + 0x400);

+		/*
+		 * Restore L2 state
+		 */
+
+		vmcb12 = map.hva;
+		nested_load_control_from_vmcb12(svm, &vmcb12->control);
+		ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
+
+		kvm_vcpu_unmap(vcpu, &map, true);
 		kvm_vcpu_unmap(vcpu, &map_save, true);
 	}
 }
--
2.26.3
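
For reference, this is the resulting order of operations in
svm_leave_smm() after this patch, condensed from the hunk above.
Error-handling paths and the surrounding guest-mode check are elided,
so treat it as a reading aid rather than a compilable excerpt; all
identifiers come from the diff itself:

	/* map vmcb12 and the L1 HSAVE area before touching any state */
	kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save);
	svm_allocate_nested(svm);

	/* 1) restore L1 host state from the HSAVE area into VMCB01 */
	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);

	/* 2) only then restore L2 state and re-enter the nested guest */
	vmcb12 = map.hva;
	nested_load_control_from_vmcb12(svm, &vmcb12->control);
	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);

	kvm_vcpu_unmap(vcpu, &map, true);
	kvm_vcpu_unmap(vcpu, &map_save, true);

Before this patch, step 2 ran before step 1, so enter_svm_guest_mode()
could consume L1 state (e.g. paging state) that had not yet been
restored from the HSAVE area.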