As a preparatory change for implementing a nested-specific PGD switch for
nSVM (following nVMX's nested_vmx_load_cr3()) instead of relying on
kvm_set_cr3(), introduce nested_svm_load_cr3(). The only intended
functional change for now is that errors from kvm_set_cr3() are
propagated.

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 arch/x86/kvm/svm/nested.c | 35 +++++++++++++++++++++++++++++++----
 1 file changed, 31 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 5e6c988a4e6b..d0fd63e8d835 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -311,8 +311,25 @@ static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
 	nested_vmcb->control.exit_int_info = exit_int_info;
 }
 
-static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
+static inline bool nested_npt_enabled(struct vcpu_svm *svm)
 {
+	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
+}
+
+/*
+ * Load guest's cr3 at nested entry. @nested_npt is true if we are
+ * emulating VM-Entry into a guest with NPT enabled.
+ */
+static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
+			       bool nested_npt)
+{
+	return kvm_set_cr3(vcpu, cr3);
+}
+
+static int nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
+{
+	int ret = 0;
+
 	/* Load the nested guest state */
 	svm->vmcb->save.es = nested_vmcb->save.es;
 	svm->vmcb->save.cs = nested_vmcb->save.cs;
@@ -324,7 +341,9 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
 	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
 	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
 	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
-	(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
+
+	ret = nested_svm_load_cr3(&svm->vcpu, nested_vmcb->save.cr3,
+				  nested_npt_enabled(svm));
 
 	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
 	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
@@ -338,12 +357,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
 	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
 	svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
 	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
+
+	return ret;
 }
 
 static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 {
 	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
-	if (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
+	if (nested_npt_enabled(svm))
 		nested_svm_init_mmu_context(&svm->vcpu);
 
 	/* Guest paging mode is active - reset mmu */
@@ -382,9 +403,15 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 			 struct vmcb *nested_vmcb)
 {
+	int ret;
+
 	svm->nested.vmcb = vmcb_gpa;
 	load_nested_vmcb_control(svm, &nested_vmcb->control);
-	nested_prepare_vmcb_save(svm, nested_vmcb);
+
+	ret = nested_prepare_vmcb_save(svm, nested_vmcb);
+	if (ret)
+		return ret;
+
 	nested_prepare_vmcb_control(svm);
 
 	svm_set_gif(svm, true);
-- 
2.25.4
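
For reference, a rough sketch of where nested_svm_load_cr3() could be
headed once the nested PGD switch is implemented. It mirrors the overall
structure of nVMX's nested_vmx_load_cr3() and is purely illustrative, not
part of this patch: the exact checks and the use of helpers such as
is_pae_paging(), load_pdptrs(), kvm_mmu_new_pgd() and kvm_init_mmu() here
are assumptions about the follow-up, not what this series commits to.

static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	/*
	 * Illustrative sketch only. Without nested NPT, L2's CR3 points
	 * into L1-managed page tables and has to be validated and
	 * switched to as a new PGD; with nested NPT, CR3 is a guest
	 * value walked through the L1-provided NPT, so no host-side
	 * PGD switch would be needed.
	 */
	if (!nested_npt && is_pae_paging(vcpu)) {
		/* PAE paging: PDPTEs must be re-read from the new CR3. */
		if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
			return -EINVAL;
	}

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3, false, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}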