On Mon, Aug 23, 2021, Maxim Levitsky wrote:
> This allows nested SVM code to be more similar to nested VMX code.
>
> Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
> ---
>  arch/x86/kvm/svm/nested.c | 9 ++++++---
>  arch/x86/kvm/svm/svm.c    | 8 +++++++-
>  arch/x86/kvm/svm/svm.h    | 3 ++-
>  3 files changed, 15 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 5e13357da21e..678fd21f6077 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -572,7 +572,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
>  }
>
>  int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
> -			 struct vmcb *vmcb12)
> +			 struct vmcb *vmcb12, bool from_entry)

from_vmrun would be a better name.  VMX uses the slightly abstract
from_vmentry because of the VMLAUNCH vs. VMRESUME silliness.  If we want
to explicitly follow VMX then from_vmentry would be more appropriate, but
I don't see any reason not to be more precise.  (See the sketch at the
bottom of this mail for what the rename would look like.)

>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  	int ret;
> @@ -602,13 +602,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
>  	nested_vmcb02_prepare_save(svm, vmcb12);
>
>  	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
> -				  nested_npt_enabled(svm), true);
> +				  nested_npt_enabled(svm), from_entry);
>  	if (ret)
>  		return ret;
>
>  	if (!npt_enabled)
>  		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
>
> +	if (!from_entry)
> +		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
> +
>  	svm_set_gif(svm, true);
>
>  	return 0;
> @@ -674,7 +677,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
>
>  	svm->nested.nested_run_pending = 1;
>
> -	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
> +	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
>  		goto out_exit_err;
>
>  	if (nested_svm_vmrun_msrpm(svm))
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index ea7a4dacd42f..76ee15af8c48 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4354,6 +4354,12 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
>  	if (svm_allocate_nested(svm))
>  		return 1;
>
> +	/* Exit from the SMM to the non root mode also uses
> +	 * the KVM_REQ_GET_NESTED_STATE_PAGES request,
> +	 * but in this case the pdptrs must be always reloaded
> +	 */
> +	vcpu->arch.pdptrs_from_userspace = false;

Hmm, I think this belongs in the previous patch.  And I would probably go
so far as to say it belongs in emulator_leave_smm(), i.e.
pdptrs_from_userspace should be cleared on RSM regardless of what mode is
being resumed.
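Untested, and I'm writing the surrounding context from memory, so the
exact signature/body of emulator_leave_smm() may differ, but something
like:

	static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt,
				      const char *smstate)
	{
		struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

		/*
		 * RSM always reloads the PDPTRs from the SMM state save
		 * area / guest memory, so any PDPTRs provided by userspace
		 * via KVM_SET_SREGS2 are stale at this point, regardless
		 * of what mode is being resumed.
		 */
		vcpu->arch.pdptrs_from_userspace = false;

		/* ...existing RSM handling, unchanged... */
	}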
> +
>  	/*
>  	 * Restore L1 host state from L1 HSAVE area as VMCB01 was
>  	 * used during SMM (see svm_enter_smm())
> @@ -4368,7 +4374,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
>
>  	vmcb12 = map.hva;
>  	nested_load_control_from_vmcb12(svm, &vmcb12->control);
> -	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
> +	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
>
>  	kvm_vcpu_unmap(vcpu, &map, true);
>  	kvm_vcpu_unmap(vcpu, &map_save, true);
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 524d943f3efc..51ffa46ab257 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
>  	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
>  }
>
> -int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
> +int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
> +			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_entry);

Alignment is funky, it can/should match the definition, e.g.

	int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
				 struct vmcb *vmcb12, bool from_entry);

>  void svm_leave_nested(struct vcpu_svm *svm);
>  void svm_free_nested(struct vcpu_svm *svm);
>  int svm_allocate_nested(struct vcpu_svm *svm);
> --
> 2.26.3
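Circling back to the from_vmrun suggestion above, the end result would be
something like this (again untested, purely to illustrate the rename, the
call sites are the ones from this patch):

	int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
				 struct vmcb *vmcb12, bool from_vmrun);

	/* nested_svm_vmrun(): a real VMRUN, state pages can be read now. */
	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
		goto out_exit_err;

	/*
	 * svm_leave_smm(): not a VMRUN, state pages are loaded later via
	 * KVM_REQ_GET_NESTED_STATE_PAGES.
	 */
	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);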