If the new KVM_*_SREGS2 ioctls are used, the PDPTRs are
part of the migration state and are correctly
restored by those ioctls.

Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h | 6 ++++++
 arch/x86/kvm/svm/nested.c       | 3 ++-
 arch/x86/kvm/vmx/nested.c       | 3 ++-
 arch/x86/kvm/x86.c              | 3 +++
 4 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 83f948bdc59a..8eb107ceb45a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -851,6 +851,12 @@ struct kvm_vcpu_arch {
 
 	/* Protected Guests */
 	bool guest_state_protected;
+
+	/*
+	 * Set when the PDPTRs were loaded directly by userspace without
+	 * reading guest memory
+	 */
+	bool pdptrs_restored_oob;
 };
 
 struct kvm_lpage_info {
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index e3e5775b8f1c..0f80d68a45e1 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1369,7 +1369,8 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 	if (WARN_ON(!is_guest_mode(vcpu)))
 		return true;
 
-	if (!nested_npt_enabled(svm) && is_pae_paging(vcpu))
+	if (!vcpu->arch.pdptrs_restored_oob &&
+	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
 		/*
 		 * Reload the guest's PDPTRs since after a migration
 		 * the guest CR3 might be restored prior to setting the nested
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0acdda85f36a..78d6c71ab03b 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3113,7 +3113,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 	struct page *page;
 	u64 hpa;
 
-	if (!nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
+	if (!vcpu->arch.pdptrs_restored_oob &&
+	    !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
 		/*
 		 * Reload the guest's PDPTRs since after a migration
 		 * the guest CR3 might be restored prior to setting the nested
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 11260e83518f..eadfc9caf500 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -815,6 +815,8 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
 	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
 
+	vcpu->arch.pdptrs_restored_oob = false;
+
 out:
 
 	return ret;
@@ -10113,6 +10115,7 @@ static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
 
 		kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
 		mmu_reset_needed = 1;
+		vcpu->arch.pdptrs_restored_oob = true;
 	}
 	if (mmu_reset_needed)
 		kvm_mmu_reset_context(vcpu);
-- 
2.26.3
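
For reference, a minimal userspace sketch of how a VMM could drive the
new ioctls on the migration path. The helper name and fd handling are
made up for illustration; it assumes struct kvm_sregs2, the
KVM_{GET,SET}_SREGS2 ioctls and the KVM_SREGS2_FLAGS_PDPTRS_VALID flag
introduced earlier in this series, and that KVM fills/honours that flag
when PAE paging is in use:

#include <sys/ioctl.h>
#include <stdio.h>
#include <linux/kvm.h>

/*
 * Illustrative only: copy the special registers, including the PDPTRs,
 * from a source vCPU to a destination vCPU without rereading guest
 * memory.  Both fds are assumed to be vCPU fds from KVM_CREATE_VCPU.
 */
static int migrate_sregs2(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_sregs2 sregs2;

	/* Source side: the PDPTRs are returned together with the rest of
	 * the special registers, with KVM_SREGS2_FLAGS_PDPTRS_VALID set
	 * when they are meaningful. */
	if (ioctl(src_vcpu_fd, KVM_GET_SREGS2, &sregs2) < 0) {
		perror("KVM_GET_SREGS2");
		return -1;
	}

	/* Destination side: with the flag set, KVM takes the PDPTRs from
	 * this struct instead of reading guest memory, which may not yet
	 * be consistent because the nested state is restored later. */
	if (ioctl(dst_vcpu_fd, KVM_SET_SREGS2, &sregs2) < 0) {
		perror("KVM_SET_SREGS2");
		return -1;
	}

	return 0;
}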