From: Yulei Zhang <yulei.kernel@xxxxxxxxx> Release the pre-pinned memory in direct build EPT when the guest VM exits. Signed-off-by: Yulei Zhang <yuleixzhang@xxxxxxxxxxx> --- arch/x86/kvm/mmu/mmu.c | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 03c5e73b96cb..f2124f52b286 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4309,8 +4309,11 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush, bool skip_mmu_sync) { - __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu), - skip_tlb_flush, skip_mmu_sync); + if (!vcpu->arch.direct_build_tdp) + __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu), + skip_tlb_flush, skip_mmu_sync); + else + vcpu->arch.mmu->root_hpa = INVALID_PAGE; } EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd); @@ -5207,10 +5210,14 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load); void kvm_mmu_unload(struct kvm_vcpu *vcpu) { - kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); - WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa)); - kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); - WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); + if (!vcpu->arch.direct_build_tdp) { + kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); + WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa)); + kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); + } + vcpu->arch.direct_build_tdp = false; + vcpu->arch.mmu->root_hpa = INVALID_PAGE; } EXPORT_SYMBOL_GPL(kvm_mmu_unload); @@ -6538,6 +6545,14 @@ void kvm_direct_tdp_remove_page_table(struct kvm *kvm, struct kvm_memory_slot *s kvm_flush_remote_tlbs(kvm); } +void kvm_direct_tdp_release_global_root(struct kvm *kvm) +{ + if (kvm->arch.global_root_hpa) + __kvm_walk_global_page(kvm, 
kvm->arch.global_root_hpa, max_tdp_level); + + return; +} + /* * Calculate mmu pages needed for kvm. */ @@ -6564,9 +6579,13 @@ unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) void kvm_mmu_destroy(struct kvm_vcpu *vcpu) { - kvm_mmu_unload(vcpu); - free_mmu_pages(&vcpu->arch.root_mmu); - free_mmu_pages(&vcpu->arch.guest_mmu); + if (vcpu->arch.direct_build_tdp) { + vcpu->arch.mmu->root_hpa = INVALID_PAGE; + } else { + kvm_mmu_unload(vcpu); + free_mmu_pages(&vcpu->arch.root_mmu); + free_mmu_pages(&vcpu->arch.guest_mmu); + } mmu_free_memory_caches(vcpu); } -- 2.17.1