kvm_tdp_mmu_put_root() and kvm_tdp_mmu_free_root() are always called
together, so merge the two functions to simplify TDP MMU root
refcounting and freeing.

Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
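Not part of the commit message: a minimal before/after sketch of the
caller-side effect, mirroring the mmu_free_root_page() hunk below. All
identifiers are taken from this patch; nothing new is introduced.

	/* Before: the caller pairs the put with an explicit free. */
	if (is_tdp_mmu_page(sp) && kvm_tdp_mmu_put_root(kvm, sp))
		kvm_tdp_mmu_free_root(kvm, sp);

	/*
	 * After: dropping the last reference zaps and frees the root
	 * inside kvm_tdp_mmu_put_root(), so the caller only puts.
	 */
	if (is_tdp_mmu_page(sp))
		kvm_tdp_mmu_put_root(kvm, sp);
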
 arch/x86/kvm/mmu/mmu.c     |  4 +--
 arch/x86/kvm/mmu/tdp_mmu.c | 54 ++++++++++++++++++--------------------
 arch/x86/kvm/mmu/tdp_mmu.h | 10 +------
 3 files changed, 28 insertions(+), 40 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 9c7ef7ca8bf6..47d996a8074f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3153,8 +3153,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 
 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
 
-	if (is_tdp_mmu_page(sp) && kvm_tdp_mmu_put_root(kvm, sp))
-		kvm_tdp_mmu_free_root(kvm, sp);
+	if (is_tdp_mmu_page(sp))
+		kvm_tdp_mmu_put_root(kvm, sp);
 	else if (!--sp->root_count && sp->role.invalid)
 		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 320cc4454737..279a725061f7 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -41,10 +41,31 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 	rcu_barrier();
 }
 
-static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+			  gfn_t start, gfn_t end, bool can_yield, bool flush);
+
+static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
 {
-	if (kvm_tdp_mmu_put_root(kvm, root))
-		kvm_tdp_mmu_free_root(kvm, root);
+	free_page((unsigned long)sp->spt);
+	kmem_cache_free(mmu_page_header_cache, sp);
+}
+
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+{
+	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+
+	lockdep_assert_held_write(&kvm->mmu_lock);
+
+	if (--root->root_count)
+		return;
+
+	WARN_ON(!root->tdp_mmu_page);
+
+	list_del(&root->link);
+
+	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
+
+	tdp_mmu_free_sp(root);
 }
 
 static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
@@ -66,7 +87,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	struct kvm_mmu_page *next_root;
 
 	next_root = list_next_entry(root, link);
-	tdp_mmu_put_root(kvm, root);
+	kvm_tdp_mmu_put_root(kvm, root);
 
 	return next_root;
 }
@@ -89,31 +110,6 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
 		} else
 
-static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield, bool flush);
-
-static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
-{
-	free_page((unsigned long)sp->spt);
-	kmem_cache_free(mmu_page_header_cache, sp);
-}
-
-void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
-{
-	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
-	lockdep_assert_held_write(&kvm->mmu_lock);
-
-	WARN_ON(root->root_count);
-	WARN_ON(!root->tdp_mmu_page);
-
-	list_del(&root->link);
-
-	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
-
-	tdp_mmu_free_sp(root);
-}
-
 static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
 						   int level)
 {
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index c9a081c786a5..d4e32ac5f4c9 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -6,7 +6,6 @@
 #include <linux/kvm_host.h>
 
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
-void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
 static inline void kvm_tdp_mmu_get_root(struct kvm *kvm,
 					struct kvm_mmu_page *root)
@@ -17,14 +16,7 @@ static inline void kvm_tdp_mmu_get_root(struct kvm *kvm,
 	++root->root_count;
 }
 
-static inline bool kvm_tdp_mmu_put_root(struct kvm *kvm,
-					struct kvm_mmu_page *root)
-{
-	lockdep_assert_held(&kvm->mmu_lock);
-	--root->root_count;
-
-	return !root->root_count;
-}
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 				 gfn_t end, bool can_yield, bool flush);
-- 
2.31.0.208.g409f899ff0-goog