Add a small wrapper to handle zapping a specific root.  For now, it's
little more than syntactic sugar, but in the future it will become a
unique flow with rules specific to zapping an unreachable root.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 9449cb5baf0b..31fb622249e5 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -79,11 +79,18 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 	tdp_mmu_free_sp(sp);
 }
 
+static bool tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
+			     bool shared)
+{
+	return zap_gfn_range(kvm, root, 0, -1ull, true, false, shared);
+}
+
 /*
  * Note, putting a root might sleep, i.e. the caller must have IRQs enabled and
  * must not explicitly disable preemption (it will be disabled by virtue of
  * holding mmu_lock, hence the lack of a might_sleep()).
  */
+
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
 			  bool shared)
 {
@@ -118,7 +125,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
 	 * should have been zapped by kvm_tdp_mmu_zap_invalidated_roots(), and
 	 * inserting new SPTEs under an invalid root is a KVM bug.
 	 */
-	if (zap_gfn_range(kvm, root, 0, -1ull, true, false, shared))
+	if (tdp_mmu_zap_root(kvm, root, shared))
 		WARN_ON_ONCE(root->role.invalid);
 
 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
@@ -923,7 +930,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm,
 		 * will still flush on yield, but that's a minor performance
 		 * blip and not a functional issue.
 		 */
-		(void)zap_gfn_range(kvm, root, 0, -1ull, true, false, true);
+		(void)tdp_mmu_zap_root(kvm, root, true);
 		kvm_tdp_mmu_put_root(kvm, root, true);
 	}
 }
-- 
2.34.0.rc2.393.gf8c9666880-goog