Rename 3 functions:

  kvm_mmu_get_page()   -> kvm_mmu_get_sp()
  kvm_mmu_alloc_page() -> kvm_mmu_alloc_sp()
  kvm_mmu_free_page()  -> kvm_mmu_free_sp()

This change makes it clear that these functions deal with shadow pages
rather than struct pages.

Signed-off-by: David Matlack <dmatlack@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 24b3cf53aa12..6f55af9c66db 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1679,7 +1679,7 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
-static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
+static void kvm_mmu_free_sp(struct kvm_mmu_page *sp)
 {
 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
 	hlist_del(&sp->hash_link);
@@ -1717,7 +1717,7 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
 	mmu_spte_clear_no_track(parent_pte);
 }
 
-static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
+static struct kvm_mmu_page *kvm_mmu_alloc_sp(struct kvm_vcpu *vcpu, int direct)
 {
 	struct kvm_mmu_page *sp;
 
@@ -2152,7 +2152,7 @@ static struct kvm_mmu_page *kvm_mmu_create_sp(struct kvm_vcpu *vcpu,
 
 	++vcpu->kvm->stat.mmu_cache_miss;
 
-	sp = kvm_mmu_alloc_page(vcpu, role.direct);
+	sp = kvm_mmu_alloc_sp(vcpu, role.direct);
 	sp->gfn = gfn;
 	sp->role = role;
 
@@ -2168,8 +2168,8 @@ static struct kvm_mmu_page *kvm_mmu_create_sp(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
-static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
-					     union kvm_mmu_page_role role)
+static struct kvm_mmu_page *kvm_mmu_get_sp(struct kvm_vcpu *vcpu, gfn_t gfn,
+					   union kvm_mmu_page_role role)
 {
 	struct kvm_mmu_page *sp;
 	bool created = false;
@@ -2208,7 +2208,7 @@ static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
 
 	role = kvm_mmu_child_role(parent_sp, direct, access);
 
-	return kvm_mmu_get_page(vcpu, gfn, role);
+	return kvm_mmu_get_sp(vcpu, gfn, role);
 }
 
 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
@@ -2478,7 +2478,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
 		WARN_ON(!sp->role.invalid || sp->root_count);
-		kvm_mmu_free_page(sp);
+		kvm_mmu_free_sp(sp);
 	}
 }
 
@@ -3406,7 +3406,7 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
 		role.quadrant = quadrant;
 	}
 
-	sp = kvm_mmu_get_page(vcpu, gfn, role);
+	sp = kvm_mmu_get_sp(vcpu, gfn, role);
 	++sp->root_count;
 
 	return __pa(sp->spt);
-- 
2.35.0.rc2.247.g8bbb082509-goog
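
An aside for readers less familiar with the KVM MMU, to make the rationale
concrete: an "sp" is the software bookkeeping structure for a shadow page,
not a struct page. The sketch below is a simplified illustration inferred
only from the fields this diff touches (sp->spt, sp->gfn, sp->role,
sp->hash_link, sp->root_count, and the link list node); the real definition
lives in arch/x86/kvm/mmu/mmu_internal.h and has many more members.

	/* Simplified sketch, not the actual kernel definition. */
	struct kvm_mmu_page {
		struct list_head link;		/* list node, e.g. for invalid_list */
		struct hlist_node hash_link;	/* gfn hash; unlinked in kvm_mmu_free_sp() */
		gfn_t gfn;			/* guest frame number being shadowed */
		union kvm_mmu_page_role role;	/* lookup key alongside gfn */
		u64 *spt;			/* the shadow page table page itself */
		int root_count;			/* pins the sp while in use as a root */
	};

With the old names, kvm_mmu_free_page(sp) read as if it freed a struct page;
kvm_mmu_free_sp(sp) makes it explicit that it tears down this shadow-page
metadata instead.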