On 3/7/2023 6:41 AM, Vipin Sharma wrote:
Add unused pages in mmu_shadowed_info_cache to the global MMU unused page
cache counter, i.e. kvm_total_unused_cached_pages. These pages will be
freed by the MMU shrinker in a future commit.
This patch mainly renames some functions, but the commit log doesn't
reflect what this patch does. Please change the commit log or squash the
patch.
Signed-off-by: Vipin Sharma <vipinsh@xxxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 3 ++-
arch/x86/kvm/mmu/mmu.c | 8 ++++----
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4322c7020d5d..185719dbeb81 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -792,7 +792,8 @@ struct kvm_vcpu_arch {
struct kvm_mmu_memory_cache mmu_page_header_cache;
/*
- * Protect allocation and release of pages from mmu_shadow_page_cache.
+ * Protect allocation and release of pages from mmu_shadow_page_cache
+ * and mmu_shadowed_info_cache.
*/
struct mutex mmu_shadow_page_cache_lock;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0a0962d8108b..b7ca31b5699c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -715,8 +715,8 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
return r;
if (maybe_indirect) {
- r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
- PT64_ROOT_MAX_LEVEL);
+ r = mmu_topup_sp_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
+ PT64_ROOT_MAX_LEVEL);
if (r)
return r;
}
@@ -729,8 +729,8 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
mutex_lock(&vcpu->arch.mmu_shadow_page_cache_lock);
mmu_free_sp_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
+ mmu_free_sp_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
mutex_unlock(&vcpu->arch.mmu_shadow_page_cache_lock);
- kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}
@@ -2197,7 +2197,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
sp->spt = mmu_sp_memory_cache_alloc(caches->shadow_page_cache);
if (!role.direct)
- sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
+ sp->shadowed_translation = mmu_sp_memory_cache_alloc(caches->shadowed_info_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);