Replace the @max param in mmu_topup_memory_cache() and instead use
ARRAY_SIZE() to terminate the loop that fills the cache.  This removes a
BUG_ON() and sets the stage for moving arm64 to the common memory cache
implementation.

No functional change intended.

Tested-by: Marc Zyngier <maz@xxxxxxxxxx>
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/arm64/kvm/mmu.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index a1f6bc70c4e4..9398b66f8a87 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -124,15 +124,13 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
 	put_page(virt_to_page(pudp));
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-				  int min, int max)
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
 {
 	void *page;
 
-	BUG_ON(max > KVM_NR_MEM_OBJS);
 	if (cache->nobjs >= min)
 		return 0;
-	while (cache->nobjs < max) {
+	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
 		page = (void *)__get_free_page(GFP_PGTABLE_USER);
 		if (!page)
 			return -ENOMEM;
@@ -1356,8 +1354,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 		pte = kvm_s2pte_mkwrite(pte);
 
 		ret = mmu_topup_memory_cache(&cache,
-					     kvm_mmu_cache_min_pages(kvm),
-					     KVM_NR_MEM_OBJS);
+					     kvm_mmu_cache_min_pages(kvm));
 		if (ret)
 			goto out;
 		spin_lock(&kvm->mmu_lock);
@@ -1737,8 +1734,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	up_read(&current->mm->mmap_sem);
 
 	/* We need minimum second+third level pages */
-	ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
-				     KVM_NR_MEM_OBJS);
+	ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
 	if (ret)
 		return ret;
 
-- 
2.26.0
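
A note on the idiom, for readers outside the KVM code: bounding the fill
loop with ARRAY_SIZE() on the cache's own backing array makes the capacity
a compile-time property of the structure, so the runtime BUG_ON() guarding
a caller-supplied @max becomes unnecessary.  Below is a minimal,
self-contained userspace sketch of that pattern; the names (obj_cache,
cache_topup, the 40-entry array, calloc as the allocator) are illustrative
stand-ins, not the kernel's actual kvm_mmu_memory_cache helpers.

/*
 * Sketch only: cache_topup() refills the cache to full capacity whenever
 * fewer than @min objects remain, with the loop bound derived from the
 * array itself rather than a caller-supplied maximum.
 */
#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct obj_cache {
	int nobjs;
	void *objects[40];	/* capacity lives in the type itself */
};

static int cache_topup(struct obj_cache *cache, int min)
{
	void *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < (int)ARRAY_SIZE(cache->objects)) {
		page = calloc(1, 4096);	/* stand-in for __get_free_page() */
		if (!page)
			return -1;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

int main(void)
{
	struct obj_cache cache = { 0 };

	if (cache_topup(&cache, 3) != 0)
		return 1;
	printf("cache holds %d objects\n", cache.nobjs);

	for (int i = 0; i < cache.nobjs; i++)
		free(cache.objects[i]);
	return 0;
}

Because the bound and the array can no longer disagree, dropping @max is
what lets this helper later collapse into the common cross-architecture
implementation the changelog mentions.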