All callers of mmu_topup_memory_cache() pass the same min/max limits.
Simplify the code by just passing the 'struct kvm' instead.

Cc: Marc Zyngier <maz@xxxxxxxxxx>
Cc: Quentin Perret <qperret@xxxxxxxxxx>
Signed-off-by: Will Deacon <will@xxxxxxxxxx>
---
 arch/arm64/kvm/mmu.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 9102373a9744..e55a28178164 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -124,20 +124,22 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
 	put_page(virt_to_page(pudp));
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-				  int min, int max)
+static int mmu_topup_memory_cache(struct kvm *kvm,
+				  struct kvm_mmu_memory_cache *cache)
 {
 	void *page;
 
-	BUG_ON(max > KVM_NR_MEM_OBJS);
-	if (cache->nobjs >= min)
+	if (cache->nobjs >= kvm_mmu_cache_min_pages(kvm))
 		return 0;
-	while (cache->nobjs < max) {
+
+	while (cache->nobjs < KVM_NR_MEM_OBJS) {
 		page = (void *)__get_free_page(GFP_PGTABLE_USER);
 		if (!page)
 			return -ENOMEM;
+
 		cache->objects[cache->nobjs++] = page;
 	}
+
 	return 0;
 }
 
@@ -1480,9 +1482,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 		if (writable)
 			pte = kvm_s2pte_mkwrite(pte);
 
-		ret = mmu_topup_memory_cache(&cache,
-					     kvm_mmu_cache_min_pages(kvm),
-					     KVM_NR_MEM_OBJS);
+		ret = mmu_topup_memory_cache(kvm, &cache);
 		if (ret)
 			break;
 		spin_lock(&kvm->mmu_lock);
@@ -1880,8 +1880,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	mmap_read_unlock(current->mm);
 
 	/* We need minimum second+third level pages */
-	ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
-				     KVM_NR_MEM_OBJS);
+	ret = mmu_topup_memory_cache(kvm, memcache);
 	if (ret)
 		return ret;
 
-- 
2.28.0.rc0.105.gf9edc3c819-goog
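
For readers following along outside the kernel tree, a minimal userspace
sketch of the resulting topup pattern might look like the code below. It
only illustrates the calling convention after the change and is not kernel
code: struct vm, struct memory_cache, NR_MEM_OBJS and cache_min_pages() are
made-up stand-ins for struct kvm, struct kvm_mmu_memory_cache,
KVM_NR_MEM_OBJS and kvm_mmu_cache_min_pages(), and deriving the minimum from
a stage-2 level count is only an assumption based on the "second+third level
pages" comment in the patch.

/*
 * Standalone sketch: callers pass only the VM handle; the helper derives
 * its own minimum fill level and always tops the cache up to capacity.
 */
#include <stdlib.h>

#define NR_MEM_OBJS	40	/* stand-in for KVM_NR_MEM_OBJS */

struct vm {
	int stage2_levels;	/* made-up field for this sketch */
};

struct memory_cache {
	int nobjs;
	void *objects[NR_MEM_OBJS];
};

/* Per-VM minimum; assumed to track the stage-2 page-table depth. */
static int cache_min_pages(struct vm *vm)
{
	return vm->stage2_levels - 1;
}

static int topup_memory_cache(struct vm *vm, struct memory_cache *cache)
{
	void *page;

	/* Enough objects cached already? Then there is nothing to do. */
	if (cache->nobjs >= cache_min_pages(vm))
		return 0;

	/* Otherwise refill all the way to capacity. */
	while (cache->nobjs < NR_MEM_OBJS) {
		page = calloc(1, 4096);	/* stands in for __get_free_page() */
		if (!page)
			return -1;	/* -ENOMEM in the kernel */

		cache->objects[cache->nobjs++] = page;
	}

	return 0;
}

int main(void)
{
	struct vm vm = { .stage2_levels = 3 };
	struct memory_cache cache = { 0 };

	return topup_memory_cache(&vm, &cache);
}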