We're about to add another mmu cache. Stuff the current one in a sub
struct so it's easier to pass them all to ->zalloc_page().

No functional change intended.

Signed-off-by: Oliver Upton <oupton@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h |  4 +++-
 arch/arm64/kvm/mmu.c              | 14 +++++++-------
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 94a27a7520f4..c8947597a619 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -372,7 +372,9 @@ struct kvm_vcpu_arch {
 	bool pause;
 
 	/* Cache some mmu pages needed inside spinlock regions */
-	struct kvm_mmu_memory_cache mmu_page_cache;
+	struct kvm_mmu_caches {
+		struct kvm_mmu_memory_cache page_cache;
+	} mmu_caches;
 
 	/* Target CPU and feature flags */
 	int target;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index f29d5179196b..7a588928740a 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -91,10 +91,10 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 
 static void *stage2_memcache_zalloc_page(void *arg)
 {
-	struct kvm_mmu_memory_cache *mc = arg;
+	struct kvm_mmu_caches *mmu_caches = arg;
 
 	/* Allocated with __GFP_ZERO, so no need to zero */
-	return kvm_mmu_memory_cache_alloc(mc);
+	return kvm_mmu_memory_cache_alloc(&mmu_caches->page_cache);
 }
 
 static void *kvm_host_zalloc_pages_exact(size_t size)
@@ -1073,7 +1073,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	bool shared;
 	unsigned long mmu_seq;
 	struct kvm *kvm = vcpu->kvm;
-	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+	struct kvm_mmu_caches *mmu_caches = &vcpu->arch.mmu_caches;
 	struct vm_area_struct *vma;
 	short vma_shift;
 	gfn_t gfn;
@@ -1160,7 +1160,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * and a write fault needs to collapse a block entry into a table.
 	 */
 	if (fault_status != FSC_PERM || (logging_active && write_fault)) {
-		ret = kvm_mmu_topup_memory_cache(memcache,
+		ret = kvm_mmu_topup_memory_cache(&mmu_caches->page_cache,
 						 kvm_mmu_cache_min_pages(kvm));
 		if (ret)
 			return ret;
@@ -1273,7 +1273,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	} else {
 		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
 					     __pfn_to_phys(pfn), prot,
-					     memcache);
+					     mmu_caches);
 	}
 
 	/* Mark the page dirty only if the fault is handled successfully */
@@ -1603,12 +1603,12 @@ int kvm_mmu_init(u32 *hyp_va_bits)
 
 void kvm_mmu_vcpu_init(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+	vcpu->arch.mmu_caches.page_cache.gfp_zero = __GFP_ZERO;
 }
 
 void kvm_mmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_caches.page_cache);
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
-- 
2.36.0.rc0.470.gd361397f0d-goog
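
A note on the shape of the change: the stage-2 page-table code hands an opaque
void *arg to its ->zalloc_page() callback (see stage2_memcache_zalloc_page()
above), so bundling the vCPU-local caches behind one wrapper struct lets a
future cache ride along without touching the callback signature. Below is a
minimal, self-contained user-space sketch of that pattern; the names
(demo_cache, demo_caches, zalloc_page, the 8-entry pool, the 4096-byte page
size) are illustrative stand-ins, not the kernel's API.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for kvm_mmu_memory_cache: a tiny pool of pre-zeroed pages. */
struct demo_cache {
	int nobjs;
	void *objs[8];
};

/* The wrapper: one cache today, several tomorrow, one pointer either way. */
struct demo_caches {
	struct demo_cache page_cache;
	/* a future cache would slot in here */
};

static void *demo_cache_alloc(struct demo_cache *mc)
{
	/* Pop a cached page if we have one, otherwise fall back to calloc(). */
	return mc->nobjs ? mc->objs[--mc->nobjs] : calloc(1, 4096);
}

/* Mirrors stage2_memcache_zalloc_page(): the opaque arg is the wrapper,
 * and the callback picks the member cache it needs. */
static void *zalloc_page(void *arg)
{
	struct demo_caches *caches = arg;

	return demo_cache_alloc(&caches->page_cache);
}

int main(void)
{
	struct demo_caches caches = { 0 };
	void *page = zalloc_page(&caches);

	printf("allocated %p\n", page);
	free(page);
	return 0;
}

With that arrangement, the follow-up patch can add another kvm_mmu_memory_cache
member to kvm_mmu_caches, and each allocator callback simply dereferences the
member it needs from the single pointer it already receives.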