On Fri, Oct 15, 2021, Paolo Bonzini wrote:
> diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
> index 21427e84a82e..0d9842472288 100644
> --- a/arch/x86/kvm/mmu/page_track.c
> +++ b/arch/x86/kvm/mmu/page_track.c
> @@ -36,8 +36,7 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
>
>  	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
>  		slot->arch.gfn_track[i] =
> -			kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
> -				 GFP_KERNEL_ACCOUNT);
> +			vcalloc(npages, sizeof(*slot->arch.gfn_track[i]));

This loses the memcg accounting, which is somewhat important for the
theoretical 4MiB allocations :-)

Maybe split out the introduction of vcalloc() to a separate patch (or two)
and introduce additional helpers to allow passing in gfp_t to e.g.
__vzalloc()?

>  		if (!slot->arch.gfn_track[i])
>  			goto track_free;
>  	}
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index aabd3a2ec1bc..07f5760ea30c 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -11394,7 +11394,7 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
>
>  		WARN_ON(slot->arch.rmap[i]);
>
> -		slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
> +		slot->arch.rmap[i] = vcalloc(lpages, sz);
>  		if (!slot->arch.rmap[i]) {
>  			memslot_rmap_free(slot);
>  			return -ENOMEM;
> @@ -11475,7 +11475,7 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
>
>  		lpages = __kvm_mmu_slot_lpages(slot, npages, level);
>
> -		linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
> +		linfo = vcalloc(lpages, sizeof(*linfo));
>  		if (!linfo)
>  			goto out_free;

All of the associated free paths should be converted to vfree().
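
On the helper suggestion above: something roughly like the below would let
KVM keep GFP_KERNEL_ACCOUNT.  Completely untested, and the __vcalloc() name
and placement are just a strawman, not an existing API:

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/vmalloc.h>

/*
 * Strawman sketch of a gfp_t-aware vcalloc() variant.  __vmalloc()
 * already takes a gfp mask, and __GFP_ZERO provides the zeroing that
 * kvcalloc() callers expect.
 */
static inline void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	/* Reject n * size overflow, same as the kcalloc()/kvcalloc() family. */
	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return __vmalloc(bytes, flags | __GFP_ZERO);
}

The call sites would then pass GFP_KERNEL_ACCOUNT as before, e.g.

	slot->arch.gfn_track[i] =
		__vcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
			  GFP_KERNEL_ACCOUNT);

with the matching free paths switching from kvfree() to vfree().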