On Wed, Dec 21, 2022 at 06:34:56PM -0800, Vipin Sharma wrote:
> Make split_shadow_page_cache NUMA aware and allocate page table's pages
> during the split based on the underlying physical page's NUMA node.
>
> Signed-off-by: Vipin Sharma <vipinsh@xxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h |  2 +-
>  arch/x86/kvm/mmu/mmu.c          | 50 ++++++++++++++++++---------------
>  2 files changed, 29 insertions(+), 23 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index b1f319ad6f89..7b3f36ae37a4 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1410,7 +1410,7 @@ struct kvm_arch {
>  	 *
>  	 * Protected by kvm->slots_lock.
>  	 */
> -	struct kvm_mmu_memory_cache split_shadow_page_cache;
> +	struct kvm_mmu_memory_cache split_shadow_page_cache[MAX_NUMNODES];
>  	struct kvm_mmu_memory_cache split_page_header_cache;
>
>  	/*
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 511c6ef265ee..7454bfc49a51 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6126,7 +6126,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
>  int kvm_mmu_init_vm(struct kvm *kvm)
>  {
>  	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
> -	int r;
> +	int r, nid;
>
>  	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
>  	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
> @@ -6145,8 +6145,9 @@ int kvm_mmu_init_vm(struct kvm *kvm)
>  	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_page_header_cache,
>  				  mmu_page_header_cache, NUMA_NO_NODE);
>
> -	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_shadow_page_cache,
> -				  NULL, NUMA_NO_NODE);
> +	for_each_node(nid)
> +		INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_shadow_page_cache[nid],
> +					  NULL, NUMA_NO_NODE);
				                ^^^^^^^^^^^^
Should this be nid?
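
Passing NUMA_NO_NODE for every entry looks like a copy-paste leftover: each
per-node cache would still allocate with no node preference, which defeats
the point of the split_shadow_page_cache[] array. I'd expect something like
the below (just a sketch, assuming the macro's third argument is the node id
to allocate from, as the new array indexing suggests):

	for_each_node(nid)
		INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_shadow_page_cache[nid],
					  NULL, nid);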