tdp_mmu_alloc_sp_for_split() allocates page tables for Eager Page
Splitting. Currently it does not specify a NUMA node preference, so it
will try to allocate from the node local to the allocating thread. That
thread is supplied by userspace and may not be running on the node where
it would be best for the page tables to be allocated.

We can improve TDP MMU eager page splitting by making
tdp_mmu_alloc_sp_for_split() NUMA-aware. Specifically, when splitting a
huge page, allocate the new lower level page tables on the same node as
the huge page.

__get_free_page() is replaced by alloc_pages_node(). This introduces two
functional changes:

1. __get_free_page() strips the __GFP_HIGHMEM flag via its call to
   __get_free_pages(). This should not be an issue, as __GFP_HIGHMEM is
   never passed to tdp_mmu_alloc_sp_for_split() anyway.

2. __get_free_page() calls alloc_pages(), which uses the calling
   thread's mempolicy to pick the NUMA node. After this commit, the
   thread's mempolicy is no longer used; the first preference is to
   allocate on the node where the huge page resides.

dirty_log_perf_test with 416 vCPUs and 1GB/vCPU on an 8 NUMA node
machine showed dirty memory time improvements between 2% and 35% across
multiple runs.

Suggested-by: David Matlack <dmatlack@xxxxxxxxxx>
Signed-off-by: Vipin Sharma <vipinsh@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index bf2ccf9debcaa..1e30e18fc6a03 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1402,9 +1402,19 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
 	return spte_set;
 }
 
-static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
+/*
+ * Caller's responsibility to pass a valid spte which has the shadow page
+ * present.
+ */
+static int tdp_mmu_spte_to_nid(u64 spte)
+{
+	return page_to_nid(pfn_to_page(spte_to_pfn(spte)));
+}
+
+static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(int nid, gfp_t gfp)
 {
 	struct kvm_mmu_page *sp;
+	struct page *spt_page;
 
 	gfp |= __GFP_ZERO;
 
@@ -1412,11 +1422,12 @@ static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
 	if (!sp)
 		return NULL;
 
-	sp->spt = (void *)__get_free_page(gfp);
-	if (!sp->spt) {
+	spt_page = alloc_pages_node(nid, gfp, 0);
+	if (!spt_page) {
 		kmem_cache_free(mmu_page_header_cache, sp);
 		return NULL;
 	}
+	sp->spt = page_address(spt_page);
 
 	return sp;
 }
@@ -1426,6 +1437,9 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
 					    bool shared)
 {
 	struct kvm_mmu_page *sp;
+	int nid;
+
+	nid = tdp_mmu_spte_to_nid(iter->old_spte);
 
 	/*
 	 * Since we are allocating while under the MMU lock we have to be
@@ -1436,7 +1450,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
 	 * If this allocation fails we drop the lock and retry with reclaim
 	 * allowed.
 	 */
-	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
+	sp = __tdp_mmu_alloc_sp_for_split(nid, GFP_NOWAIT | __GFP_ACCOUNT);
 	if (sp)
 		return sp;
 
@@ -1448,7 +1462,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
 	write_unlock(&kvm->mmu_lock);
 
 	iter->yielded = true;
-	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
+	sp = __tdp_mmu_alloc_sp_for_split(nid, GFP_KERNEL_ACCOUNT);
 
 	if (shared)
 		read_lock(&kvm->mmu_lock);
-- 
2.37.1.455.g008518b4e5-goog
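
For context, below is a minimal sketch (not part of the patch; the
helper name alloc_spt_on_node() is made up for illustration) of how the
new allocation path behaves. alloc_pages_node() treats the node id as a
preference rather than a hard requirement, so if the huge page's node
has no free memory the page table page can still be allocated elsewhere:

	/*
	 * Illustrative only: allocate one zeroed page for a page table,
	 * preferring the NUMA node that backs the huge page mapped by @spte.
	 */
	static void *alloc_spt_on_node(u64 spte, gfp_t gfp)
	{
		int nid = page_to_nid(pfn_to_page(spte_to_pfn(spte)));
		struct page *page;

		/*
		 * A preference, not a requirement: without __GFP_THISNODE the
		 * allocator may fall back to other nodes if @nid is exhausted.
		 */
		page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);

		return page ? page_address(page) : NULL;
	}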