Add a numa_aware_pagetable module param to make page table allocations
NUMA aware. When the param is enabled, kvm_mmu_get_free_page() tries to
allocate the page table page on the requested node and falls back to a
normal allocation if that fails or if the kernel is built without
CONFIG_NUMA.

Signed-off-by: Vipin Sharma <vipinsh@xxxxxxxxxx>
---
 include/linux/kvm_host.h |  2 ++
 virt/kvm/kvm_main.c      | 22 ++++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index efd9b38ea9a2..d48064503b88 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1358,6 +1358,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
 
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 
+void *kvm_mmu_get_free_page(int nid, gfp_t gfp);
+
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f2d762878b97..d96c8146e9ba 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -93,6 +93,13 @@ unsigned int halt_poll_ns_shrink;
 module_param(halt_poll_ns_shrink, uint, 0644);
 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
 
+/*
+ * If possible, allocate page table pages on the same NUMA node as the
+ * underlying physical page they map.
+ */
+static bool __read_mostly numa_aware_pagetable = true;
+module_param_named(numa_aware_pagetable, numa_aware_pagetable, bool, 0644);
+
 /*
  * Ordering of locks:
  *
@@ -384,6 +391,21 @@ static void kvm_flush_shadow_all(struct kvm *kvm)
 	kvm_arch_guest_memory_reclaimed(kvm);
 }
 
+void *kvm_mmu_get_free_page(int nid, gfp_t gfp)
+{
+#ifdef CONFIG_NUMA
+	struct page *spt_page;
+
+	if (numa_aware_pagetable) {
+		spt_page = alloc_pages_node(nid, gfp, 0);
+		if (spt_page)
+			return page_address(spt_page);
+	}
+#endif /* CONFIG_NUMA */
+
+	return (void *)__get_free_page(gfp);
+}
+
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 					       gfp_t gfp_flags)
-- 
2.39.0.314.g84b9a713c41-goog
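
A usage sketch for reviewers (not part of this patch): callers are
expected to derive nid from the physical page that the new page table
page will map. The helper name tdp_mmu_alloc_sp_for_pfn() below is
hypothetical and only illustrates one way a caller could pick the node;
pfn_valid(), pfn_to_page(), page_to_nid(), and NUMA_NO_NODE are
existing kernel APIs.

/*
 * Hypothetical caller: allocate a page table page on the node of the
 * backing physical page, falling back to "no preference" when the pfn
 * has no struct page (e.g. MMIO).
 */
static void *tdp_mmu_alloc_sp_for_pfn(kvm_pfn_t pfn, gfp_t gfp)
{
	int nid = NUMA_NO_NODE;

	if (pfn_valid(pfn))
		nid = page_to_nid(pfn_to_page(pfn));

	/*
	 * kvm_mmu_get_free_page() itself falls back to __get_free_page()
	 * if the node-local allocation fails or CONFIG_NUMA is off.
	 */
	return kvm_mmu_get_free_page(nid, gfp);
}

With numa_aware_pagetable=false, or on !CONFIG_NUMA kernels, this
degrades to plain __get_free_page() behavior, so the module param acts
as a runtime kill switch for the node-local allocation policy.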