On Wed, 4 May 2022, Zach O'Keefe wrote:

> @@ -1069,10 +1067,34 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
>  	return true;
>  }
>  
> -static void collapse_huge_page(struct mm_struct *mm,
> -			       unsigned long address,
> -			       struct page **hpage,
> -			       int node, int referenced, int unmapped)
> +static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
> +			      struct collapse_control *cc)
> +{
> +#ifdef CONFIG_NUMA
> +	const struct cpumask *cpumask;
> +#endif
> +	gfp_t gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
> +	int node = khugepaged_find_target_node(cc);
> +
> +#ifdef CONFIG_NUMA
> +	/* sched to specified node before huge page memory copy */
> +	if (task_node(current) != node) {
> +		cpumask = cpumask_of_node(node);
> +		if (!cpumask_empty(cpumask))
> +			set_cpus_allowed_ptr(current, cpumask);
> +	}
> +#endif
> +	if (!khugepaged_alloc_page(hpage, gfp, node))
> +		return SCAN_ALLOC_HUGE_PAGE_FAIL;
> +	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
> +		return SCAN_CGROUP_CHARGE_FAIL;
> +	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
> +	return SCAN_SUCCEED;
> +}

Lots of ifdefs here; I wonder if we can define a helper function that is a
no-op when CONFIG_NUMA is disabled and call into it here instead.
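Something along these lines, as a rough sketch (the name
sched_collapse_to_node() is made up for illustration, not an existing
kernel function):

	/*
	 * Migrate the caller to the target node before the huge page
	 * memory copy.  Compiles to a no-op when CONFIG_NUMA is disabled,
	 * so alloc_charge_hpage() needs no #ifdefs of its own.
	 */
	#ifdef CONFIG_NUMA
	static void sched_collapse_to_node(int node)
	{
		const struct cpumask *cpumask;

		if (task_node(current) == node)
			return;

		cpumask = cpumask_of_node(node);
		if (!cpumask_empty(cpumask))
			set_cpus_allowed_ptr(current, cpumask);
	}
	#else
	static inline void sched_collapse_to_node(int node)
	{
	}
	#endif

Then alloc_charge_hpage() could simply call sched_collapse_to_node(node)
unconditionally.

Otherwise this looks like a nice unification.  After this is cleaned up,
feel free to add

	Acked-by: David Rientjes <rientjes@xxxxxxxxxx>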