alloc_charge_folio() allocates the new folio for the khugepaged collapse.
Generalize the order of the folio allocation to support future mTHP
collapsing. No functional change in this patch.

Signed-off-by: Nico Pache <npache@xxxxxxxxxx>
---
 mm/khugepaged.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e2e6ca9265ab..6daf3a943a1a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1070,14 +1070,14 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
 }
 
 static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
-			      struct collapse_control *cc)
+			      struct collapse_control *cc, int order)
 {
 	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
 		     GFP_TRANSHUGE);
 	int node = khugepaged_find_target_node(cc);
 	struct folio *folio;
 
-	folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
+	folio = __folio_alloc(gfp, order, node, &cc->alloc_nmask);
 	if (!folio) {
 		*foliop = NULL;
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
@@ -1121,7 +1121,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	 */
 	mmap_read_unlock(mm);
 
-	result = alloc_charge_folio(&folio, mm, cc);
+	result = alloc_charge_folio(&folio, mm, cc, HPAGE_PMD_ORDER);
 	if (result != SCAN_SUCCEED)
 		goto out_nolock;
 
@@ -1834,7 +1834,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
-	result = alloc_charge_folio(&new_folio, mm, cc);
+	result = alloc_charge_folio(&new_folio, mm, cc, HPAGE_PMD_ORDER);
 	if (result != SCAN_SUCCEED)
 		goto out;
 
-- 
2.47.1
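
For illustration only, not part of this patch: a minimal sketch of where
the new parameter is headed. collapse_mthp_range() and the order-4 example
are hypothetical names and values; the sketch only shows how a future mTHP
collapse path could call the generalized helper with a sub-PMD order
instead of the fixed HPAGE_PMD_ORDER.

/*
 * Hypothetical future caller (assumption, not in this series): collapse
 * an order-sized, order-aligned range instead of a full PMD range.
 */
static int collapse_mthp_range(struct mm_struct *mm,
			       struct collapse_control *cc, int order)
{
	struct folio *folio;
	int result;

	/* e.g. order == 4 would allocate and charge a 16-page folio */
	result = alloc_charge_folio(&folio, mm, cc, order);
	if (result != SCAN_SUCCEED)
		return result;

	/* ... collapse the order-aligned range into 'folio' ... */
	return SCAN_SUCCEED;
}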