This is a note to let you know that I've just added the patch titled

    khugepaged: convert alloc_charge_hpage to alloc_charge_folio

to the 6.6-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
    khugepaged-convert-alloc_charge_hpage-to-alloc_charg.patch
and it can be found in the queue-6.6 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 64f56409384b5eb2b3a82c0c9ef725f63a8a5359
Author: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Date:   Wed Apr 3 18:18:31 2024 +0100

    khugepaged: convert alloc_charge_hpage to alloc_charge_folio

    [ Upstream commit d5ab50b9412c0bba750eef5a34fd2937de1aee55 ]

    Both callers want to deal with a folio, so return a folio from this
    function.

    Link: https://lkml.kernel.org/r/20240403171838.1445826-3-willy@xxxxxxxxxxxxx
    Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
    Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
    Stable-dep-of: 37f0b47c5143 ("mm: khugepaged: fix the arguments order in khugepaged_collapse_file trace point")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index d0fcfa47085b4..b197323450b5a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1041,7 +1041,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
 	return result;
 }
 
-static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
+static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
 			      struct collapse_control *cc)
 {
 	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
@@ -1051,7 +1051,7 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 
 	folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
 	if (!folio) {
-		*hpage = NULL;
+		*foliop = NULL;
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
 	}
@@ -1059,13 +1059,13 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
 		folio_put(folio);
-		*hpage = NULL;
+		*foliop = NULL;
 		return SCAN_CGROUP_CHARGE_FAIL;
 	}
 
 	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
 
-	*hpage = folio_page(folio, 0);
+	*foliop = folio;
 	return SCAN_SUCCEED;
 }
 
@@ -1094,7 +1094,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	 */
 	mmap_read_unlock(mm);
 
-	result = alloc_charge_hpage(&hpage, mm, cc);
+	result = alloc_charge_folio(&folio, mm, cc);
+	hpage = &folio->page;
 	if (result != SCAN_SUCCEED)
 		goto out_nolock;
 
@@ -1197,7 +1198,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	if (unlikely(result != SCAN_SUCCEED))
 		goto out_up_write;
 
-	folio = page_folio(hpage);
 	/*
 	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
 	 * copy_huge_page writes become visible before the set_pmd_at()
@@ -1786,7 +1786,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	struct page *hpage;
 	struct page *page;
 	struct page *tmp;
-	struct folio *folio;
+	struct folio *folio, *new_folio;
 	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
 	LIST_HEAD(pagelist);
 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1797,7 +1797,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
-	result = alloc_charge_hpage(&hpage, mm, cc);
+	result = alloc_charge_folio(&new_folio, mm, cc);
+	hpage = &new_folio->page;
 	if (result != SCAN_SUCCEED)
 		goto out;