The patch titled
     Subject: mm/hugetlb_cgroup: convert hugetlb_cgroup_uncharge_page() to folios
has been added to the -mm mm-unstable branch.  Its filename is
     mm-hugetlb_cgroup-convert-hugetlb_cgroup_uncharge_page-to-folios.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-hugetlb_cgroup-convert-hugetlb_cgroup_uncharge_page-to-folios.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
Subject: mm/hugetlb_cgroup: convert hugetlb_cgroup_uncharge_page() to folios
Date: Thu, 13 Oct 2022 20:13:01 -0700

Continue to use a folio inside free_huge_page() by converting
hugetlb_cgroup_uncharge_page*() to folios.

Link: https://lkml.kernel.org/r/20221014031303.231740-8-sidhartha.kumar@xxxxxxxxxx
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
Cc: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxx>
Cc: Bui Quang Minh <minhquangbui99@xxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Mina Almasry <almasrymina@xxxxxxxxxx>
Cc: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/hugetlb_cgroup.h |   16 ++++++++--------
 mm/hugetlb.c                   |   15 +++++++++------
 mm/hugetlb_cgroup.c            |   21 ++++++++++-----------
 3 files changed, 27 insertions(+), 25 deletions(-)

--- a/include/linux/hugetlb_cgroup.h~mm-hugetlb_cgroup-convert-hugetlb_cgroup_uncharge_page-to-folios
+++ a/include/linux/hugetlb_cgroup.h
@@ -158,10 +158,10 @@ extern void hugetlb_cgroup_commit_charge
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					       struct hugetlb_cgroup *h_cg,
					       struct page *page);
-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					 struct page *page);
-extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-					      struct page *page);
+extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					  struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+					       struct folio *folio);
 
 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					    struct hugetlb_cgroup *h_cg);
@@ -254,14 +254,14 @@ hugetlb_cgroup_commit_charge_rsvd(int id
 {
 }
 
-static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-						struct page *page)
+static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+						 struct folio *folio)
 {
 }
 
-static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
+static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
						      unsigned long nr_pages,
-						      struct page *page)
+						      struct folio *folio)
 {
 }
 
 static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
--- a/mm/hugetlb.c~mm-hugetlb_cgroup-convert-hugetlb_cgroup_uncharge_page-to-folios
+++ a/mm/hugetlb.c
@@ -1734,10 +1734,10 @@ void free_huge_page(struct page *page)
 
	spin_lock_irqsave(&hugetlb_lock, flags);
	folio_clear_hugetlb_migratable(folio);
-	hugetlb_cgroup_uncharge_page(hstate_index(h),
-				     pages_per_huge_page(h), page);
-	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					  pages_per_huge_page(h), page);
+	hugetlb_cgroup_uncharge_folio(hstate_index(h),
+				      pages_per_huge_page(h), folio);
+	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					   pages_per_huge_page(h), folio);
	if (restore_reserve)
		h->resv_huge_pages++;
 
@@ -2862,6 +2862,7 @@ struct page *alloc_huge_page(struct vm_a
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
+	struct folio *folio;
	long map_chg, map_commit;
	long gbl_chg;
	int ret, idx;
@@ -2925,6 +2926,7 @@ struct page *alloc_huge_page(struct vm_a
	 * a reservation exists for the allocation.
	 */
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
+
	if (!page) {
		spin_unlock_irq(&hugetlb_lock);
		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
@@ -2939,6 +2941,7 @@ struct page *alloc_huge_page(struct vm_a
		set_page_refcounted(page);
		/* Fall through */
	}
+	folio = page_folio(page);
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	/* If allocation is not consuming a reservation, also store the
	 * hugetlb_cgroup pointer on the page.
@@ -2968,8 +2971,8 @@ struct page *alloc_huge_page(struct vm_a
		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
		hugetlb_acct_memory(h, -rsv_adjust);
		if (deferred_reserve)
-			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					pages_per_huge_page(h), page);
+			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					pages_per_huge_page(h), folio);
	}
	return page;
--- a/mm/hugetlb_cgroup.c~mm-hugetlb_cgroup-convert-hugetlb_cgroup_uncharge_page-to-folios
+++ a/mm/hugetlb_cgroup.c
@@ -346,11 +346,10 @@ void hugetlb_cgroup_commit_charge_rsvd(i
 /*
  * Should be called with hugetlb_lock held
  */
-static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					   struct page *page, bool rsvd)
+static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					    struct folio *folio, bool rsvd)
 {
	struct hugetlb_cgroup *h_cg;
-	struct folio *folio = page_folio(page);
 
	if (hugetlb_cgroup_disabled())
		return;
@@ -368,27 +367,27 @@ static void __hugetlb_cgroup_uncharge_pa
		css_put(&h_cg->css);
	else {
		unsigned long usage =
-			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
+			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
-		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
+		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage - nr_pages);
	}
 }
 
-void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-				  struct page *page)
+void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+				   struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
 }
 
-void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-				       struct page *page)
+void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+					struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
 }
 
 static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
_

Patches currently in -mm which might be from sidhartha.kumar@xxxxxxxxxx are

mm-hugetlb_cgroup-convert-__set_hugetlb_cgroup-to-folios.patch
mm-hugetlb_cgroup-convert-hugetlb_cgroup_from_page-to-folios.patch
mm-hugetlb_cgroup-convert-set_hugetlb_cgroup-to-folios.patch
mm-hugetlb_cgroup-convert-hugetlb_cgroup_migrate-to-folios.patch
mm-hugetlb-convert-isolate_or_dissolve_huge_page-to-folios.patch
mm-hugetlb-convert-free_huge_page-to-folios.patch
mm-hugetlb_cgroup-convert-hugetlb_cgroup_uncharge_page-to-folios.patch
mm-hugeltb_cgroup-convert-hugetlb_cgroup_commit_charge-to-folios.patch
mm-hugetlb-convert-move_hugetlb_state-to-folios.patch
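
For readers following the series, the conversion pattern the patch applies can
be summarized in a short sketch. This is an illustrative fragment only, not
kernel source: example_free_path() is a hypothetical caller invented here for
demonstration, while page_folio(), folio_nid(), hstate_index(),
pages_per_huge_page() and the renamed hugetlb_cgroup_uncharge_folio*() entry
points are the real helpers this patch uses or introduces. The caller derives
the folio from the page once, and every callee then works on the folio
directly instead of re-deriving per-page state:

/*
 * Illustrative sketch only -- example_free_path() is hypothetical.
 * The pattern: convert struct page to its folio once with
 * page_folio(), then pass the folio down, so callees can use
 * folio helpers such as folio_nid() instead of page_to_nid().
 */
static void example_free_path(struct hstate *h, struct page *page)
{
	struct folio *folio = page_folio(page);	/* single conversion */

	/* Both uncharge paths now take the folio directly. */
	hugetlb_cgroup_uncharge_folio(hstate_index(h),
				      pages_per_huge_page(h), folio);
	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					   pages_per_huge_page(h), folio);
}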