On 2024/3/22 21:00, Matthew Wilcox wrote:
> On Fri, Mar 22, 2024 at 04:23:59PM +0800, Miaohe Lin wrote:
>>> +++ b/mm/hugetlb.c
>>> @@ -1796,7 +1796,8 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
>>>  		destroy_compound_gigantic_folio(folio, huge_page_order(h));
>>>  		free_gigantic_folio(folio, huge_page_order(h));
>>>  	} else {
>>> -		__free_pages(&folio->page, huge_page_order(h));
>>> +		INIT_LIST_HEAD(&folio->_deferred_list);
>>
>> Will it be better to add a comment to explain why INIT_LIST_HEAD is needed ?

Sorry for the late reply; I was on off-the-job training last week. It was really tiring. :(

> Maybe?  Something like
> 	/* We reused this space for our own purposes */

This one looks good to me.

>
>>> +		folio_put(folio);
>>
>> Can all __free_pages be replaced with folio_put in mm/hugetlb.c?
>
> There's only one left, and indeed it can!
>
> I'll drop this into my tree and send it as a proper patch later.
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 333f6278ef63..43cc7e6bc374 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -2177,13 +2177,13 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
>  		nodemask_t *node_alloc_noretry)
>  {
>  	int order = huge_page_order(h);
> -	struct page *page;
> +	struct folio *folio;
>  	bool alloc_try_hard = true;
>  	bool retry = true;
>
>  	/*
> -	 * By default we always try hard to allocate the page with
> -	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
> +	 * By default we always try hard to allocate the folio with
> +	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating folios in
>  	 * a loop (to adjust global huge page counts) and previous allocation
>  	 * failed, do not continue to try hard on the same node.  Use the
>  	 * node_alloc_noretry bitmap to manage this state information.
> @@ -2196,43 +2196,42 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
>  	if (nid == NUMA_NO_NODE)
>  		nid = numa_mem_id();
> retry:
> -	page = __alloc_pages(gfp_mask, order, nid, nmask);
> +	folio = __folio_alloc(gfp_mask, order, nid, nmask);
>
> -	/* Freeze head page */
> -	if (page && !page_ref_freeze(page, 1)) {
> -		__free_pages(page, order);
> +	if (folio && !folio_ref_freeze(folio, 1)) {
> +		folio_put(folio);
>  		if (retry) {	/* retry once */
>  			retry = false;
>  			goto retry;
>  		}
>  		/* WOW!  twice in a row.  */
> -		pr_warn("HugeTLB head page unexpected inflated ref count\n");
> -		page = NULL;
> +		pr_warn("HugeTLB unexpected inflated folio ref count\n");
> +		folio = NULL;
>  	}
>
>  	/*
> -	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
> -	 * indicates an overall state change.  Clear bit so that we resume
> -	 * normal 'try hard' allocations.
> +	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a
> +	 * folio this indicates an overall state change.  Clear bit so
> +	 * that we resume normal 'try hard' allocations.
>  	 */
> -	if (node_alloc_noretry && page && !alloc_try_hard)
> +	if (node_alloc_noretry && folio && !alloc_try_hard)
>  		node_clear(nid, *node_alloc_noretry);
>
>  	/*
> -	 * If we tried hard to get a page but failed, set bit so that
> +	 * If we tried hard to get a folio but failed, set bit so that
>  	 * subsequent attempts will not try as hard until there is an
>  	 * overall state change.
>  	 */
> -	if (node_alloc_noretry && !page && alloc_try_hard)
> +	if (node_alloc_noretry && !folio && alloc_try_hard)
>  		node_set(nid, *node_alloc_noretry);
>
> -	if (!page) {
> +	if (!folio) {
>  		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
>  		return NULL;
>  	}
>
>  	__count_vm_event(HTLB_BUDDY_PGALLOC);
> -	return page_folio(page);
> +	return folio;
>  }
>
>  static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
> .

This also looks good to me. Thanks for your work.
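
For anyone skimming the thread, a minimal userspace sketch of the failure
mode that the INIT_LIST_HEAD call guards against may help. This is not
kernel code; every name below is made up for illustration. The point is
that hugetlb overlays its own state on the storage that normally holds
folio->_deferred_list, so the list head must be made valid again before
folio_put() hands the folio back to a generic free path that expects a
sane list head:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the kernel's list_head; names are hypothetical. */
struct list_head { struct list_head *next, *prev; };

static void init_list_head(struct list_head *list)
{
	list->next = list->prev = list;
}

struct toy_folio {
	union {
		struct list_head deferred_list;	/* owned by generic code */
		long subsys_private[2];		/* subsystem reuses the bytes */
	};
};

/* Generic free path: complains if the list head looks clobbered. */
static void toy_free(struct toy_folio *f)
{
	if (f->deferred_list.next != &f->deferred_list)
		fprintf(stderr, "BUG: bad deferred_list at free\n");
	free(f);
}

int main(void)
{
	struct toy_folio *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	f->subsys_private[0] = 42;		/* clobbers deferred_list.next */
	init_list_head(&f->deferred_list);	/* the INIT_LIST_HEAD step */
	toy_free(f);				/* list head is valid again */
	return 0;
}

Dropping the init_list_head() call makes toy_free() report the clobbered
pointer, loosely mirroring the bad-page check the core MM would otherwise
trip over when the folio is freed.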