On Tue, 2024-01-02 at 21:12 +0800, Gang Li wrote:
> The readability of `hugetlb_hstate_alloc_pages` is poor. By cleaning the
> code, its readability can be improved, facilitating future modifications.
>
> This patch extracts two functions to reduce the complexity of
> `hugetlb_hstate_alloc_pages` and has no functional changes.
>
> - hugetlb_hstate_alloc_pages_node_specific() iterates through each
>   online node and performs the allocation if necessary.
> - hugetlb_hstate_alloc_pages_report() reports errors during allocation,
>   and updates h->max_huge_pages accordingly.

Minor nit: I think hugetlb_hstate_alloc_pages_errcheck() is more
descriptive than hugetlb_hstate_alloc_pages_report() (see the sketch at
the bottom).

Otherwise

Reviewed-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>

>
> Signed-off-by: Gang Li <gang.li@xxxxxxxxx>
> ---
>  mm/hugetlb.c | 46 +++++++++++++++++++++++++++++-----------------
>  1 file changed, 29 insertions(+), 17 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index ed1581b670d42..2606135ec55e6 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -3482,6 +3482,33 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
>  	h->max_huge_pages_node[nid] = i;
>  }
>  
> +static bool __init hugetlb_hstate_alloc_pages_node_specific(struct hstate *h)
> +{
> +	int i;
> +	bool node_specific_alloc = false;
> +
> +	for_each_online_node(i) {
> +		if (h->max_huge_pages_node[i] > 0) {
> +			hugetlb_hstate_alloc_pages_onenode(h, i);
> +			node_specific_alloc = true;
> +		}
> +	}
> +
> +	return node_specific_alloc;
> +}
> +
> +static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, struct hstate *h)
> +{
> +	if (allocated < h->max_huge_pages) {
> +		char buf[32];
> +
> +		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
> +		pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
> +			h->max_huge_pages, buf, allocated);
> +		h->max_huge_pages = allocated;
> +	}
> +}
> +
>  /*
>   * NOTE: this routine is called in different contexts for gigantic and
>   * non-gigantic pages.
> @@ -3499,7 +3526,6 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
>  	struct folio *folio;
>  	LIST_HEAD(folio_list);
>  	nodemask_t *node_alloc_noretry;
> -	bool node_specific_alloc = false;
>  
>  	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
>  	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
> @@ -3508,14 +3534,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
>  	}
>  
>  	/* do node specific alloc */
> -	for_each_online_node(i) {
> -		if (h->max_huge_pages_node[i] > 0) {
> -			hugetlb_hstate_alloc_pages_onenode(h, i);
> -			node_specific_alloc = true;
> -		}
> -	}
> -
> -	if (node_specific_alloc)
> +	if (hugetlb_hstate_alloc_pages_node_specific(h))
>  		return;
>  
>  	/* below will do all node balanced alloc */
> @@ -3558,14 +3577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
>  	/* list will be empty if hstate_is_gigantic */
>  	prep_and_add_allocated_folios(h, &folio_list);
>  
> -	if (i < h->max_huge_pages) {
> -		char buf[32];
> -
> -		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
> -		pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
> -			h->max_huge_pages, buf, i);
> -		h->max_huge_pages = i;
> -	}
> +	hugetlb_hstate_alloc_pages_report(i, h);
>  	kfree(node_alloc_noretry);
>  }
>
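
For reference, the rename I have in mind is purely mechanical: it only
touches the new helper's definition and its single call site, something
like the sketch below (same body as hugetlb_hstate_alloc_pages_report()
in this patch, no functional change intended):

	static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated,
								struct hstate *h)
	{
		/* Warn and clamp max_huge_pages if fewer pages were allocated. */
		if (allocated < h->max_huge_pages) {
			char buf[32];

			string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
			pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
				h->max_huge_pages, buf, allocated);
			h->max_huge_pages = allocated;
		}
	}

and in hugetlb_hstate_alloc_pages():

	hugetlb_hstate_alloc_pages_errcheck(i, h);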