On Wed, Jun 12, 2024 at 02:04:21AM +0000, Wei Yang wrote:
> Functions deferred_[init|free]_pages are only used in
> deferred_init_maxorder(), which makes sure the range to init/free is
> within MAX_ORDER_NR_PAGES size.
> 
> With this knowledge, we can simplify these two functions, since:
> 
>   * only the first pfn could be IS_MAX_ORDER_ALIGNED()
> 
> Also, since the range passed to deferred_[init|free]_pages always comes
> from memblock.memory, for which we have already allocated the memmap to
> cover it, pfn_valid() always returns true. Then we can remove the
> related check.
> 
> Signed-off-by: Wei Yang <richard.weiyang@xxxxxxxxx>
> CC: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
> CC: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
> CC: David Hildenbrand <david@xxxxxxxxxx>

Reviewed-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>

> ---
>  mm/mm_init.c | 63 ++++++++-------------------------------------------------------
>  1 file changed, 8 insertions(+), 55 deletions(-)
> 
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index c152c60eca3d..63d70fc60705 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -1911,7 +1911,7 @@ unsigned long __init node_map_pfn_alignment(void)
>  }
>  
>  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
> -static void __init deferred_free_range(unsigned long pfn,
> +static void __init deferred_free_pages(unsigned long pfn,
>  				       unsigned long nr_pages)
>  {
>  	struct page *page;
> @@ -1950,69 +1950,22 @@ static inline void __init pgdat_init_report_one_done(void)
>  	complete(&pgdat_init_all_done_comp);
>  }
>  
> -/*
> - * Returns true if page needs to be initialized or freed to buddy allocator.
> - *
> - * We check if a current MAX_PAGE_ORDER block is valid by only checking the
> - * validity of the head pfn.
> - */
> -static inline bool __init deferred_pfn_valid(unsigned long pfn)
> -{
> -	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
> -		return false;
> -	return true;
> -}
> -
> -/*
> - * Free pages to buddy allocator. Try to free aligned pages in
> - * MAX_ORDER_NR_PAGES sizes.
> - */
> -static void __init deferred_free_pages(unsigned long pfn,
> -				       unsigned long end_pfn)
> -{
> -	unsigned long nr_free = 0;
> -
> -	for (; pfn < end_pfn; pfn++) {
> -		if (!deferred_pfn_valid(pfn)) {
> -			deferred_free_range(pfn - nr_free, nr_free);
> -			nr_free = 0;
> -		} else if (IS_MAX_ORDER_ALIGNED(pfn)) {
> -			deferred_free_range(pfn - nr_free, nr_free);
> -			nr_free = 1;
> -		} else {
> -			nr_free++;
> -		}
> -	}
> -	/* Free the last block of pages to allocator */
> -	deferred_free_range(pfn - nr_free, nr_free);
> -}
> -
>  /*
>   * Initialize struct pages. We minimize pfn page lookups and scheduler checks
>   * by performing it only once every MAX_ORDER_NR_PAGES.
>   * Return number of pages initialized.
>   */
> -static unsigned long  __init deferred_init_pages(struct zone *zone,
> -						  unsigned long pfn,
> -						  unsigned long end_pfn)
> +static unsigned long __init deferred_init_pages(struct zone *zone,
> +						 unsigned long pfn,
> +						 unsigned long end_pfn)
>  {
>  	int nid = zone_to_nid(zone);
> -	unsigned long nr_pages = 0;
> +	unsigned long nr_pages = end_pfn - pfn;
>  	int zid = zone_idx(zone);
> -	struct page *page = NULL;
> +	struct page *page = pfn_to_page(pfn);
>  
> -	for (; pfn < end_pfn; pfn++) {
> -		if (!deferred_pfn_valid(pfn)) {
> -			page = NULL;
> -			continue;
> -		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
> -			page = pfn_to_page(pfn);
> -		} else {
> -			page++;
> -		}
> +	for (; pfn < end_pfn; pfn++, page++)
>  		__init_single_page(page, pfn, zid, nid);
> -		nr_pages++;
> -	}
>  	return nr_pages;
>  }
>  
> @@ -2096,7 +2049,7 @@ deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
>  			break;
>  
>  		t = min(mo_pfn, epfn);
> -		deferred_free_pages(spfn, t);
> +		deferred_free_pages(spfn, t - spfn);
>  
>  		if (mo_pfn <= epfn)
>  			break;
> -- 
> 2.34.1
> 

-- 
Sincerely yours,
Mike.
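
[Context for readers of the archive: the caller's clamping is what makes the
simplification above safe. Below is a minimal sketch, simplified from
deferred_init_maxorder() in mm/mm_init.c; the iteration over memblock ranges
and the early-exit checks are omitted, and ALIGN()/min() are the kernel's
usual round-up and minimum macros. Not the exact kernel code.]

	/*
	 * mo_pfn is the first MAX_ORDER_NR_PAGES-aligned pfn strictly
	 * above spfn. Clamping the end of the range to mo_pfn means
	 * [spfn, t) never crosses a MAX_ORDER boundary: within it only
	 * spfn itself can be IS_MAX_ORDER_ALIGNED(), and t - spfn
	 * never exceeds MAX_ORDER_NR_PAGES.
	 */
	unsigned long mo_pfn = ALIGN(spfn + 1, MAX_ORDER_NR_PAGES);
	unsigned long t = min(mo_pfn, epfn);

	deferred_free_pages(spfn, t - spfn);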