On Wed, 2023-03-08 at 11:41 +0200, Mike Rapoport wrote:
> +
> +static inline void __free_one_page(struct page *page, unsigned int order,
> +				   bool cache_refill)
> +{
> +	unsigned long pfn = page_to_pfn(page);
> +	unsigned long buddy_pfn;
> +	unsigned long combined_pfn;
> +	struct page *buddy;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&free_area->lock, flags);
> +
> +	if (cache_refill) {
> +		set_pageblock_unmapped(page);
> +		free_area[order].nr_cached++;
> +	}
> +
> +	while (order < MAX_ORDER - 1) {
> +		buddy = find_unmapped_buddy_page_pfn(page, pfn, order,
> +						     &buddy_pfn);
> +		if (!buddy)
> +			break;
> +
> +		del_page_from_free_list(buddy, order);
> +		combined_pfn = buddy_pfn & pfn;
> +		page = page + (combined_pfn - pfn);
> +		pfn = combined_pfn;
> +		order++;
> +	}
> +
> +	set_unmapped_order(page, order);
> +	add_to_free_list(page, order);
> +	spin_unlock_irqrestore(&free_area->lock, flags);
> +}
> +

The page has to be zeroed before it goes back on the list, right? I
didn't see it.
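
Something along these lines is what I had in mind (untested sketch, not
against your tree; zero_unmapped_pages() is a made-up helper name). It
assumes the subpages are still accessible through the direct map at the
point it runs, e.g. on the cache_refill path before
set_pageblock_unmapped() pulls them out -- pages coming back from an
unmapped user would need a temporary mapping first:

#include <linux/highmem.h>
#include <linux/mm.h>

/*
 * Zero an order-N range before it becomes visible on the free list, so
 * stale contents can't leak to the next allocation. Assumes the pages
 * are still mapped; clear_highpage() handles any per-subpage kmap.
 */
static void zero_unmapped_pages(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++)
		clear_highpage(page + i);
}

Then __free_one_page() could call it up front, before taking
free_area->lock, so the clearing doesn't extend the locked section.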