On Thu, Apr 15, 2021 at 02:25:36PM +0200, Vlastimil Babka wrote:
> > @@ -3294,6 +3295,7 @@ void free_unref_page_list(struct list_head *list)
> >  	struct page *page, *next;
> >  	unsigned long flags, pfn;
> >  	int batch_count = 0;
> > +	int migratetype;
> >  
> >  	/* Prepare pages for freeing */
> >  	list_for_each_entry_safe(page, next, list, lru) {
> > @@ -3301,15 +3303,28 @@ void free_unref_page_list(struct list_head *list)
> >  		if (!free_unref_page_prepare(page, pfn))
> >  			list_del(&page->lru);
> >  		set_page_private(page, pfn);
> 
> Should probably move this below so we don't set private for pages that then go
> through free_one_page()? Doesn't seem to be a bug, just unnecessary.
> 

Sure.

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1d87ca364680..a9c1282d9c7b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3293,7 +3293,6 @@ void free_unref_page_list(struct list_head *list)
 		pfn = page_to_pfn(page);
 		if (!free_unref_page_prepare(page, pfn))
 			list_del(&page->lru);
-		set_page_private(page, pfn);
 
 		/*
 		 * Free isolated pages directly to the allocator, see
@@ -3307,6 +3306,8 @@ void free_unref_page_list(struct list_head *list)
 				list_del(&page->lru);
 			}
 		}
+
+		set_page_private(page, pfn);
 	}
 
 	local_lock_irqsave(&pagesets.lock, flags);

-- 
Mel Gorman
SUSE Labs
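
For anyone reading along without the rest of the series applied, the prepare
loop ends up shaped roughly like this once the fixup above is folded in. This
is a simplified sketch rather than the exact code from the series: the
isolated-page handling is condensed to a single check here, and the helpers it
uses (get_pcppage_migratetype(), is_migrate_isolate(), free_one_page(),
FPI_NONE) are the ones mm/page_alloc.c already has at this point in the
series.

	/* Prepare pages for freeing */
	list_for_each_entry_safe(page, next, list, lru) {
		pfn = page_to_pfn(page);
		if (!free_unref_page_prepare(page, pfn))
			list_del(&page->lru);

		/* Isolated pages are freed straight to the buddy allocator. */
		migratetype = get_pcppage_migratetype(page);
		if (unlikely(is_migrate_isolate(migratetype))) {
			list_del(&page->lru);
			free_one_page(page_zone(page), page, pfn, 0,
					migratetype, FPI_NONE);
			continue;
		}

		/*
		 * Only pages that stay on the list for per-cpu freeing need
		 * the pfn stashed in page->private.
		 */
		set_page_private(page, pfn);
	}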