All callers now use free_unref_folios() so we can delete this function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Reviewed-by: Ryan Roberts <ryan.roberts@xxxxxxx>
---
 mm/internal.h   |  1 -
 mm/page_alloc.c | 18 ------------------
 2 files changed, 19 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 4d45b351e0fd..3e2b478c610f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -451,7 +451,6 @@ extern int user_min_free_kbytes;
 
 void free_unref_page(struct page *page, unsigned int order);
 void free_unref_folios(struct folio_batch *fbatch);
-void free_unref_page_list(struct list_head *list);
 
 extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eca5b153f732..7600344b997e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2641,24 +2641,6 @@ void free_unref_folios(struct folio_batch *folios)
 	folios->nr = 0;
 }
 
-void free_unref_page_list(struct list_head *list)
-{
-	struct folio_batch fbatch;
-
-	folio_batch_init(&fbatch);
-	while (!list_empty(list)) {
-		struct folio *folio = list_first_entry(list, struct folio, lru);
-
-		list_del(&folio->lru);
-		if (folio_batch_add(&fbatch, folio) > 0)
-			continue;
-		free_unref_folios(&fbatch);
-	}
-
-	if (fbatch.nr)
-		free_unref_folios(&fbatch);
-}
-
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
-- 
2.43.0
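
[Editor's note: below is a minimal, hypothetical sketch of the batching pattern
that replaces the deleted helper: a caller collects folios into an on-stack
folio_batch and flushes it through free_unref_folios() whenever it fills,
instead of chaining pages through folio->lru on a list_head for
free_unref_page_list(). The function name example_release_folios() and its
array parameter are invented for illustration; this is not code from the patch
or from any converted caller, and free_unref_folios() is mm-internal (declared
in mm/internal.h), so such a caller would have to live under mm/.]

/*
 * Hypothetical example (not part of this patch): accumulate folios in an
 * on-stack folio_batch and hand each full batch to free_unref_folios(),
 * which is the interface that replaces free_unref_page_list().
 */
static void example_release_folios(struct folio **folios, unsigned int nr)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr; i++) {
		/* folio_batch_add() returns the slots left; 0 means full. */
		if (folio_batch_add(&fbatch, folios[i]) == 0)
			free_unref_folios(&fbatch);
	}

	/* free_unref_folios() resets fbatch.nr, so a final flush is safe. */
	if (folio_batch_count(&fbatch))
		free_unref_folios(&fbatch);
}

[The batch keeps folios together so free_unref_folios() can free them in one
pass rather than returning them to the allocator one page at a time, which is
the same behaviour the deleted free_unref_page_list() provided on top of a
list_head.]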