All callers now use free_unref_folios() so we can delete this function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Reviewed-by: Ryan Roberts <ryan.roberts@xxxxxxx>
---
 mm/internal.h   |  1 -
 mm/page_alloc.c | 18 ------------------
 2 files changed, 19 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 3ca7e9d45b33..cc91830f6eae 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -454,7 +454,6 @@ extern int user_min_free_kbytes;
 
 void free_unref_page(struct page *page, unsigned int order);
 void free_unref_folios(struct folio_batch *fbatch);
-void free_unref_page_list(struct list_head *list);
 
 extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aa7026d81d07..01b60769726e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2641,24 +2641,6 @@ void free_unref_folios(struct folio_batch *folios)
 	folio_batch_reinit(folios);
 }
 
-void free_unref_page_list(struct list_head *list)
-{
-	struct folio_batch fbatch;
-
-	folio_batch_init(&fbatch);
-	while (!list_empty(list)) {
-		struct folio *folio = list_first_entry(list, struct folio, lru);
-
-		list_del(&folio->lru);
-		if (folio_batch_add(&fbatch, folio) > 0)
-			continue;
-		free_unref_folios(&fbatch);
-	}
-
-	if (fbatch.nr)
-		free_unref_folios(&fbatch);
-}
-
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
-- 
2.43.0
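
For readers following the series, a minimal caller-side sketch of what "all callers now use free_unref_folios()" means in practice, mirroring the logic of the deleted wrapper: accumulate folios in a folio_batch and flush it with free_unref_folios() whenever it fills up, instead of chaining pages on a list_head for free_unref_page_list(). This is illustrative only and not part of the patch; release_folio_array() is a hypothetical helper, and it assumes the caller already holds its folios in an array.

/*
 * Hypothetical example: free an array of folios through the batch API.
 * Not from this patch; shown only to illustrate the conversion pattern.
 */
static void release_folio_array(struct folio **folios, unsigned int nr)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr; i++) {
		/* folio_batch_add() returns the space left; flush when full. */
		if (folio_batch_add(&fbatch, folios[i]) == 0)
			free_unref_folios(&fbatch);
	}

	/* free_unref_folios() reinits the batch, so only the tail remains. */
	if (folio_batch_count(&fbatch))
		free_unref_folios(&fbatch);
}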