This function will be used imminently.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/mm.h |  7 ++++++
 mm/swap.c          | 56 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a517ef8d2386..4f9e2cfb372e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1515,6 +1515,13 @@ typedef union {
 void release_pages(release_pages_arg, int nr);
 void folios_put(struct folio_batch *folios);
 
+struct pfn_range {	/* NOTE(review): presumably [start, end) pfns within ONE folio — confirm with callers */
+	unsigned long start;	/* first pfn in the range */
+	unsigned long end;	/* one past the last pfn */
+};
+
+void pfn_range_put(struct pfn_range *range, unsigned int nr);	/* drop one ref per pfn in each range */
+
 static inline void put_page(struct page *page)
 {
 	struct folio *folio = page_folio(page);
diff --git a/mm/swap.c b/mm/swap.c
index 8bd15402cd8f..218d2cc4c6f4 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1027,6 +1027,62 @@ void folios_put(struct folio_batch *folios)
 }
 EXPORT_SYMBOL(folios_put);
 
+void pfn_range_put(struct pfn_range *range, unsigned int nr)	/* drop refs on each of @nr ranges */
+{
+	struct folio_batch folios;	/* folios whose refcount hit zero, freed in batches */
+	unsigned int i;
+	struct lruvec *lruvec = NULL;	/* non-NULL while an LRU lock is held */
+	unsigned long flags = 0;	/* IRQ flags for unlock_page_lruvec_irqrestore() */
+
+	folio_batch_init(&folios);
+	for (i = 0; i < nr; i++) {
+		struct folio *folio = pfn_folio(range[i].start);
+		unsigned int refs = range[i].end - range[i].start;	/* one ref per pfn */
+
+		if (folio_is_zone_device(folio)) {	/* device memory is freed here, never batched */
+			if (lruvec) {	/* release the LRU lock before the device paths */
+				unlock_page_lruvec_irqrestore(lruvec, flags);
+				lruvec = NULL;
+			}
+			if (put_devmap_managed_page_refs(&folio->page, refs))
+				continue;
+			if (folio_ref_sub_and_test(folio, refs))
+				free_zone_device_page(&folio->page);
+			continue;
+		}
+
+		if (!folio_ref_sub_and_test(folio, refs))
+			continue;	/* other references remain; nothing to free */
+
+		/* hugetlb has its own memcg */
+		if (folio_test_hugetlb(folio)) {
+			if (lruvec) {	/* release the LRU lock before freeing */
+				unlock_page_lruvec_irqrestore(lruvec, flags);
+				lruvec = NULL;
+			}
+			free_huge_folio(folio);
+			continue;
+		}
+
+		__page_cache_release(folio, &lruvec, &flags);
+		if (folio_batch_add(&folios, folio) == 0) {	/* no space left: flush the batch */
+			if (lruvec) {
+				unlock_page_lruvec_irqrestore(lruvec, flags);
+				lruvec = NULL;
+			}
+			mem_cgroup_uncharge_folios(&folios);
+			free_unref_folios(&folios);
+		}
+	}
+	if (lruvec)	/* drop any LRU lock still held after the loop */
+		unlock_page_lruvec_irqrestore(lruvec, flags);
+
+	if (folios.nr) {	/* flush whatever is left in the batch */
+		mem_cgroup_uncharge_folios(&folios);
+		free_unref_folios(&folios);
+	}
+}
+
 /**
  * release_pages - batched put_page()
  * @arg: array of pages to release
-- 
2.40.1