It's now obvious that __folio_put_small() and __folio_put_large() do
almost exactly the same thing.  Inline them both into __folio_put().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/swap.c | 32 ++++++--------------------------
 1 file changed, 6 insertions(+), 26 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 4cfb98304742..15a7da725576 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -112,42 +112,22 @@ static void page_cache_release(struct folio *folio)
 		unlock_page_lruvec_irqrestore(lruvec, flags);
 }
 
-static void __folio_put_small(struct folio *folio)
-{
-	page_cache_release(folio);
-	mem_cgroup_uncharge(folio);
-	free_unref_page(&folio->page, 0);
-}
-
-static void __folio_put_large(struct folio *folio)
+void __folio_put(struct folio *folio)
 {
-	/*
-	 * __page_cache_release() is supposed to be called for thp, not for
-	 * hugetlb. This is because hugetlb page does never have PageLRU set
-	 * (it's never listed to any LRU lists) and no memcg routines should
-	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
-	 */
-	if (folio_test_hugetlb(folio)) {
+	if (unlikely(folio_is_zone_device(folio))) {
+		free_zone_device_page(&folio->page);
+		return;
+	} else if (folio_test_hugetlb(folio)) {
 		free_huge_folio(folio);
 		return;
 	}
 
 	page_cache_release(folio);
-	if (folio_test_large_rmappable(folio))
+	if (folio_test_large(folio) && folio_test_large_rmappable(folio))
 		folio_undo_large_rmappable(folio);
 	mem_cgroup_uncharge(folio);
 	free_unref_page(&folio->page, folio_order(folio));
 }
-
-void __folio_put(struct folio *folio)
-{
-	if (unlikely(folio_is_zone_device(folio)))
-		free_zone_device_page(&folio->page);
-	else if (unlikely(folio_test_large(folio)))
-		__folio_put_large(folio);
-	else
-		__folio_put_small(folio);
-}
 EXPORT_SYMBOL(__folio_put);
 
 /**
-- 
2.43.0
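
For call-path context, a sketch (not part of this diff, paraphrased from
include/linux/mm.h): __folio_put() is only reached once the last reference
to the folio is dropped, via the folio_put() wrapper:

	/* Caller side: only take the freeing slow path on the last put. */
	static inline void folio_put(struct folio *folio)
	{
		if (folio_put_testzero(folio))
			__folio_put(folio);
	}

With this patch that wrapper lands in a single out-of-line function which
checks the unlikely zone-device and hugetlb cases first, rather than
dispatching between three separate helpers.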