All callers now have a folio, so convert the function to take a folio.
Saves a couple of calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/swap.h | 4 ++--
 mm/memcontrol.c      | 6 +++---
 mm/memory.c          | 2 +-
 mm/swapfile.c        | 2 +-
 mm/vmscan.c          | 3 +--
 5 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index f16c9af6bf32..8178ec471fe3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -688,7 +688,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p
 }
 
 extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
-extern bool mem_cgroup_swap_full(struct page *page);
+extern bool mem_cgroup_swap_full(struct folio *folio);
 #else
 static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 {
@@ -710,7 +710,7 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
 	return get_nr_swap_pages();
 }
 
-static inline bool mem_cgroup_swap_full(struct page *page)
+static inline bool mem_cgroup_swap_full(struct folio *folio)
 {
 	return vm_swap_full();
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3b0698faf1ee..557579f43e7b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7375,18 +7375,18 @@ long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
 	return nr_swap_pages;
 }
 
-bool mem_cgroup_swap_full(struct page *page)
+bool mem_cgroup_swap_full(struct folio *folio)
 {
 	struct mem_cgroup *memcg;
 
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
 	if (vm_swap_full())
 		return true;
 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 		return false;
 
-	memcg = page_memcg(page);
+	memcg = folio_memcg(folio);
 	if (!memcg)
 		return false;
 
diff --git a/mm/memory.c b/mm/memory.c
index 5b440045d306..b6f2ccda4f45 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3641,7 +3641,7 @@ static inline bool should_try_to_free_swap(struct folio *folio,
 {
 	if (!folio_test_swapcache(folio))
 		return false;
-	if (mem_cgroup_swap_full(&folio->page) || (vma->vm_flags & VM_LOCKED) ||
+	if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
 	    folio_test_mlocked(folio))
 		return true;
 	/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9ee42a12cffc..74929a8cbf88 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -144,7 +144,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
 	if (folio_trylock(folio)) {
 		if ((flags & TTRS_ANYWAY) ||
 		    ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
-		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(&folio->page)))
+		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)))
 			ret = folio_free_swap(folio);
 		folio_unlock(folio);
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ac7f6f77e28a..85af57bbfd81 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2002,8 +2002,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 activate_locked:
 		/* Not a candidate for swapping, so reclaim swap space. */
 		if (folio_test_swapcache(folio) &&
-		    (mem_cgroup_swap_full(&folio->page) ||
-		     folio_test_mlocked(folio)))
+		    (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
 			folio_free_swap(folio);
 		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
 		if (!folio_test_mlocked(folio)) {
-- 
2.35.1
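
For readers following along outside the kernel tree: the saved compound_head()
calls come from the two page-based helpers the diff replaces; PageLocked() and
page_memcg() each resolve the head page internally, while folio_test_locked()
and folio_memcg() start from a folio, which already is a head page. Below is a
minimal standalone sketch of that pattern; the struct layout and helper names
(head_of, page_is_locked, folio_is_locked) are simplified stand-ins for
illustration, not the kernel's real definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's struct page / struct folio. */
struct page {
	struct page *head;	/* head page of the compound page (or itself) */
	bool locked;		/* flag that is only meaningful on the head page */
};

struct folio {
	struct page page;	/* a folio always refers to a head page */
};

/* Rough analogue of compound_head(): every page-based helper pays this lookup. */
static struct page *head_of(struct page *page)
{
	return page->head;
}

/* Page-taking helper, as before the patch: must find the head page first. */
static bool page_is_locked(struct page *page)
{
	return head_of(page)->locked;
}

/* Folio-taking helper, as after the patch: no head lookup needed. */
static bool folio_is_locked(struct folio *folio)
{
	return folio->page.locked;
}

int main(void)
{
	struct folio folio = { .page = { .locked = true } };

	folio.page.head = &folio.page;

	/* A caller that already holds a folio used to pass &folio->page ... */
	printf("via page API:  %d\n", page_is_locked(&folio.page));
	/* ... and now passes the folio itself, skipping the head lookup. */
	printf("via folio API: %d\n", folio_is_locked(&folio));
	return 0;
}

The same reasoning applies to the converted callers: they already hold a
folio, so passing &folio->page only forced the callee to rediscover what the
caller already knew.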