The quilt patch titled
     Subject: mm: swap: convert mark_page_lazyfree() to folio_mark_lazyfree()
has been removed from the -mm tree.  Its filename was
     mm-swap-convert-mark_page_lazyfree-to-folio_mark_lazyfree.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Subject: mm: swap: convert mark_page_lazyfree() to folio_mark_lazyfree()
Date: Fri, 9 Dec 2022 10:06:18 +0800

mark_page_lazyfree() and all of its callers now operate on folios, so
rename it to folio_mark_lazyfree() and make it take a folio argument
directly instead of converting with page_folio() internally.

Link: https://lkml.kernel.org/r/20221209020618.190306-1-wangkefeng.wang@xxxxxxxxxx
Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/swap.h |    2 +-
 mm/huge_memory.c     |    2 +-
 mm/madvise.c         |    2 +-
 mm/swap.c            |   12 +++++-------
 4 files changed, 8 insertions(+), 10 deletions(-)

--- a/include/linux/swap.h~mm-swap-convert-mark_page_lazyfree-to-folio_mark_lazyfree
+++ a/include/linux/swap.h
@@ -402,7 +402,7 @@ extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
 extern void deactivate_page(struct page *page);
-extern void mark_page_lazyfree(struct page *page);
+void folio_mark_lazyfree(struct folio *folio);
 extern void swap_setup(void);
 
 extern void lru_cache_add_inactive_or_unevictable(struct page *page,
--- a/mm/huge_memory.c~mm-swap-convert-mark_page_lazyfree-to-folio_mark_lazyfree
+++ a/mm/huge_memory.c
@@ -1660,7 +1660,7 @@ bool madvise_free_huge_pmd(struct mmu_ga
 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 	}
 
-	mark_page_lazyfree(&folio->page);
+	folio_mark_lazyfree(folio);
 	ret = true;
 out:
 	spin_unlock(ptl);
--- a/mm/madvise.c~mm-swap-convert-mark_page_lazyfree-to-folio_mark_lazyfree
+++ a/mm/madvise.c
@@ -728,7 +728,7 @@ static int madvise_free_pte_range(pmd_t
 			set_pte_at(mm, addr, pte, ptent);
 			tlb_remove_tlb_entry(tlb, pte, addr);
 		}
-		mark_page_lazyfree(&folio->page);
+		folio_mark_lazyfree(folio);
 	}
 out:
 	if (nr_swap) {
--- a/mm/swap.c~mm-swap-convert-mark_page_lazyfree-to-folio_mark_lazyfree
+++ a/mm/swap.c
@@ -757,16 +757,14 @@ void deactivate_page(struct page *page)
 }
 
 /**
- * mark_page_lazyfree - make an anon page lazyfree
- * @page: page to deactivate
+ * folio_mark_lazyfree - make an anon folio lazyfree
+ * @folio: folio to deactivate
  *
- * mark_page_lazyfree() moves @page to the inactive file list.
- * This is done to accelerate the reclaim of @page.
+ * folio_mark_lazyfree() moves @folio to the inactive file list.
+ * This is done to accelerate the reclaim of @folio.
  */
-void mark_page_lazyfree(struct page *page)
+void folio_mark_lazyfree(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	if (folio_test_lru(folio) && folio_test_anon(folio) &&
 	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
 	    !folio_test_unevictable(folio)) {
_

Patches currently in -mm which might be from wangkefeng.wang@xxxxxxxxxx are

mm-hwposion-support-recovery-from-ksm_might_need_to_copy.patch
mm-hwposion-support-recovery-from-ksm_might_need_to_copy-v3.patch
mm-madvise-use-vm_normal_folio-in-madvise_free_pte_range.patch
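
For anyone carrying out-of-tree callers of mark_page_lazyfree(), here is a
minimal sketch of the conversion pattern the patch applies; the three
drop_lazyfree_hint*() wrappers below are hypothetical illustrations, not
part of the patch.  A caller that already holds a folio passes it straight
through, and a caller still holding a struct page converts at the call site
with page_folio(), which is exactly where the patch moves that conversion.

#include <linux/mm.h>
#include <linux/swap.h>

/* Before: the callee hid the page-to-folio conversion. */
static void drop_lazyfree_hint_old(struct page *page)
{
	mark_page_lazyfree(page);	/* called page_folio() internally */
}

/* After: a caller that already has a folio passes it directly... */
static void drop_lazyfree_hint(struct folio *folio)
{
	folio_mark_lazyfree(folio);
}

/* ...and a caller still holding a struct page converts at the boundary. */
static void drop_lazyfree_hint_page(struct page *page)
{
	folio_mark_lazyfree(page_folio(page));
}

Pushing page_folio() out to the remaining page-based call sites keeps the
common folio path free of a redundant conversion and makes those legacy
boundaries easy to find and clean up later.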