The quilt patch titled
     Subject: huge_memory: convert unmap_page() to unmap_folio()
has been removed from the -mm tree.  Its filename was
     huge_memory-convert-unmap_page-to-unmap_folio.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Subject: huge_memory: convert unmap_page() to unmap_folio()
Date: Fri, 2 Sep 2022 20:46:49 +0100

Remove a folio->page->folio conversion.

Link: https://lkml.kernel.org/r/20220902194653.1739778-54-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/huge_memory.c |   13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

--- a/mm/huge_memory.c~huge_memory-convert-unmap_page-to-unmap_folio
+++ a/mm/huge_memory.c
@@ -2355,13 +2355,12 @@ void vma_adjust_trans_huge(struct vm_are
 	}
 }
 
-static void unmap_page(struct page *page)
+static void unmap_folio(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
 		TTU_SYNC;
 
-	VM_BUG_ON_PAGE(!PageHead(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
 	/*
 	 * Anon pages need migration entries to preserve them, but file
@@ -2378,7 +2377,7 @@ static void remap_page(struct folio *fol
 {
 	int i = 0;
 
-	/* If unmap_page() uses try_to_migrate() on file, remove this check */
+	/* If unmap_folio() uses try_to_migrate() on file, remove this check */
 	if (!folio_test_anon(folio))
 		return;
 	for (;;) {
@@ -2428,7 +2427,7 @@ static void __split_huge_page_tail(struc
 	 * for example lock_page() which set PG_waiters.
 	 *
 	 * Note that for mapped sub-pages of an anonymous THP,
-	 * PG_anon_exclusive has been cleared in unmap_page() and is stored in
+	 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
 	 * the migration entry instead from where remap_page() will restore it.
 	 * We can still have PG_anon_exclusive set on effectively unmapped and
 	 * unreferenced sub-pages of an anonymous THP: we can simply drop
@@ -2700,7 +2699,7 @@ int split_huge_page_to_list(struct page
 	}
 
 	/*
-	 * Racy check if we can split the page, before unmap_page() will
+	 * Racy check if we can split the page, before unmap_folio() will
 	 * split PMDs
 	 */
 	if (!can_split_folio(folio, &extra_pins)) {
@@ -2708,7 +2707,7 @@ int split_huge_page_to_list(struct page
 		goto out_unlock;
 	}
 
-	unmap_page(&folio->page);
+	unmap_folio(folio);
 
 	/* block interrupt reentry in xa_lock and spinlock */
 	local_irq_disable();
_

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are
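
For readers unfamiliar with the folio API, the following is a minimal
userspace sketch, not kernel code, of the round trip the patch above
removes: struct page, struct folio and page_folio() here are simplified
stand-ins, relying only on the fact that a folio's first member is its
head page.  The old unmap_page() took the folio's head page and
immediately converted it back with page_folio(); unmap_folio() takes
the folio the caller already holds.

#include <stdio.h>

/* Simplified stand-ins for the kernel's types: a folio's first member
 * is its head page, so the two pointers refer to the same object. */
struct page { unsigned long flags; };
struct folio { struct page page; };

/* Stand-in for the kernel's page_folio(): recover the folio from its head page. */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)page;
}

/* Old shape: take a head page, immediately convert back to its folio. */
static void unmap_page(struct page *page)
{
	struct folio *folio = page_folio(page);	/* folio -> page -> folio */
	printf("unmap folio at %p\n", (void *)folio);
}

/* New shape: take the folio directly; no conversion needed. */
static void unmap_folio(struct folio *folio)
{
	printf("unmap folio at %p\n", (void *)folio);
}

int main(void)
{
	struct folio f = { { 0 } };

	unmap_page(&f.page);	/* old call site: unmap_page(&folio->page) */
	unmap_folio(&f);	/* new call site: unmap_folio(folio) */
	return 0;
}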