The quilt patch titled
     Subject: mm: huge_memory: use a folio in change_huge_pmd()
has been removed from the -mm tree.  Its filename was
     mm-huge_memory-use-a-folio-in-change_huge_pmd.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Subject: mm: huge_memory: use a folio in change_huge_pmd()
Date: Wed, 18 Oct 2023 22:07:57 +0800

Use a folio in change_huge_pmd(), which helps to remove the last
xchg_page_access_time() caller.

Link: https://lkml.kernel.org/r/20231018140806.2783514-11-wangkefeng.wang@xxxxxxxxxx
Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Huang Ying <ying.huang@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Cc: Zi Yan <ziy@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/huge_memory.c |   13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

--- a/mm/huge_memory.c~mm-huge_memory-use-a-folio-in-change_huge_pmd
+++ a/mm/huge_memory.c
@@ -1856,7 +1856,7 @@ int change_huge_pmd(struct mmu_gather *t
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	if (is_swap_pmd(*pmd)) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
-		struct page *page = pfn_swap_entry_to_page(entry);
+		struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
 		pmd_t newpmd;
 
 		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
@@ -1865,7 +1865,7 @@ int change_huge_pmd(struct mmu_gather *t
 			 * A protection check is difficult so
 			 * just be safe and disable write
 			 */
-			if (PageAnon(page))
+			if (folio_test_anon(folio))
 				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
 			else
 				entry = make_readable_migration_entry(swp_offset(entry));
@@ -1887,7 +1887,7 @@ int change_huge_pmd(struct mmu_gather *t
 #endif
 
 	if (prot_numa) {
-		struct page *page;
+		struct folio *folio;
 		bool toptier;
 		/*
 		 * Avoid trapping faults against the zero page. The read-only
@@ -1900,8 +1900,8 @@ int change_huge_pmd(struct mmu_gather *t
 		if (pmd_protnone(*pmd))
 			goto unlock;
 
-		page = pmd_page(*pmd);
-		toptier = node_is_toptier(page_to_nid(page));
+		folio = page_folio(pmd_page(*pmd));
+		toptier = node_is_toptier(folio_nid(folio));
 		/*
 		 * Skip scanning top tier node if normal numa
 		 * balancing is disabled
@@ -1912,7 +1912,8 @@ int change_huge_pmd(struct mmu_gather *t
 
 		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
 		    !toptier)
-			xchg_page_access_time(page, jiffies_to_msecs(jiffies));
+			folio_xchg_access_time(folio,
+					       jiffies_to_msecs(jiffies));
 	}
 	/*
 	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
_

Patches currently in -mm which might be from wangkefeng.wang@xxxxxxxxxx are
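
The net effect on the prot_numa path can be condensed from the hunks above
into the sketch below.  This is a paraphrase for readability, not the
verbatim upstream function: locking, the zero-page and pmd_protnone()
checks, and the unlock label are omitted.

	if (prot_numa) {
		/* Resolve the PMD-mapped page to its folio once. */
		struct folio *folio = page_folio(pmd_page(*pmd));
		bool toptier = node_is_toptier(folio_nid(folio));

		/*
		 * Record the access time on the folio; this replaces the
		 * last xchg_page_access_time() caller with
		 * folio_xchg_access_time().
		 */
		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
		    !toptier)
			folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
	}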