PMD-mapped hugetlb VMAs will also reach pagemap_pmd_range(). Add the
code required to handle them there.

Signed-off-by: Oscar Salvador <osalvador@xxxxxxx>
---
 fs/proc/task_mmu.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 4d94b6ce58dd..ec429d82b921 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1824,9 +1824,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	spinlock_t *ptl;
 	pte_t *pte, *orig_pte;
 	int err = 0;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
 
-	ptl = pmd_trans_huge_lock(pmdp, vma);
+	ptl = pmd_huge_lock(pmdp, vma);
 	if (ptl) {
 		unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
 		u64 flags = 0, frame = 0;
@@ -1848,7 +1848,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			if (pm->show_pfn)
 				frame = pmd_pfn(pmd) + idx;
 		}
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 		else if (is_swap_pmd(pmd)) {
 			swp_entry_t entry = pmd_to_swp_entry(pmd);
 			unsigned long offset;
@@ -1861,7 +1860,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 				frame = swp_type(entry) |
 					(offset << MAX_SWAPFILES_SHIFT);
 			}
-			flags |= PM_SWAP;
+			if (!is_vm_hugetlb_page(vma))
+				flags |= PM_SWAP;
 			if (pmd_swp_soft_dirty(pmd))
 				flags |= PM_SOFT_DIRTY;
 			if (pmd_swp_uffd_wp(pmd))
@@ -1869,7 +1869,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			VM_BUG_ON(!is_pmd_migration_entry(pmd));
 			page = pfn_swap_entry_to_page(entry);
 		}
-#endif
 
 		if (page) {
 			folio = page_folio(page);
@@ -1899,7 +1898,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		spin_unlock(ptl);
 		return err;
 	}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
 
 	/*
 	 * We can assume that @vma always points to a valid one and @end never
-- 
2.26.2
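
For reviewers who want to poke at the path this touches, below is a
minimal userspace sketch (not part of the patch) that exercises
pagemap_pmd_range() through a PMD-mapped hugetlb VMA: it maps one
hugetlb page, faults it in, and reads the matching /proc/self/pagemap
entry. The 2 MiB huge page size is an assumption (the x86_64 default);
the entry layout (bit 63 = present, bits 0-54 = PFN, PFN reported as
zero without CAP_SYS_ADMIN) is as documented in
Documentation/admin-guide/mm/pagemap.rst. The walk only reaches the
PMD leaf path for hugetlb once these VMAs are routed through the
generic walker, as this series does.

#define _GNU_SOURCE		/* for MAP_HUGETLB on glibc */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL << 20)	/* assumption: 2 MiB hugetlb pages */

int main(void)
{
	/* Needs a hugetlb pool, e.g.: echo 1 > /proc/sys/vm/nr_hugepages */
	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	/* Fault the page in so the walk sees a present PMD leaf. */
	memset(p, 1, HPAGE_SIZE);

	int fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open(/proc/self/pagemap)");
		return 1;
	}

	/* One u64 entry per base page; seek to the entry for our mapping. */
	long psize = sysconf(_SC_PAGESIZE);
	uint64_t ent;
	off_t off = (uintptr_t)p / psize * sizeof(ent);
	if (pread(fd, &ent, sizeof(ent), off) != sizeof(ent)) {
		perror("pread");
		return 1;
	}

	/* Bit 63: present; bits 0-54: PFN (zero unless CAP_SYS_ADMIN). */
	printf("present=%d pfn=0x%llx\n", (int)(ent >> 63),
	       (unsigned long long)(ent & ((1ULL << 55) - 1)));
	close(fd);
	return 0;
}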