The patch titled
     Subject: mm: pagewalk: fix walk for hugepage tables
has been added to the -mm tree.  Its filename is
     mm-pagewalk-fix-walk-for-hugepage-tables.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/mm-pagewalk-fix-walk-for-hugepage-tables.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/mm-pagewalk-fix-walk-for-hugepage-tables.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Subject: mm: pagewalk: fix walk for hugepage tables

Pagewalk ignores hugepd entries and walks down the tables as if they were
traditional entries, leading to bogus results.

Add walk_hugepd_range() and use it to walk hugepage tables.

Link: https://lkml.kernel.org/r/38d04410700c8d02f28ba37e020b62c55d6f3d2c.1624597695.git.christophe.leroy@xxxxxxxxxx
Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Reviewed-by: Steven Price <steven.price@xxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Daniel Axtens <dja@xxxxxxxxxx>
Cc: "Oliver O'Halloran" <oohall@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/pagewalk.c |   58 +++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 53 insertions(+), 5 deletions(-)

--- a/mm/pagewalk.c~mm-pagewalk-fix-walk-for-hugepage-tables
+++ a/mm/pagewalk.c
@@ -58,6 +58,45 @@ static int walk_pte_range(pmd_t *pmd, un
 	return err;
 }
 
+#ifdef CONFIG_ARCH_HAS_HUGEPD
+static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
+			     unsigned long end, struct mm_walk *walk, int pdshift)
+{
+	int err = 0;
+	const struct mm_walk_ops *ops = walk->ops;
+	int shift = hugepd_shift(*phpd);
+	int page_size = 1 << shift;
+
+	if (!ops->pte_entry)
+		return 0;
+
+	if (addr & (page_size - 1))
+		return 0;
+
+	for (;;) {
+		pte_t *pte;
+
+		spin_lock(&walk->mm->page_table_lock);
+		pte = hugepte_offset(*phpd, addr, pdshift);
+		err = ops->pte_entry(pte, addr, addr + page_size, walk);
+		spin_unlock(&walk->mm->page_table_lock);
+
+		if (err)
+			break;
+		if (addr >= end - page_size)
+			break;
+		addr += page_size;
+	}
+	return err;
+}
+#else
+static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
+			     unsigned long end, struct mm_walk *walk, int pdshift)
+{
+	return 0;
+}
+#endif
+
 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 			  struct mm_walk *walk)
 {
@@ -108,7 +147,10 @@ again:
 			goto again;
 		}
 
-		err = walk_pte_range(pmd, addr, next, walk);
+		if (is_hugepd(__hugepd(pmd_val(*pmd))))
+			err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
+		else
+			err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
 			break;
 	} while (pmd++, addr = next, addr != end);
@@ -157,7 +199,10 @@ static int walk_pud_range(p4d_t *p4d, un
 		if (pud_none(*pud))
 			goto again;
 
-		err = walk_pmd_range(pud, addr, next, walk);
+		if (is_hugepd(__hugepd(pud_val(*pud))))
+			err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
+		else
+			err = walk_pmd_range(pud, addr, next, walk);
 		if (err)
 			break;
 	} while (pud++, addr = next, addr != end);
@@ -189,7 +234,9 @@ static int walk_p4d_range(pgd_t *pgd, un
 		if (err)
 			break;
 	}
-	if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
+	if (is_hugepd(__hugepd(p4d_val(*p4d))))
+		err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
+	else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
 		err = walk_pud_range(p4d, addr, next, walk);
 		if (err)
 			break;
@@ -224,8 +271,9 @@ static int walk_pgd_range(unsigned long
 		if (err)
 			break;
 	}
-	if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
-	    ops->pte_entry)
+	if (is_hugepd(__hugepd(pgd_val(*pgd))))
+		err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
+	else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
 		err = walk_p4d_range(pgd, addr, next, walk);
 		if (err)
 			break;
_

Patches currently in -mm which might be from christophe.leroy@xxxxxxxxxx are

mm-pagewalk-fix-walk-for-hugepage-tables.patch
mm-hugetlb-change-parameters-of-arch_make_huge_pte.patch
mm-pgtable-add-stubs-for-pmd-pub_set-clear_huge.patch
mm-pgtable-add-stubs-for-pmd-pub_set-clear_huge-fix-2.patch
arm64-define-only-pud-pmd_set-clear_huge-when-usefull.patch
mm-vmalloc-enable-mapping-of-huge-pages-at-pte-level-in-vmap.patch
mm-vmalloc-enable-mapping-of-huge-pages-at-pte-level-in-vmalloc.patch
powerpc-8xx-add-support-for-huge-pages-on-vmap-and-vmalloc.patch
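
[Editor's note, not part of the patch: for readers unfamiliar with the pagewalk API,
the sketch below is a minimal, hypothetical mm_walk_ops user whose ->pte_entry callback,
after this fix, is also invoked once per huge page for hugepd-mapped ranges instead of
the walker misinterpreting the hugepd entry as a normal page table.  The names
count_present(), count_present_ops and count_present_pages() are invented for the
example; only walk_page_range(), struct mm_walk_ops and the pte helpers are real.]

/* Illustrative only -- hypothetical pagewalk user, not part of the patch. */
#include <linux/pagewalk.h>
#include <linux/mm.h>

static int count_present(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	/* For hugepd entries, (next - addr) is now the huge page size. */
	if (pte_present(*pte))
		*count += (next - addr) >> PAGE_SHIFT;
	return 0;
}

static const struct mm_walk_ops count_present_ops = {
	.pte_entry	= count_present,
};

static unsigned long count_present_pages(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	/* walk_page_range() requires the mmap lock to be held. */
	mmap_read_lock(mm);
	walk_page_range(mm, start, end, &count_present_ops, &count);
	mmap_read_unlock(mm);

	return count;
}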