From: Steven Price <steven.price@xxxxxxx>
Subject: mm: pagewalk: add p4d_entry() and pgd_entry()

pgd_entry() and pud_entry() were removed by commit 0b1fbfe50006c410
("mm/pagewalk: remove pgd_entry() and pud_entry()") because there were
no users.  We're about to add users, so reintroduce them, along with
p4d_entry() as we now have 5 levels of tables.

Note that commit a00cc7d9dd93d66a ("mm, x86: add support for PUD-sized
transparent hugepages") already re-added pud_entry() but with different
semantics to the other callbacks.  Since there have never been upstream
users of this, revert the semantics back to match the other callbacks.
This means pud_entry() is called for all entries, not just transparent
huge pages.

Link: http://lkml.kernel.org/r/20191028135910.33253-12-steven.price@xxxxxxx
Signed-off-by: Steven Price <steven.price@xxxxxxx>
Tested-by: Zong Li <zong.li@xxxxxxxxxx>
Cc: Matthew Wilcox <mawilcox@xxxxxxxxxxxxx>
Cc: Dave Jiang <dave.jiang@xxxxxxxxx>
Cc: Albert Ou <aou@xxxxxxxxxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Alexandre Ghiti <alex@xxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christian Borntraeger <borntraeger@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: David S. Miller <davem@xxxxxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
Cc: James Hogan <jhogan@xxxxxxxxxx>
Cc: James Morse <james.morse@xxxxxxx>
Cc: "Liang, Kan" <kan.liang@xxxxxxxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxx>
Cc: Paul Burton <paul.burton@xxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Paul Walmsley <paul.walmsley@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxx>
Cc: Shiraz Hashim <shashim@xxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Cc: Vineet Gupta <vgupta@xxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/pagewalk.h |   19 +++++++++++++------
 mm/pagewalk.c            |   27 ++++++++++++++++-----------
 2 files changed, 29 insertions(+), 17 deletions(-)

--- a/include/linux/pagewalk.h~mm-pagewalk-add-p4d_entry-and-pgd_entry
+++ a/include/linux/pagewalk.h
@@ -8,15 +8,15 @@ struct mm_walk;
 
 /**
  * mm_walk_ops - callbacks for walk_page_range
- * @pud_entry:		if set, called for each non-empty PUD (2nd-level) entry
- *			this handler should only handle pud_trans_huge() puds.
- *			the pmd_entry or pte_entry callbacks will be used for
- *			regular PUDs.
- * @pmd_entry:		if set, called for each non-empty PMD (3rd-level) entry
+ * @pgd_entry:		if set, called for each non-empty PGD (top-level) entry
+ * @p4d_entry:		if set, called for each non-empty P4D entry
+ * @pud_entry:		if set, called for each non-empty PUD entry
+ * @pmd_entry:		if set, called for each non-empty PMD entry
  *			this handler is required to be able to handle
  *			pmd_trans_huge() pmds.  They may simply choose to
  *			split_huge_page() instead of handling it explicitly.
- * @pte_entry:		if set, called for each non-empty PTE (4th-level) entry
+ * @pte_entry:		if set, called for each non-empty PTE (lowest-level)
+ *			entry
  * @pte_hole:		if set, called for each hole at all levels
  * @hugetlb_entry:	if set, called for each hugetlb entry
  * @test_walk:		caller specific callback function to determine whether
@@ -27,8 +27,15 @@ struct mm_walk;
  * @pre_vma:		if set, called before starting walk on a non-null vma.
  * @post_vma:		if set, called after a walk on a non-null vma, provided
  *			that @pre_vma and the vma walk succeeded.
+ *
+ * p?d_entry callbacks are called even if those levels are folded on a
+ * particular architecture/configuration.
  */
 struct mm_walk_ops {
+	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
+			 unsigned long next, struct mm_walk *walk);
+	int (*p4d_entry)(p4d_t *p4d, unsigned long addr,
+			 unsigned long next, struct mm_walk *walk);
 	int (*pud_entry)(pud_t *pud, unsigned long addr,
 			 unsigned long next, struct mm_walk *walk);
 	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
--- a/mm/pagewalk.c~mm-pagewalk-add-p4d_entry-and-pgd_entry
+++ a/mm/pagewalk.c
@@ -94,15 +94,9 @@ static int walk_pud_range(p4d_t *p4d, un
 		}
 
 		if (ops->pud_entry) {
-			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
-
-			if (ptl) {
-				err = ops->pud_entry(pud, addr, next, walk);
-				spin_unlock(ptl);
-				if (err)
-					break;
-				continue;
-			}
+			err = ops->pud_entry(pud, addr, next, walk);
+			if (err)
+				break;
 		}
 
 		split_huge_pud(walk->vma, pud, addr);
@@ -136,7 +130,12 @@ static int walk_p4d_range(pgd_t *pgd, un
 				break;
 			continue;
 		}
-		if (ops->pmd_entry || ops->pte_entry)
+		if (ops->p4d_entry) {
+			err = ops->p4d_entry(p4d, addr, next, walk);
+			if (err)
+				break;
+		}
+		if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
 			err = walk_pud_range(p4d, addr, next, walk);
 		if (err)
 			break;
@@ -163,7 +162,13 @@ static int walk_pgd_range(unsigned long
 				break;
 			continue;
 		}
-		if (ops->pmd_entry || ops->pte_entry)
+		if (ops->pgd_entry) {
+			err = ops->pgd_entry(pgd, addr, next, walk);
+			if (err)
+				break;
+		}
+		if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
+		    ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
 		if (err)
 			break;
_
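
For illustration only, not part of the patch: a minimal sketch of how a
caller might use the reintroduced callbacks through the mm_walk_ops API
at this point in the series.  The log_pgd_entry/log_pud_entry handlers,
log_walk_ops and log_range are hypothetical names; note that with this
patch pud_entry() fires for all non-empty PUDs, not just
pud_trans_huge() ones, and the p?d_entry callbacks fire even where a
level is folded.

#include <linux/pagewalk.h>
#include <linux/printk.h>

static int log_pgd_entry(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk)
{
	pr_info("pgd entry covers %#lx-%#lx\n", addr, next);
	return 0;	/* returning non-zero aborts the walk */
}

static int log_pud_entry(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk)
{
	/* With this patch, called for every non-empty PUD. */
	pr_info("pud entry covers %#lx-%#lx\n", addr, next);
	return 0;
}

static const struct mm_walk_ops log_walk_ops = {
	.pgd_entry = log_pgd_entry,
	.pud_entry = log_pud_entry,
};

/* Hypothetical usage; walk_page_range() expects mmap_sem to be held. */
static void log_range(struct mm_struct *mm, unsigned long start,
		      unsigned long end)
{
	down_read(&mm->mmap_sem);
	walk_page_range(mm, start, end, &log_walk_ops, NULL);
	up_read(&mm->mmap_sem);
}

Leaving unused callbacks NULL keeps the walk cheap: walk_pgd_range()
only descends into a level when a callback for that level or a deeper
one is set.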