From: Steven Price <steven.price@xxxxxxx>
Subject: mm: pagewalk: allow walking without vma

Since 48684a65b4e3: "mm: pagewalk: fix misbehavior of walk_page_range for
vma(VM_PFNMAP)", walk_page_range() will report any kernel area as a hole,
because it lacks a vma.

This means each arch has re-implemented page table walking when needed,
for example in the per-arch ptdump walker.

Remove the requirement to have a vma in the generic code and add a new
function walk_page_range_novma() which ignores the VMAs and simply walks
the page tables.

[steven.price@xxxxxxx: v15]
  Link: http://lkml.kernel.org/r/20191101140942.51554-13-steven.price@xxxxxxx
[steven.price@xxxxxxx: fix boot crash]
  Link: http://lkml.kernel.org/r/20191028135910.33253-13-steven.price@xxxxxxx
Cc: Zong Li <zong.li@xxxxxxxxxx>
Cc: Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx>
Cc: Shiraz Hashim <shashim@xxxxxxxxxxxxxx>
Cc: Albert Ou <aou@xxxxxxxxxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Alexandre Ghiti <alex@xxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christian Borntraeger <borntraeger@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Dave Jiang <dave.jiang@xxxxxxxxx>
Cc: David S. Miller <davem@xxxxxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
Cc: James Hogan <jhogan@xxxxxxxxxx>
Cc: James Morse <james.morse@xxxxxxx>
Cc: "Liang, Kan" <kan.liang@xxxxxxxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Matthew Wilcox <mawilcox@xxxxxxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxx>
Cc: Paul Burton <paul.burton@xxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Paul Walmsley <paul.walmsley@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Cc: Vineet Gupta <vgupta@xxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Qian Cai <cai@xxxxxx>
Cc: Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/pagewalk.h |    5 +++++
 mm/pagewalk.c            |   44 ++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 41 insertions(+), 8 deletions(-)

--- a/include/linux/pagewalk.h~mm-pagewalk-allow-walking-without-vma
+++ a/include/linux/pagewalk.h
@@ -59,6 +59,7 @@ struct mm_walk_ops {
  * @ops:	operation to call during the walk
  * @mm:		mm_struct representing the target process of page table walk
  * @vma:	vma currently walked (NULL if walking outside vmas)
+ * @no_vma:	walk ignoring vmas (vma will always be NULL)
  * @private:	private data for callbacks' usage
  *
  * (see the comment on walk_page_range() for more details)
@@ -67,12 +68,16 @@ struct mm_walk {
 	const struct mm_walk_ops *ops;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
+	bool no_vma;
 	void *private;
 };
 
 int walk_page_range(struct mm_struct *mm, unsigned long start,
 		unsigned long end, const struct mm_walk_ops *ops,
 		void *private);
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+			  unsigned long end, const struct mm_walk_ops *ops,
+			  void *private);
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private);
 int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
--- a/mm/pagewalk.c~mm-pagewalk-allow-walking-without-vma
+++ a/mm/pagewalk.c
@@ -39,7 +39,7 @@ static int walk_pmd_range(pud_t *pud, un
 	do {
 again:
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*pmd) || !walk->vma) {
+		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
 			if (ops->pte_hole)
 				err = ops->pte_hole(addr, next, walk);
 			if (err)
@@ -62,9 +62,14 @@ again:
 		if (!ops->pte_entry)
 			continue;
 
-		split_huge_pmd(walk->vma, pmd, addr);
-		if (pmd_trans_unstable(pmd))
-			goto again;
+		if (walk->vma) {
+			split_huge_pmd(walk->vma, pmd, addr);
+			if (pmd_trans_unstable(pmd))
+				goto again;
+		} else if (pmd_leaf(*pmd) || !pmd_present(*pmd)) {
+			continue;
+		}
+
 		err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
 			break;
@@ -85,7 +90,7 @@ static int walk_pud_range(p4d_t *p4d, un
 	do {
 again:
 		next = pud_addr_end(addr, end);
-		if (pud_none(*pud) || !walk->vma) {
+		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
 			if (ops->pte_hole)
 				err = ops->pte_hole(addr, next, walk);
 			if (err)
@@ -99,9 +104,13 @@ static int walk_pud_range(p4d_t *p4d, un
 			break;
 		}
 
-		split_huge_pud(walk->vma, pud, addr);
-		if (pud_none(*pud))
-			goto again;
+		if (walk->vma) {
+			split_huge_pud(walk->vma, pud, addr);
+			if (pud_none(*pud))
+				goto again;
+		} else if (pud_leaf(*pud) || !pud_present(*pud)) {
+			continue;
+		}
 
 		if (ops->pmd_entry || ops->pte_entry)
 			err = walk_pmd_range(pud, addr, next, walk);
@@ -374,6 +383,25 @@ int walk_page_range(struct mm_struct *mm
 	return err;
 }
 
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+			  unsigned long end, const struct mm_walk_ops *ops,
+			  void *private)
+{
+	struct mm_walk walk = {
+		.ops		= ops,
+		.mm		= mm,
+		.private	= private,
+		.no_vma		= true
+	};
+
+	if (start >= end || !walk.mm)
+		return -EINVAL;
+
+	lockdep_assert_held(&walk.mm->mmap_sem);
+
+	return __walk_page_range(start, end, &walk);
+}
+
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private)
 {
_
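
For readers unfamiliar with the pagewalk API, here is a rough usage sketch
(not part of the patch) of how a caller might drive the new
walk_page_range_novma(), in the spirit of a ptdump-style walker.  The
count_kernel_ptes() helper and the callback names are hypothetical; the
mm_walk_ops callback signatures match the API as of this patch (pte_hole
does not yet take a depth argument), and locking uses mmap_sem as the
lockdep assertion above expects.

#include <linux/mm.h>
#include <linux/pagewalk.h>
#include <linux/printk.h>

/* Private state threaded through the walk via walk->private. */
struct count_state {
	unsigned long present;	/* PTEs found present */
	unsigned long holes;	/* gaps reported through pte_hole */
};

/* Called for each PTE; in a novma walk, walk->vma is always NULL. */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	struct count_state *st = walk->private;

	if (pte_present(*pte))
		st->present++;
	return 0;
}

/* Called for unmapped ranges (e.g. pmd_none()/pud_none() entries). */
static int count_pte_hole(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	struct count_state *st = walk->private;

	st->holes++;
	return 0;
}

static const struct mm_walk_ops count_ops = {
	.pte_entry	= count_pte_entry,
	.pte_hole	= count_pte_hole,
};

/* Hypothetical helper: walk a kernel VA range through init_mm. */
static void count_kernel_ptes(unsigned long start, unsigned long end)
{
	struct count_state st = { };

	/* walk_page_range_novma() asserts mmap_sem is held. */
	down_read(&init_mm.mmap_sem);
	walk_page_range_novma(&init_mm, start, end, &count_ops, &st);
	up_read(&init_mm.mmap_sem);

	pr_info("present ptes: %lu, holes: %lu\n", st.present, st.holes);
}

Before this patch, such a walk would have taken the pte_hole path for the
entire range, because kernel addresses have no vma; with no_vma set, the
page tables themselves are visited, and huge leaf entries encountered
without a vma are skipped rather than split.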