Since commit 48684a65b4e3 ("mm: pagewalk: fix misbehavior of
walk_page_range for vma(VM_PFNMAP)"), walk_page_range() will report any
kernel area as a hole, because it lacks a vma.

This means each arch has re-implemented page table walking when needed,
for example in the per-arch ptdump walker.

Remove the requirement to have a vma in the generic code and add a new
function walk_page_range_novma() which ignores the VMAs and simply
walks the page tables.

Signed-off-by: Steven Price <steven.price@xxxxxxx>
---
 include/linux/pagewalk.h |  5 +++++
 mm/pagewalk.c            | 44 ++++++++++++++++++++++++++++++++--------
 2 files changed, 41 insertions(+), 8 deletions(-)

diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 06790f23957f..2c9725bdcf1f 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -59,6 +59,7 @@ struct mm_walk_ops {
  * @ops:	operation to call during the walk
  * @mm:		mm_struct representing the target process of page table walk
  * @vma:	vma currently walked (NULL if walking outside vmas)
+ * @no_vma:	walk ignoring vmas (vma will always be NULL)
  * @private:	private data for callbacks' usage
  *
  * (see the comment on walk_page_range() for more details)
@@ -67,12 +68,16 @@ struct mm_walk {
 	const struct mm_walk_ops *ops;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
+	bool no_vma;
 	void *private;
 };
 
 int walk_page_range(struct mm_struct *mm, unsigned long start,
 		unsigned long end, const struct mm_walk_ops *ops,
 		void *private);
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+			  unsigned long end, const struct mm_walk_ops *ops,
+			  void *private);
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private);
 int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index c089786e7a7f..efa464cf079b 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -39,7 +39,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 	do {
 again:
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*pmd) || !walk->vma) {
+		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
 			if (ops->pte_hole)
 				err = ops->pte_hole(addr, next, walk);
 			if (err)
@@ -62,9 +62,14 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 		if (!ops->pte_entry)
 			continue;
 
-		split_huge_pmd(walk->vma, pmd, addr);
-		if (pmd_trans_unstable(pmd))
-			goto again;
+		if (walk->vma) {
+			split_huge_pmd(walk->vma, pmd, addr);
+			if (pmd_trans_unstable(pmd))
+				goto again;
+		} else if (pmd_leaf(*pmd) || !pmd_present(*pmd)) {
+			continue;
+		}
+
 		err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
 			break;
@@ -85,7 +90,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 	do {
 again:
 		next = pud_addr_end(addr, end);
-		if (pud_none(*pud) || !walk->vma) {
+		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
 			if (ops->pte_hole)
 				err = ops->pte_hole(addr, next, walk);
 			if (err)
@@ -99,9 +104,13 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 			break;
 		}
 
-		split_huge_pud(walk->vma, pud, addr);
-		if (pud_none(*pud))
-			goto again;
+		if (walk->vma) {
+			split_huge_pud(walk->vma, pud, addr);
+			if (pud_none(*pud))
+				goto again;
+		} else if (pud_leaf(*pud) || !pud_present(*pud)) {
+			continue;
+		}
 
 		if (ops->pmd_entry || ops->pte_entry)
 			err = walk_pmd_range(pud, addr, next, walk);
@@ -374,6 +383,25 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
 	return err;
 }
 
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+			  unsigned long end, const struct mm_walk_ops *ops,
+			  void *private)
+{
+	struct mm_walk walk = {
+		.ops		= ops,
+		.mm		= mm,
+		.private	= private,
+		.no_vma		= true
+	};
+
+	if (start >= end || !walk.mm)
+		return -EINVAL;
+
+	lockdep_assert_held(&walk.mm->mmap_sem);
+
+	return __walk_page_range(start, end, &walk);
+}
+
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private)
 {
-- 
2.20.1
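
For illustration only (not part of the patch): a minimal sketch of how a
caller such as an arch ptdump walker might use the new interface. The
helper names note_kernel_pte() and dump_kernel_range(), the PTE-counting
logic and the use of init_mm are assumptions made up for this example;
only the walk_page_range_novma() signature and the requirement that
mmap_sem is held are taken from the patch itself.

#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/pagewalk.h>
#include <linux/rwsem.h>

/* Hypothetical pte_entry callback: count present PTEs in the range. */
static int note_kernel_pte(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *present = walk->private;

	if (pte_present(*pte))
		(*present)++;

	return 0;
}

static const struct mm_walk_ops example_ops = {
	.pte_entry	= note_kernel_pte,
};

/* Hypothetical caller: walk a kernel address range with no vma backing. */
static void dump_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long present = 0;

	/* walk_page_range_novma() asserts that mmap_sem is held. */
	down_read(&init_mm.mmap_sem);
	walk_page_range_novma(&init_mm, start, end, &example_ops, &present);
	up_read(&init_mm.mmap_sem);

	pr_info("%lu present ptes in [%#lx, %#lx)\n", present, start, end);
}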