Introduce and EXPORT a read-only counterpart to apply_to_page_range().

It only exposes the PTE value, not a pointer to the pagetables
themselves, and is thus quite a bit safer to export. A number of
apply_to_page_range() users can be converted to this primitive.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 include/linux/mm.h |    4 ++++
 mm/memory.c        |   24 ++++++++++++++++++++++++
 2 files changed, 28 insertions(+)

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2876,6 +2876,10 @@ extern int apply_to_page_range(struct mm
 extern int apply_to_existing_page_range(struct mm_struct *mm,
 				   unsigned long address, unsigned long size,
 				   pte_fn_t fn, void *data);
+extern int verify_page_range(struct mm_struct *mm,
+			     unsigned long addr, unsigned long size,
+			     int (*fn)(pte_t pte, unsigned long addr, void *data),
+			     void *data);
 
 extern void init_mem_debugging_and_hardening(void);
 #ifdef CONFIG_PAGE_POISONING
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2559,6 +2559,30 @@ int apply_to_existing_page_range(struct
 	return __apply_to_page_range(mm, addr, size, fn, data, false);
 }
 
+struct vpr_data {
+	int (*fn)(pte_t pte, unsigned long addr, void *data);
+	void *data;
+};
+
+static int vpr_fn(pte_t *pte, unsigned long addr, void *data)
+{
+	struct vpr_data *vpr = data;
+	return vpr->fn(*pte, addr, vpr->data);
+}
+
+int verify_page_range(struct mm_struct *mm,
+		      unsigned long addr, unsigned long size,
+		      int (*fn)(pte_t pte, unsigned long addr, void *data),
+		      void *data)
+{
+	struct vpr_data vpr = {
+		.fn = fn,
+		.data = data,
+	};
+	return apply_to_page_range(mm, addr, size, vpr_fn, &vpr);
+}
+EXPORT_SYMBOL_GPL(verify_page_range);
+
 /*
  * handle_pte_fault chooses page fault handler according to an entry which was
  * read non-atomically. Before making any commitment, on those architectures