This set_ptes should work for most architectures.  Those that need to
set a special bit in their PTEs or have their PFNs located at a
different offset from PAGE_SHIFT will need to override it.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/pgtable.h | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index c63cd44777ec..e1804d23e7c4 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1439,6 +1439,33 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #endif /* CONFIG_MMU */
 
+#ifndef set_ptes
+/**
+ * set_ptes - Map consecutive pages to a contiguous range of addresses.
+ * @mm: Address space to map the pages into.
+ * @addr: Address to map the first page at.
+ * @ptep: Page table pointer for the first entry.
+ * @pte: Page table entry for the first page.
+ * @nr: Number of pages to map.
+ *
+ * Context: The caller holds the page table lock.  The PTEs all lie
+ * within a single PMD (and VMA, and folio).
+ */
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte, unsigned int nr)
+{
+	for (;;) {
+		set_pte_at(mm, addr, ptep, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		addr += PAGE_SIZE;
+		/* This works for x86.  Check how PTEs are encoded */
+		pte = __pte(pte_val(pte) + PAGE_SIZE);
+	}
+}
+#endif
+
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 #ifndef __PAGETABLE_P4D_FOLDED
-- 
2.35.1
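
For context (not part of the patch): a minimal caller sketch showing
what the new interface is for, i.e. setting @nr consecutive PTEs with
one call instead of a set_pte_at() loop.  It assumes the usual
mk_pte() helper and that the page table lock is already held; the
function name example_map_folio_range() is purely illustrative and
does not appear in the patch.

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Illustrative only: map @nr consecutive pages starting at @page into
 * @vma at @addr with a single set_ptes() call.  The caller holds the
 * page table lock covering @ptep, and all @nr PTEs lie within one PMD.
 */
static void example_map_folio_range(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, struct page *page,
		unsigned int nr)
{
	pte_t pte = mk_pte(page, vma->vm_page_prot);

	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
}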