Now that the architectures are converted over to pte_advance_pfn(), we
can remove the pte_next_pfn() wrapper and convert the callers to call
pte_advance_pfn().

Signed-off-by: Ryan Roberts <ryan.roberts@xxxxxxx>
---
 include/linux/pgtable.h | 9 +--------
 mm/memory.c             | 4 ++--
 2 files changed, 3 insertions(+), 10 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 815d92dcb96b..50f32cccbd92 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -212,19 +212,12 @@ static inline int pmd_dirty(pmd_t pmd)
 #define arch_flush_lazy_mmu_mode()	do {} while (0)
 #endif
 
-
-#ifndef pte_next_pfn
 #ifndef pte_advance_pfn
 static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
 {
 	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
 }
 #endif
-static inline pte_t pte_next_pfn(pte_t pte)
-{
-	return pte_advance_pfn(pte, 1);
-}
-#endif
 
 #ifndef set_ptes
 /**
@@ -256,7 +249,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
 		if (--nr == 0)
 			break;
 		ptep++;
-		pte = pte_next_pfn(pte);
+		pte = pte_advance_pfn(pte, 1);
 	}
 	arch_leave_lazy_mmu_mode();
 }
diff --git a/mm/memory.c b/mm/memory.c
index 38a010c4d04d..65fbe4f886c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -988,7 +988,7 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 {
 	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
 	const pte_t *end_ptep = start_ptep + max_nr;
-	pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte), flags);
+	pte_t expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, 1), flags);
 	pte_t *ptep = start_ptep + 1;
 	bool writable;
 
@@ -1017,7 +1017,7 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		if (any_writable)
 			*any_writable |= writable;
 
-		expected_pte = pte_next_pfn(expected_pte);
+		expected_pte = pte_advance_pfn(expected_pte, 1);
 		ptep++;
 	}
 
-- 
2.25.1
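
For anyone who wants to poke at the arithmetic outside the kernel, below
is a minimal userspace sketch of what the generic pte_advance_pfn()
computes. The pte_t type, the __pte()/pte_val()/pte_pfn() helpers and
the PFN_PTE_SHIFT value are stand-ins (all arch-specific in the real
kernel) and are not part of this patch:

	#include <assert.h>

	/* Stand-ins for kernel types/macros; values are assumptions. */
	typedef struct { unsigned long val; } pte_t;
	#define PFN_PTE_SHIFT	12	/* assumed: 4K pages, PFN at bit 12 */

	static pte_t __pte(unsigned long val) { pte_t p = { val }; return p; }
	static unsigned long pte_val(pte_t p) { return p.val; }
	static unsigned long pte_pfn(pte_t p) { return p.val >> PFN_PTE_SHIFT; }

	/* Same body as the generic helper in include/linux/pgtable.h. */
	static pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
	{
		return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
	}

	int main(void)
	{
		pte_t pte = __pte(0x1000);	/* PFN 1; low bits hold flags */

		/* pte_advance_pfn(pte, 1) matches the old pte_next_pfn(pte). */
		assert(pte_pfn(pte_advance_pfn(pte, 1)) == 2);

		/* Callers may also step by more than one page at a time. */
		assert(pte_pfn(pte_advance_pfn(pte, 4)) == 5);
		return 0;
	}

Because the PFN sits in a fixed bit field above the flag bits, advancing
by nr is a single add; that is what allows a caller to step a PTE forward
by nr pages in one call instead of looping pte_next_pfn() nr times.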