Introduce skip_none_ptes() to skip over all consecutive none ptes in
zap_pte_range(). For a long run of none ptes, this avoids the per-PTE
need_resched() check, the force_break handling, and the incremental
pte/addr advancement of the main loop.

Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
---
 mm/memory.c | 36 ++++++++++++++++++++++++++++++++----
 1 file changed, 32 insertions(+), 4 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index bd9ebe0f4471f..24633d0e1445a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1657,6 +1657,28 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
 	return nr;
 }
 
+static inline int skip_none_ptes(pte_t *pte, unsigned long addr,
+				 unsigned long end)
+{
+	pte_t ptent = ptep_get(pte);
+	int max_nr;
+	int nr;
+
+	if (!pte_none(ptent))
+		return 0;
+
+	max_nr = (end - addr) / PAGE_SIZE;
+	nr = 1;
+
+	for (; nr < max_nr; nr++) {
+		ptent = ptep_get(pte + nr);
+		if (!pte_none(ptent))
+			break;
+	}
+
+	return nr;
+}
+
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end,
@@ -1682,13 +1704,19 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		pte_t ptent = ptep_get(pte);
 		int max_nr;
 
-		nr = 1;
-		if (pte_none(ptent))
-			continue;
-
 		if (need_resched())
 			break;
 
+		nr = skip_none_ptes(pte, addr, end);
+		if (nr) {
+			addr += PAGE_SIZE * nr;
+			if (addr == end)
+				break;
+			pte += nr;
+			/* re-read the entry now that pte has advanced */
+			ptent = ptep_get(pte);
+		}
+
 		max_nr = (end - addr) / PAGE_SIZE;
 		if (pte_present(ptent)) {
 			nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr,
-- 
2.20.1
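
For readers without mm/memory.c in front of them, below is a minimal
user-space sketch of the same batching idea: instead of testing one slot
per loop iteration and paying the loop bookkeeping each time, count the
whole run of empty slots up front and jump past it in one step. All names
here (entry_t, slot_is_empty(), skip_empty_slots(), process_entries()) are
hypothetical stand-ins for illustration, not kernel APIs:

#include <stddef.h>
#include <stdio.h>

typedef unsigned long entry_t;

/* Stand-in for pte_none(): an all-zero entry is "empty". */
static int slot_is_empty(const entry_t *slot)
{
	return *slot == 0;
}

/* Analogue of skip_none_ptes(): length of the empty run starting at p. */
static size_t skip_empty_slots(const entry_t *p, size_t max_nr)
{
	size_t nr;

	if (!slot_is_empty(p))
		return 0;

	for (nr = 1; nr < max_nr; nr++)
		if (!slot_is_empty(p + nr))
			break;

	return nr;
}

/* Analogue of the zap_pte_range() loop: visit only non-empty entries. */
static void process_entries(const entry_t *p, size_t n)
{
	size_t i = 0;

	while (i < n) {
		/* Advance past a run of empty slots in one step. */
		i += skip_empty_slots(p + i, n - i);
		if (i == n)
			break;
		printf("entry[%zu] = %lu\n", i, p[i]);
		i++;
	}
}

int main(void)
{
	entry_t table[] = { 0, 0, 0, 7, 0, 0, 42, 0 };

	process_entries(table, sizeof(table) / sizeof(table[0]));
	return 0;
}

The shape mirrors the patch: skip_empty_slots() returns 0 when the first
slot is occupied, so callers only pay for the skip logic when a run of
empty slots is actually present, and the early "i == n" check corresponds
to the "addr == end" bailout in zap_pte_range().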