Skip over all consecutive none ptes in do_zap_pte_range(), which helps
optimize away need_resched() + force_break + incremental pte/addr
increments, etc.

Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
---
 mm/memory.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index abe07e6bdd1bb..7f8869a22b57c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1665,17 +1665,30 @@ static inline int do_zap_pte_range(struct mmu_gather *tlb,
 {
 	pte_t ptent = ptep_get(pte);
 	int max_nr = (end - addr) / PAGE_SIZE;
+	int nr = 0;
 
-	if (pte_none(ptent))
-		return 1;
+	/* Skip all consecutive none ptes */
+	if (pte_none(ptent)) {
+		for (nr = 1; nr < max_nr; nr++) {
+			ptent = ptep_get(pte + nr);
+			if (!pte_none(ptent))
+				break;
+		}
+		max_nr -= nr;
+		if (!max_nr)
+			return nr;
+		pte += nr;
+		addr += nr * PAGE_SIZE;
+	}
 
 	if (pte_present(ptent))
-		return zap_present_ptes(tlb, vma, pte, ptent, max_nr,
-					addr, details, rss, force_flush,
-					force_break);
+		nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
+				       details, rss, force_flush, force_break);
+	else
+		nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
+					  details, rss);
 
-	return zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
-				   details, rss);
+	return nr;
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
-- 
2.20.1
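
As an aside for readers outside mm/: the win here is that a whole run of
empty entries is consumed in a single call, so the caller's per-iteration
overhead (need_resched() checks, force_break handling, pte/addr advances)
is paid once per run instead of once per slot. Below is a minimal,
self-contained user-space sketch of that batching pattern; every name in
it (entry_t, ENTRY_NONE, process_entries, do_range) is hypothetical and
invented for illustration only, and none of this is kernel code.

	#include <stdio.h>

	typedef int entry_t;
	#define ENTRY_NONE 0

	/*
	 * Stand-in for the real per-entry work (the analogue of
	 * zap_present_ptes()/zap_nonpresent_ptes()). For simplicity it
	 * only consumes one non-empty entry at a time.
	 */
	static int process_entries(const entry_t *e, int max_nr)
	{
		(void)e;
		(void)max_nr;
		return 1;
	}

	/*
	 * Returns how many entries were consumed, mirroring the return
	 * convention of do_zap_pte_range() after this patch.
	 */
	static int do_range(const entry_t *table, int start, int end)
	{
		const entry_t *e = table + start;
		int max_nr = end - start;
		int nr = 0;

		/* Skip all consecutive none entries, as in the patch. */
		if (*e == ENTRY_NONE) {
			for (nr = 1; nr < max_nr; nr++) {
				if (e[nr] != ENTRY_NONE)
					break;
			}
			max_nr -= nr;
			if (!max_nr)
				return nr; /* the whole range was empty */
			e += nr;
		}

		return nr + process_entries(e, max_nr);
	}

	int main(void)
	{
		entry_t table[] = { 0, 0, 0, 7, 0, 0, 9, 0 };
		int n = sizeof(table) / sizeof(table[0]);
		int i = 0;

		/* Caller loop advances by however much do_range() consumed. */
		while (i < n) {
			int consumed = do_range(table, i, n);

			printf("consumed %d entries at index %d\n", consumed, i);
			i += consumed;
		}
		return 0;
	}

With the sample table above, the three leading empty slots are consumed
together with the first non-empty entry in one call (4 entries), so the
caller's loop body runs three times total instead of eight.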