In preparation for reclaiming empty PTE pages, this commit first makes
zap_pte_range() handle the full within-PMD range, so that we can more
easily detect and free PTE pages in this function in subsequent commits.

Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
---
 mm/memory.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index caa6ed0a7fe5b..fd57c0f49fce2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1602,6 +1602,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	swp_entry_t entry;
 	int nr;
 
+retry:
 	tlb_change_page_size(tlb, PAGE_SIZE);
 	init_rss_vec(rss);
 	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
@@ -1706,6 +1707,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	if (force_flush)
 		tlb_flush_mmu(tlb);
 
+	if (addr != end) {
+		cond_resched();
+		goto retry;
+	}
+
 	return addr;
 }
 
@@ -1744,8 +1750,6 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 			continue;
 		}
 		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
-		if (addr != next)
-			pmd--;
	} while (pmd++, cond_resched(), addr != end);
 
 	return addr;
-- 
2.20.1
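
For readers following the control-flow change rather than the kernel
internals, below is a minimal, self-contained userspace C sketch of the
pattern this patch introduces: the inner function now loops over its
whole range itself, retrying after a yield point when a batch stops
early, so the caller no longer has to rewind and retry (the removed
"pmd--" in zap_pmd_range()). All names here (process_range, do_batch,
RANGE_END) are hypothetical stand-ins, not kernel APIs, and the yield
comment stands in for cond_resched().

#include <stdio.h>

#define RANGE_END 16UL

/* Hypothetical stand-in for one batch of PTE processing; it may stop
 * early (here: every 5 entries), the way zap_pte_range() can when it
 * must flush the TLB batch or drop the PTE lock. */
static unsigned long do_batch(unsigned long addr, unsigned long end)
{
	unsigned long stop = (addr + 5 < end) ? addr + 5 : end;

	while (addr < stop) {
		printf("processing %lu\n", addr);
		addr++;
	}
	return addr;	/* may be < end */
}

/* After the patch: the function retries internally until the whole
 * within-PMD range is done, mirroring the new "retry:" label plus
 * "if (addr != end) goto retry" added to zap_pte_range(). */
static unsigned long process_range(unsigned long addr, unsigned long end)
{
retry:
	addr = do_batch(addr, end);
	if (addr != end) {
		/* yield between batches; stands in for cond_resched() */
		goto retry;
	}
	return addr;
}

int main(void)
{
	/* The caller (cf. zap_pmd_range()) no longer needs the
	 * "if (addr != next) pmd--" rewind removed by this patch. */
	process_range(0, RANGE_END);
	return 0;
}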