In the following two cases, the PTE page cannot be empty and cannot be
reclaimed:

 1. an uffd-wp pte was re-installed
 2. should_zap_folio() returns false

Let's expose this information to the caller through is_pt_unreclaimable,
so that subsequent commits can use it in zap_pte_range() to detect
whether the PTE page can be reclaimed.

Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
---
 mm/memory.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 534d9d52b5ebe..cc89ede8ce2ab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1501,7 +1501,7 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, struct folio *folio,
 		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
 		unsigned long addr, struct zap_details *details, int *rss,
-		bool *force_flush, bool *force_break)
+		bool *force_flush, bool *force_break, bool *is_pt_unreclaimable)
 {
 	struct mm_struct *mm = tlb->mm;
 	bool delay_rmap = false;
@@ -1527,8 +1527,8 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
 	arch_check_zapped_pte(vma, ptent);
 	tlb_remove_tlb_entries(tlb, pte, nr, addr);
 	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
-		zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details,
-					      ptent);
+		*is_pt_unreclaimable =
+			zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
 
 	if (!delay_rmap) {
 		folio_remove_rmap_ptes(folio, page, nr, vma);
@@ -1552,7 +1552,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
 		unsigned int max_nr, unsigned long addr,
 		struct zap_details *details, int *rss, bool *force_flush,
-		bool *force_break)
+		bool *force_break, bool *is_pt_unreclaimable)
 {
 	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
 	struct mm_struct *mm = tlb->mm;
@@ -1567,15 +1567,18 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
 		arch_check_zapped_pte(vma, ptent);
 		tlb_remove_tlb_entry(tlb, pte, addr);
 		if (userfaultfd_pte_wp(vma, ptent))
-			zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
-						      details, ptent);
+			*is_pt_unreclaimable =
+				zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
+							      details, ptent);
 		ksm_might_unmap_zero_page(mm, ptent);
 		return 1;
 	}
 
 	folio = page_folio(page);
-	if (unlikely(!should_zap_folio(details, folio)))
+	if (unlikely(!should_zap_folio(details, folio))) {
+		*is_pt_unreclaimable = true;
 		return 1;
+	}
 
 	/*
 	 * Make sure that the common "small folio" case is as fast as possible
@@ -1587,11 +1590,12 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
 
 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
 				       addr, details, rss, force_flush,
-				       force_break);
+				       force_break, is_pt_unreclaimable);
 		return nr;
 	}
 	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
-			       details, rss, force_flush, force_break);
+			       details, rss, force_flush, force_break,
+			       is_pt_unreclaimable);
 	return 1;
 }
 
@@ -1622,6 +1626,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		pte_t ptent = ptep_get(pte);
 		struct folio *folio;
 		struct page *page;
+		bool is_pt_unreclaimable = false;
 		int max_nr;
 
 		nr = 1;
@@ -1635,7 +1640,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			max_nr = (end - addr) / PAGE_SIZE;
 			nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr,
 					      addr, details, rss, &force_flush,
-					      &force_break);
+					      &force_break, &is_pt_unreclaimable);
 			if (unlikely(force_break)) {
 				addr += nr * PAGE_SIZE;
 				break;
--
2.20.1
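
For illustration only, not part of this patch: a follow-up change could
consume the new flag in zap_pte_range() roughly as sketched below. The
can_reclaim_pt variable and the try_to_free_pte() helper are hypothetical
names used here just to show the intended use of is_pt_unreclaimable, not
the actual follow-up code.

	bool can_reclaim_pt = true;	/* hypothetical per-PTE-page flag */

	do {
		bool is_pt_unreclaimable = false;

		/* ... existing zap loop body ... */
		nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr,
				      addr, details, rss, &force_flush,
				      &force_break, &is_pt_unreclaimable);
		/* any unreclaimable entry keeps the whole PTE page in place */
		if (is_pt_unreclaimable)
			can_reclaim_pt = false;
	} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);

	if (can_reclaim_pt)
		try_to_free_pte(mm, pmd, start, details);	/* hypothetical helper */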