On Sun, Oct 30, 2022 at 03:47:36PM -0700, Linus Torvalds wrote: > include/linux/rmap.h | 1 + > mm/memory.c | 3 ++- > mm/rmap.c | 24 ++++++++++++++++++++++++ > 3 files changed, 27 insertions(+), 1 deletion(-) > > diff --git a/include/linux/rmap.h b/include/linux/rmap.h > index bd3504d11b15..f62af001707c 100644 > --- a/include/linux/rmap.h > +++ b/include/linux/rmap.h > @@ -196,6 +196,7 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, > unsigned long address); > void page_add_file_rmap(struct page *, struct vm_area_struct *, > bool compound); > +void page_zap_pte_rmap(struct page *); > void page_remove_rmap(struct page *, struct vm_area_struct *, > bool compound); > > diff --git a/mm/memory.c b/mm/memory.c > index f88c351aecd4..c893f5ffc5a8 100644 > --- a/mm/memory.c > +++ b/mm/memory.c > @@ -1452,8 +1452,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, > likely(!(vma->vm_flags & VM_SEQ_READ))) > mark_page_accessed(page); > } > + page_zap_pte_rmap(page); > + munlock_vma_page(page, vma, false); > rss[mm_counter(page)]--; > - page_remove_rmap(page, vma, false); > if (unlikely(page_mapcount(page) < 0)) > print_bad_pte(vma, addr, ptent, page); > if (unlikely(__tlb_remove_page(tlb, page))) { > diff --git a/mm/rmap.c b/mm/rmap.c > index 2ec925e5fa6a..28b51a31ebb0 100644 > --- a/mm/rmap.c > +++ b/mm/rmap.c > @@ -1412,6 +1412,30 @@ static void page_remove_anon_compound_rmap(struct page *page) > __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); > } > > +/** > + * page_zap_pte_rmap - take down a pte mapping from a page > + * @page: page to remove mapping from > + * > + * This is the simplified form of page_remove_rmap(), that only > + * deals with last-level pages, so 'compound' is always false, > + * and the caller does 'munlock_vma_page(page, vma, compound)' > + * separately. > + * > + * This allows for a much simpler calling convention and code. > + * > + * The caller holds the pte lock. 
> + */ > +void page_zap_pte_rmap(struct page *page) > +{ One could consider adding something like: #ifdef USE_SPLIT_PTE_PTLOCKS lockdep_assert_held(ptlock_ptr(page)); #endif > + if (!atomic_add_negative(-1, &page->_mapcount)) > + return; > + > + lock_page_memcg(page); > + __dec_lruvec_page_state(page, > + PageAnon(page) ? NR_ANON_MAPPED : NR_FILE_MAPPED); > + unlock_page_memcg(page); > +} Took me a little while, but yes, .compound=false seems to reduce to this.