To support batch mm counter updating in filemap_map_pages(), make
set_pte_range() return the MM_COUNTERS type to be updated and move the
mm counter update out of set_pte_range() into its callers.

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
 include/linux/mm.h |  4 ++--
 mm/filemap.c       | 10 +++++++---
 mm/memory.c        | 16 +++++++++++-----
 3 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0b4046b1e63d..6ad440ac3706 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1366,8 +1366,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }
 
 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
-void set_pte_range(struct vm_fault *vmf, struct folio *folio,
-		struct page *page, unsigned int nr, unsigned long addr);
+int set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		struct page *page, unsigned int nr, unsigned long addr);
 vm_fault_t finish_fault(struct vm_fault *vmf);
 
 #endif
diff --git a/mm/filemap.c b/mm/filemap.c
index 92e2d43e4c9d..2274e590bab4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3512,6 +3512,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	struct page *page = folio_page(folio, start);
 	unsigned int count = 0;
 	pte_t *old_ptep = vmf->pte;
+	int type;
 
 	do {
 		if (PageHWPoison(page + count))
@@ -3539,7 +3540,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			continue;
 skip:
 		if (count) {
-			set_pte_range(vmf, folio, page, count, addr);
+			type = set_pte_range(vmf, folio, page, count, addr);
+			add_mm_counter(vmf->vma->vm_mm, type, count);
 			folio_ref_add(folio, count);
 			if (in_range(vmf->address, addr, count * PAGE_SIZE))
 				ret = VM_FAULT_NOPAGE;
@@ -3553,7 +3555,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	} while (--nr_pages > 0);
 
 	if (count) {
-		set_pte_range(vmf, folio, page, count, addr);
+		type = set_pte_range(vmf, folio, page, count, addr);
+		add_mm_counter(vmf->vma->vm_mm, type, count);
 		folio_ref_add(folio, count);
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
@@ -3589,7 +3592,8 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 	if (vmf->address == addr)
 		ret = VM_FAULT_NOPAGE;
 
-	set_pte_range(vmf, folio, page, 1, addr);
+	add_mm_counter(vmf->vma->vm_mm,
+		       set_pte_range(vmf, folio, page, 1, addr), 1);
 	folio_ref_inc(folio);
 
 	return ret;
diff --git a/mm/memory.c b/mm/memory.c
index 78422d1c7381..485ffec9d4c7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4661,15 +4661,18 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
  * @page: The first page to create a PTE for.
  * @nr: The number of PTEs to create.
  * @addr: The first address to create a PTE for.
+ *
+ * Return: type of MM_COUNTERS to be updated
  */
-void set_pte_range(struct vm_fault *vmf, struct folio *folio,
-		struct page *page, unsigned int nr, unsigned long addr)
+int set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		struct page *page, unsigned int nr, unsigned long addr)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
 	pte_t entry;
+	int type;
 
 	flush_icache_pages(vma, page, nr);
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -4685,18 +4688,20 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 		entry = pte_mkuffd_wp(entry);
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
-		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
+		type = MM_ANONPAGES;
 		VM_BUG_ON_FOLIO(nr != 1, folio);
 		folio_add_new_anon_rmap(folio, vma, addr);
 		folio_add_lru_vma(folio, vma);
 	} else {
-		add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
+		type = mm_counter_file(folio);
 		folio_add_file_rmap_ptes(folio, page, nr, vma);
 	}
 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
 
 	/* no need to invalidate: a not-present page won't be cached */
 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
+
+	return type;
 }
 
 static bool vmf_pte_changed(struct vm_fault *vmf)
 {
@@ -4765,8 +4770,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	/* Re-check under ptl */
 	if (likely(!vmf_pte_changed(vmf))) {
 		struct folio *folio = page_folio(page);
+		int type = set_pte_range(vmf, folio, page, 1, vmf->address);
 
-		set_pte_range(vmf, folio, page, 1, vmf->address);
+		add_mm_counter(vmf->vma->vm_mm, type, 1);
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
-- 
2.41.0
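
For reference, a minimal caller-side sketch of the batching this change
enables (not part of the patch above): since every page of a given
file-backed folio maps to the same MM_COUNTERS type, a caller such as
filemap_map_folio_range() can accumulate the counts returned per run and
issue a single add_mm_counter() at the end. The variable name "total" is
illustrative only and does not appear in this patch.

	/*
	 * Sketch: batch the counter update using the type returned by
	 * set_pte_range(). One add_mm_counter() call replaces one call
	 * per mapped run of PTEs.
	 */
	int type = 0;
	unsigned int total = 0;

	/* ... for each contiguous run of "count" pages being mapped ... */
	type = set_pte_range(vmf, folio, page, count, addr);
	total += count;

	/* ... after all runs have been mapped ... */
	if (total)
		add_mm_counter(vmf->vma->vm_mm, type, total);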