do_set_pte_range() allows the caller to set up page table entries
for a specific range. It calls folio_add_file_rmap_range() to take
advantage of batched rmap updates for large folios.

update_mmu_cache() is now done inside do_set_pte_range(), so the
callers filemap_map_folio_range() and finish_fault() no longer
call it themselves.

Signed-off-by: Yin Fengwei <fengwei.yin@xxxxxxxxx>
---
 include/linux/mm.h |  3 +++
 mm/filemap.c       |  1 -
 mm/memory.c        | 66 ++++++++++++++++++++++++++++++++--------------
 3 files changed, 49 insertions(+), 21 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index d6f8f41514cc..93192f04b276 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1162,6 +1162,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 
 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+void do_set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		unsigned long addr, pte_t *pte,
+		unsigned long start, unsigned int nr);
 
 vm_fault_t finish_fault(struct vm_fault *vmf);
 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
diff --git a/mm/filemap.c b/mm/filemap.c
index 1c37376fc8d5..6f110b9e5d27 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3376,7 +3376,6 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			ref_count++;
 
 		do_set_pte(vmf, page, addr);
-		update_mmu_cache(vma, addr, vmf->pte);
 	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
 
 	/* Restore the vmf->pte */
diff --git a/mm/memory.c b/mm/memory.c
index 7a04a1130ec1..51f8bd91d9f0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4257,36 +4257,65 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 }
 #endif
 
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+void do_set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		unsigned long addr, pte_t *pte,
+		unsigned long start, unsigned int nr)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	bool uffd_wp = pte_marker_uffd_wp(vmf->orig_pte);
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
+	bool cow = write && !(vma->vm_flags & VM_SHARED);
 	bool prefault = vmf->address != addr;
+	struct page *page = folio_page(folio, start);
 	pte_t entry;
 
-	flush_icache_page(vma, page);
-	entry = mk_pte(page, vma->vm_page_prot);
+	if (!cow) {
+		folio_add_file_rmap_range(folio, start, nr, vma, false);
+		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+	} else {
+		/*
+		 * rmap code is not ready to handle COW with anonymous
+		 * large folios yet. Capture and warn if a large folio
+		 * is given.
+		 */
+		VM_WARN_ON_FOLIO(folio_test_large(folio), folio);
+	}
 
-	if (prefault && arch_wants_old_prefaulted_pte())
-		entry = pte_mkold(entry);
-	else
-		entry = pte_sw_mkyoung(entry);
+	do {
+		flush_icache_page(vma, page);
+		entry = mk_pte(page, vma->vm_page_prot);
 
-	if (write)
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	if (unlikely(uffd_wp))
-		entry = pte_mkuffd_wp(entry);
-	/* copy-on-write page */
-	if (write && !(vma->vm_flags & VM_SHARED)) {
+		if (prefault && arch_wants_old_prefaulted_pte())
+			entry = pte_mkold(entry);
+		else
+			entry = pte_sw_mkyoung(entry);
+
+		if (write)
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		if (unlikely(uffd_wp))
+			entry = pte_mkuffd_wp(entry);
+		set_pte_at(vma->vm_mm, addr, pte, entry);
+
+		/* no need to invalidate: a not-present page won't be cached */
+		update_mmu_cache(vma, addr, pte);
+	} while (pte++, page++, addr += PAGE_SIZE, --nr > 0);
+}
+
+void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+{
+	struct folio *folio = page_folio(page);
+	struct vm_area_struct *vma = vmf->vma;
+	bool cow = (vmf->flags & FAULT_FLAG_WRITE) &&
+			!(vma->vm_flags & VM_SHARED);
+
+	if (cow) {
 		inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, addr);
 		lru_cache_add_inactive_or_unevictable(page, vma);
-	} else {
-		inc_mm_counter(vma->vm_mm, mm_counter_file(page));
-		page_add_file_rmap(page, vma, false);
 	}
-	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
+
+	do_set_pte_range(vmf, folio, addr, vmf->pte,
+			folio_page_idx(folio, page), 1);
 }
 
 static bool vmf_pte_changed(struct vm_fault *vmf)
@@ -4361,9 +4390,6 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	if (likely(!vmf_pte_changed(vmf))) {
 		do_set_pte(vmf, page, vmf->address);
 
-		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, vmf->address, vmf->pte);
-
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
-- 
2.30.2
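
A note on usage: the sketch below is hypothetical and not part of
this patch; it only shows the shape of a caller that maps a
contiguous sub-range of a file-backed large folio in one call
(converting a real caller such as filemap_map_folio_range() would be
a separate change). It assumes the PTE lock is held, that the nr
entries starting at vmf->pte have already been checked by the caller,
and omits the real fault path's skipped-page and ref-count
bookkeeping.

/*
 * Hypothetical caller, for illustration only: map @nr pages of
 * @folio starting at in-folio index @start, where @addr is the
 * virtual address of the first page and vmf->pte points at its
 * page table entry.
 */
static void map_folio_subrange(struct vm_fault *vmf, struct folio *folio,
			       unsigned long addr, unsigned long start,
			       unsigned int nr)
{
	/*
	 * One call batches the rmap and file mm counter updates for
	 * all @nr pages, then fills the PTEs (and calls
	 * update_mmu_cache()) in a single loop.
	 */
	do_set_pte_range(vmf, folio, addr, vmf->pte, start, nr);

	/* One folio reference per mapped PTE, taken as a batch. */
	folio_ref_add(folio, nr);
}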