[RFC PATCH v2 4/5] mm: add do_set_pte_range()

do_set_pte_range() allows setting up page table entries for a
specific range of pages. It calls page_add_file_rmap_range() to
take advantage of batched rmap updates for large folios.

Signed-off-by: Yin Fengwei <fengwei.yin@xxxxxxxxx>
---
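A minimal usage sketch, illustration only and not part of this
patch (the actual caller conversion is a separate change); the loop
variables (page, addr, count, nr_pages, start) are assumed from the
filemap_map_folio_range() context visible in the hunk below. A
caller that holds the PTE lock, with vmf->pte pointing at the first
PTE slot, could replace its per-page loop with one call per folio
range so the rmap and mm counter updates are batched:

	/* Per-page setup: one rmap and mm counter update per page. */
	do {
		do_set_pte(vmf, page, addr);
	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);

	/*
	 * Batched setup of the same range: the PTEs, the rmap and the
	 * mm counter are all handled inside do_set_pte_range().
	 * @start is the first page's index within @folio, @nr_pages
	 * the number of pages to map.
	 */
	do_set_pte_range(vmf, folio, start, addr, nr_pages);

Note that do_set_pte_range() returns with vmf->pte advanced by the
number of mapped entries; the do_set_pte() wrapper in this patch
undoes that with vmf->pte-- because its callers expect vmf->pte to
be unchanged.
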
 include/linux/mm.h |  2 ++
 mm/filemap.c       |  1 -
 mm/memory.c        | 60 ++++++++++++++++++++++++++++++++++++----------
 3 files changed, 49 insertions(+), 14 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index d6f8f41514cc..96e08fcdce24 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1162,6 +1162,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 
 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+void do_set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		unsigned long start, unsigned long addr, unsigned int nr);
 
 vm_fault_t finish_fault(struct vm_fault *vmf);
 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
diff --git a/mm/filemap.c b/mm/filemap.c
index 9cc5edd8f998..95f634d11581 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3386,7 +3386,6 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 
 		ref_count++;
 		do_set_pte(vmf, page, addr);
-		update_mmu_cache(vma, addr, vmf->pte);
 	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
 
 	/*
diff --git a/mm/memory.c b/mm/memory.c
index 51c04bb60724..7e41142e1e4f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4257,7 +4257,8 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 }
 #endif
 
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+static void do_set_pte_entry(struct vm_fault *vmf, struct page *page,
+		unsigned long addr)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	bool uffd_wp = pte_marker_uffd_wp(vmf->orig_pte);
@@ -4277,16 +4278,52 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 	if (unlikely(uffd_wp))
 		entry = pte_mkuffd_wp(entry);
-	/* copy-on-write page */
-	if (write && !(vma->vm_flags & VM_SHARED)) {
-		inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-		page_add_new_anon_rmap(page, vma, addr);
-		lru_cache_add_inactive_or_unevictable(page, vma);
-	} else {
-		inc_mm_counter(vma->vm_mm, mm_counter_file(page));
-		page_add_file_rmap(page, vma, false);
-	}
 	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
+
+	/* no need to invalidate: a not-present page won't be cached */
+	update_mmu_cache(vma, addr, vmf->pte);
+}
+
+void do_set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		unsigned long start, unsigned long addr, unsigned int nr)
+{
+	unsigned int i = 0;
+	struct page *page = folio_page(folio, start);
+	struct vm_area_struct *vma = vmf->vma;
+	bool cow = (vmf->flags & FAULT_FLAG_WRITE) &&
+			!(vma->vm_flags & VM_SHARED);
+
+	/*
+	 * file page: batched rmap and mm counter updates.
+	 * copy-on-write page: batched mm counter update only.
+	 */
+	if (!cow) {
+		page_add_file_rmap_range(folio, start, nr, vma, false);
+		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+	} else
+		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
+
+	do {
+		if (cow) {
+			page_add_new_anon_rmap(page, vma, addr);
+			lru_cache_add_inactive_or_unevictable(page, vma);
+		}
+
+		do_set_pte_entry(vmf, page, addr);
+	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++i < nr);
+}
+
+void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+{
+	struct folio *folio = page_folio(page);
+
+	do_set_pte_range(vmf, folio, folio_page_idx(folio, page), addr, 1);
+
+	/*
+	 * do_set_pte_range() advances vmf->pte. Restore it, as
+	 * do_set_pte() callers don't expect vmf->pte to change.
+	 */
+	vmf->pte--;
 }
 
 static bool vmf_pte_changed(struct vm_fault *vmf)
@@ -4361,9 +4398,6 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	if (likely(!vmf_pte_changed(vmf))) {
 		do_set_pte(vmf, page, vmf->address);
 
-		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, vmf->address, vmf->pte);
-
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
-- 
2.30.2
