[RFC PATCH 2/2] rmap: remove parameter 'compound' from folio_add_file_rmap_range()

Remove parameter 'compound' from folio_add_file_rmap_range().

The parameter nr_pages is compared against the number of pages in
the folio to determine whether the operation covers the entire folio.
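
In sketch form, the detection added to folio_add_file_rmap_range()
in the hunk below is:

	bool entire_map = folio_test_large(folio) &&
				(nr_pages == folio_nr_pages(folio));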

Convert the folio's _entire_mapcount to per-page mapcounts to handle
the case where the folio was added as an entire folio but is removed
page by page.
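
The conversion can be seen in the page_remove_rmap() hunk below:
when a per-page unmap finds that the page of a large folio has no
per-page mapcount (i.e. the folio was mapped as a whole), the entire
mapping is first handed out to the individual pages before
_entire_mapcount is dropped. In sketch form:

	/* Give every other page in the folio its own mapcount. */
	for (i = 0; i < nr_pages; i++) {
		struct page *pg = folio_page(folio, i);

		if (pg != page)
			atomic_inc(&pg->_mapcount);
	}
	/* ... then drop _entire_mapcount via folio_remove_entire_rmap(). */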

Signed-off-by: Yin Fengwei <fengwei.yin@xxxxxxxxx>
---
 include/linux/rmap.h |  2 +-
 mm/memory.c          |  2 +-
 mm/rmap.c            | 51 ++++++++++++++++++++++++++++----------------
 3 files changed, 35 insertions(+), 20 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 974124b41fee..00e2a229bb24 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -199,7 +199,7 @@ void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 void page_add_file_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 void folio_add_file_rmap_range(struct folio *, unsigned long start,
-		unsigned int nr_pages, struct vm_area_struct *, bool compound);
+		unsigned int nr_pages, struct vm_area_struct *);
 void page_remove_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 
diff --git a/mm/memory.c b/mm/memory.c
index 51f8bd91d9f0..fff1de9ac233 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4270,7 +4270,7 @@ void do_set_pte_range(struct vm_fault *vmf, struct folio *folio,
 	pte_t entry;
 
 	if (!cow) {
-		folio_add_file_rmap_range(folio, start, nr, vma, false);
+		folio_add_file_rmap_range(folio, start, nr, vma);
 		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
 	} else {
 		/*
diff --git a/mm/rmap.c b/mm/rmap.c
index 3ab67b33094b..23cb6983bfe7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1308,24 +1308,23 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
  * @start:	The first page number in folio
  * @nr_pages:	The number of pages which will be mapped
  * @vma:	the vm area in which the mapping is added
- * @compound:	charge the page as compound or small page
  *
  * The page range of folio is defined by [first_page, first_page + nr_pages)
  *
  * The caller needs to hold the pte lock.
  */
 void folio_add_file_rmap_range(struct folio *folio, unsigned long start,
-			unsigned int nr_pages, struct vm_area_struct *vma,
-			bool compound)
+			unsigned int nr_pages, struct vm_area_struct *vma)
 {
 	atomic_t *mapped = &folio->_nr_pages_mapped;
 	unsigned int nr_pmdmapped = 0, first;
 	int nr = 0;
+	bool entire_map = folio_test_large(folio) &&
+				(nr_pages == folio_nr_pages(folio));
 
-	VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio);
 
 	/* Is page being mapped by PTE? Is this its first map to be added? */
-	if (likely(!compound)) {
+	if (likely(!entire_map)) {
 		struct page *page = folio_page(folio, start);
 
 		nr_pages = min_t(unsigned int, nr_pages,
@@ -1341,15 +1340,13 @@ void folio_add_file_rmap_range(struct folio *folio, unsigned long start,
 			if (first)
 				nr++;
 		} while (page++, --nr_pages > 0);
-	} else if (folio_test_pmd_mappable(folio)) {
-		/* That test is redundant: it's for safety or to optimize out */
+	} else {
 
 		first = atomic_inc_and_test(&folio->_entire_mapcount);
 		if (first) {
 			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
 			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
-				nr_pmdmapped = folio_nr_pages(folio);
-				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
+				nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
 				/* Raced ahead of a remove and another add? */
 				if (unlikely(nr < 0))
 					nr = 0;
@@ -1358,6 +1355,9 @@ void folio_add_file_rmap_range(struct folio *folio, unsigned long start,
 				nr = 0;
 			}
 		}
+
+		if (folio_test_pmd_mappable(folio))
+			nr_pmdmapped = nr_pages;
 	}
 
 	if (nr_pmdmapped)
@@ -1366,7 +1366,7 @@ void folio_add_file_rmap_range(struct folio *folio, unsigned long start,
 	if (nr)
 		__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
 
-	mlock_vma_folio(folio, vma, compound);
+	mlock_vma_folio(folio, vma, entire_map);
 }
 
 /**
@@ -1390,8 +1390,8 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
 	else
 		nr_pages = folio_nr_pages(folio);
 
-	folio_add_file_rmap_range(folio, folio_page_idx(folio, page),
-			nr_pages, vma, compound);
+	folio_add_file_rmap_range(folio,
+			folio_page_idx(folio, page), nr_pages, vma);
 }
 
 static void folio_remove_entire_rmap(struct folio *folio,
@@ -1448,15 +1448,30 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 
 	/* Is page being unmapped by PTE? Is this its last map to be removed? */
 	if (likely(!compound)) {
-		last = atomic_add_negative(-1, &page->_mapcount);
-		nr = last;
-		if (last && folio_test_large(folio)) {
-			nr = atomic_dec_return_relaxed(mapped);
-			nr = (nr < COMPOUND_MAPPED);
+		if ((atomic_read(&page->_mapcount) == -1) &&
+				folio_test_large(folio)) {
+			int i, nr_pages = folio_nr_pages(folio);
+
+			for (i = 0; i < nr_pages; i++) {
+				struct page *pg = folio_page(folio, i);
+
+				if (pg != page)
+					atomic_inc(&pg->_mapcount);
+			}
+			folio_remove_entire_rmap(folio, &nr,
+						&nr_pmdmapped);
+
+			nr++;
+		} else {
+			last = atomic_add_negative(-1, &page->_mapcount);
+			nr = last;
+			if (last && folio_test_large(folio)) {
+				nr = atomic_dec_return_relaxed(mapped);
+				nr = (nr < COMPOUND_MAPPED);
+			}
 		}
 	} else if (folio_test_pmd_mappable(folio)) {
 		/* That test is redundant: it's for safety or to optimize out */
-
 		folio_remove_entire_rmap(folio, &nr, &nr_pmdmapped);
 	}
 
-- 
2.30.2




