[PATCH v3 3/5] rmap: cleanup exit path of try_to_unmap_one_page()

Clean up the exit path of try_to_unmap_one_page() by removing
duplicated code.
Move page_vma_mapped_walk_done() back to try_to_unmap_one().
Rename subpage to page, as a folio has no concept of a subpage.

Signed-off-by: Yin Fengwei <fengwei.yin@xxxxxxxxx>
---
 mm/rmap.c | 74 ++++++++++++++++++++++---------------------------------
 1 file changed, 30 insertions(+), 44 deletions(-)
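
For readers skimming the diff below, here is a minimal, self-contained
sketch of the exit-path shape the function ends up with: one success
exit that adjusts a counter in a single place, one failure exit that
restores the cleared PTE, and one bare failure exit. This is plain
userspace C, not kernel code; every name in it (struct ctx, unmap_one(),
swap_ok, and so on) is invented purely for illustration.

#include <stdbool.h>
#include <stdio.h>

struct ctx {
	int pte;	/* stand-in for the page table entry */
	int saved_pte;	/* stand-in for the cleared pteval   */
	int rss;	/* stand-in for the mm counter       */
};

static bool unmap_one(struct ctx *c, bool swap_ok, bool consistent)
{
	c->saved_pte = c->pte;	/* "nuke" the PTE, remembering the old value */
	c->pte = 0;

	if (!consistent)		/* unrecoverable state: bare failure */
		goto exit;

	if (!swap_ok)			/* failed after clearing the PTE:    */
		goto exit_restore_pte;	/* put the old value back first      */

	/*
	 * Success path: every successful branch falls through to here,
	 * so the counter is decremented in exactly one place.
	 */
	c->rss--;
	return true;

exit_restore_pte:
	c->pte = c->saved_pte;
exit:
	return false;
}

int main(void)
{
	struct ctx c = { .pte = 42, .saved_pte = 0, .rss = 1 };

	printf("ok=%d pte=%d rss=%d\n", unmap_one(&c, true, true), c.pte, c.rss);
	printf("ok=%d pte=%d rss=%d\n", unmap_one(&c, false, true), c.pte, c.rss);
	return 0;
}

In the real function the same shape appears as the discard:,
exit_restore_pte: and exit: labels: success falls through to a single
dec_mm_counter(), the restore label runs set_pte_at() once, and
page_vma_mapped_walk_done() is no longer repeated in each failure branch
because the caller, try_to_unmap_one(), now invokes it when the helper
returns false.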

diff --git a/mm/rmap.c b/mm/rmap.c
index 013643122d0c..836cfc13cf9d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1528,7 +1528,7 @@ static bool try_to_unmap_one_hugetlb(struct folio *folio,
 	 *
 	 * See Documentation/mm/mmu_notifier.rst
 	 */
-	page_remove_rmap(&folio->page, vma, folio_test_hugetlb(folio));
+	page_remove_rmap(&folio->page, vma, true);
 	/* No VM_LOCKED set in vma->vm_flags for hugetlb. So not
 	 * necessary to call mlock_drain_local().
 	 */
@@ -1543,15 +1543,13 @@ static bool try_to_unmap_one_page(struct folio *folio,
 		struct page_vma_mapped_walk pvmw, unsigned long address,
 		enum ttu_flags flags)
 {
-	bool anon_exclusive, ret = true;
-	struct page *subpage;
+	bool anon_exclusive;
+	struct page *page;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t pteval;
 
-	subpage = folio_page(folio,
-			pte_pfn(*pvmw.pte) - folio_pfn(folio));
-	anon_exclusive = folio_test_anon(folio) &&
-		PageAnonExclusive(subpage);
+	page = folio_page(folio, pte_pfn(*pvmw.pte) - folio_pfn(folio));
+	anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
 
 	flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
 	/* Nuke the page table entry. */
@@ -1579,15 +1577,14 @@ static bool try_to_unmap_one_page(struct folio *folio,
 	pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
 
 	/* Set the dirty flag on the folio now the pte is gone. */
-	if (pte_dirty(pteval))
+	if (pte_dirty(pteval) && !folio_test_dirty(folio))
 		folio_mark_dirty(folio);
 
 	/* Update high watermark before we lower rss */
 	update_hiwater_rss(mm);
 
-	if (PageHWPoison(subpage) && !(flags & TTU_HWPOISON)) {
-		pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
-		dec_mm_counter(mm, mm_counter(&folio->page));
+	if (PageHWPoison(page) && !(flags & TTU_HWPOISON)) {
+		pteval = swp_entry_to_pte(make_hwpoison_entry(page));
 		set_pte_at(mm, address, pvmw.pte, pteval);
 	} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
 		/*
@@ -1600,12 +1597,11 @@ static bool try_to_unmap_one_page(struct folio *folio,
 		 * migration) will not expect userfaults on already
 		 * copied pages.
 		 */
-		dec_mm_counter(mm, mm_counter(&folio->page));
 		/* We have to invalidate as we cleared the pte */
 		mmu_notifier_invalidate_range(mm, address,
 				address + PAGE_SIZE);
 	} else if (folio_test_anon(folio)) {
-		swp_entry_t entry = { .val = page_private(subpage) };
+		swp_entry_t entry = { .val = page_private(page) };
 		pte_t swp_pte;
 		/*
 		 * Store the swap location in the pte.
@@ -1614,12 +1610,10 @@ static bool try_to_unmap_one_page(struct folio *folio,
 		if (unlikely(folio_test_swapbacked(folio) !=
 					folio_test_swapcache(folio))) {
 			WARN_ON_ONCE(1);
-			ret = false;
 			/* We have to invalidate as we cleared the pte */
 			mmu_notifier_invalidate_range(mm, address,
 					address + PAGE_SIZE);
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit;
 		}
 
 		/* MADV_FREE page check */
@@ -1651,7 +1645,6 @@ static bool try_to_unmap_one_page(struct folio *folio,
 				/* Invalidate as we cleared the pte */
 				mmu_notifier_invalidate_range(mm,
 						address, address + PAGE_SIZE);
-				dec_mm_counter(mm, MM_ANONPAGES);
 				goto discard;
 			}
 
@@ -1659,43 +1652,30 @@ static bool try_to_unmap_one_page(struct folio *folio,
 			 * If the folio was redirtied, it cannot be
 			 * discarded. Remap the page to page table.
 			 */
-			set_pte_at(mm, address, pvmw.pte, pteval);
 			folio_set_swapbacked(folio);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit_restore_pte;
 		}
 
-		if (swap_duplicate(entry) < 0) {
-			set_pte_at(mm, address, pvmw.pte, pteval);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
-		}
+		if (swap_duplicate(entry) < 0)
+			goto exit_restore_pte;
+
 		if (arch_unmap_one(mm, vma, address, pteval) < 0) {
 			swap_free(entry);
-			set_pte_at(mm, address, pvmw.pte, pteval);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit_restore_pte;
 		}
 
 		/* See page_try_share_anon_rmap(): clear PTE first. */
-		if (anon_exclusive &&
-				page_try_share_anon_rmap(subpage)) {
+		if (anon_exclusive && page_try_share_anon_rmap(page)) {
 			swap_free(entry);
-			set_pte_at(mm, address, pvmw.pte, pteval);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit_restore_pte;
 		}
+
 		if (list_empty(&mm->mmlist)) {
 			spin_lock(&mmlist_lock);
 			if (list_empty(&mm->mmlist))
 				list_add(&mm->mmlist, &init_mm.mmlist);
 			spin_unlock(&mmlist_lock);
 		}
-		dec_mm_counter(mm, MM_ANONPAGES);
 		inc_mm_counter(mm, MM_SWAPENTS);
 		swp_pte = swp_entry_to_pte(entry);
 		if (anon_exclusive)
@@ -1706,8 +1686,7 @@ static bool try_to_unmap_one_page(struct folio *folio,
 			swp_pte = pte_swp_mkuffd_wp(swp_pte);
 		set_pte_at(mm, address, pvmw.pte, swp_pte);
 		/* Invalidate as we cleared the pte */
-		mmu_notifier_invalidate_range(mm, address,
-				address + PAGE_SIZE);
+		mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE);
 	} else {
 		/*
 		 * This is a locked file-backed folio,
@@ -1720,11 +1699,16 @@ static bool try_to_unmap_one_page(struct folio *folio,
 		 *
 		 * See Documentation/mm/mmu_notifier.rst
 		 */
-		dec_mm_counter(mm, mm_counter_file(&folio->page));
 	}
 
 discard:
-	return ret;
+	dec_mm_counter(vma->vm_mm, mm_counter(&folio->page));
+	return true;
+
+exit_restore_pte:
+	set_pte_at(mm, address, pvmw.pte, pteval);
+exit:
+	return false;
 }
 
 /*
@@ -1802,8 +1786,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 					pte_pfn(*pvmw.pte) - folio_pfn(folio));
 		ret = try_to_unmap_one_page(folio, vma,
 						range, pvmw, address, flags);
-		if (!ret)
+		if (!ret) {
+			page_vma_mapped_walk_done(&pvmw);
 			break;
+		}
 
 		/*
 		 * No need to call mmu_notifier_invalidate_range() it has be
@@ -1812,7 +1798,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		 *
 		 * See Documentation/mm/mmu_notifier.rst
 		 */
-		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+		page_remove_rmap(subpage, vma, false);
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_drain_local();
 		folio_put(folio);
-- 
2.30.2




