if (!pvmw.pte) {
+ lazyfree = folio_test_anon(folio) && !folio_test_swapbacked(folio);
You've checked lazyfree here, so can we remove the duplicate check in
unmap_huge_pmd_locked()? Then the code should be:
if (lazyfree && unmap_huge_pmd_locked(...))
goto walk_done;
Right — it seems unmap_huge_pmd_locked() only handles lazyfree PMD-mapped
THP, so I guess the code could be:
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index aea49f7125f1..c4c3a7896de4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3131,11 +3131,10 @@ bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
- if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
- return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
-
- return false;
+ return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
}
static void remap_page(struct folio *folio, unsigned long nr, int flags)
diff --git a/mm/rmap.c b/mm/rmap.c
index 02c4e4b2cd7b..72907eb1b8fe 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1671,7 +1671,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
pte_t pteval;
struct page *subpage;
- bool anon_exclusive, lazyfree, ret = true;
+ bool anon_exclusive, ret = true;
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
int nr_pages = 1;
@@ -1724,18 +1724,16 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
}
if (!pvmw.pte) {
- lazyfree = folio_test_anon(folio) && !folio_test_swapbacked(folio);
-
- if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
- folio))
- goto walk_done;
- /*
- * unmap_huge_pmd_locked has either already marked
- * the folio as swap-backed or decided to retain it
- * due to GUP or speculative references.
- */
- if (lazyfree)
+ if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
+ if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
+ goto walk_done;
+ /*
+ * unmap_huge_pmd_locked has either already marked
+ * the folio as swap-backed or decided to retain it
+ * due to GUP or speculative references.
+ */
goto walk_abort;
+ }
if (flags & TTU_SPLIT_HUGE_PMD) {
/*
if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
folio))
goto walk_done;
+ /*
+ * unmap_huge_pmd_locked has either already marked
+ * the folio as swap-backed or decided to retain it
+ * due to GUP or speculative references.
+ */
+ if (lazyfree)
+ goto walk_abort;
if (flags & TTU_SPLIT_HUGE_PMD) {
/*