[RFC PATCH 03/14] mm/khugepaged: add __do_collapse_huge_page() helper

collapse_huge_page() currently does the following: (1) possibly
allocates a hugepage, (2) charges the owning memcg, (3) swaps in
swapped-out pages, (4) performs the actual collapse (copying of pages,
installation of the huge pmd), and (5) does some final memcg accounting
on the error path.

Separate out step (4) so that it can be reused on its own later in the
series.
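
The resulting shape of the split, as a simplified sketch (locking,
error handling, and most of the bodies are elided; see the diff below
for the real thing):

	/*
	 * Step (4) only.  Caller must hold mmap_lock in write mode and
	 * pass an already-allocated, already-charged new_page.
	 */
	static int __do_collapse_huge_page(struct mm_struct *mm,
					   struct vm_area_struct *vma,
					   unsigned long address, pmd_t *pmd,
					   struct page *new_page,
					   int enforce_pte_scan_limits,
					   int *isolated_out);

	static void collapse_huge_page(...)
	{
		/* steps (1)-(3): allocate hugepage, charge memcg, swap in */
		...
		result = __do_collapse_huge_page(mm, vma, address, pmd,
						 new_page,
						 enforce_pte_scan_limits,
						 &isolated);
		if (result == SCAN_SUCCEED) {
			*hpage = NULL;
			khugepaged_pages_collapsed++;
		}
		/* step (5): error-path accounting */
		...
	}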

Signed-off-by: Zach O'Keefe <zokeefe@xxxxxxxxxx>
---
 mm/khugepaged.c | 178 +++++++++++++++++++++++++++---------------------
 1 file changed, 100 insertions(+), 78 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 36fc0099c445..e3399a451662 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1058,85 +1058,23 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 	return true;
 }
 
-static void collapse_huge_page(struct mm_struct *mm,
-				   unsigned long address,
-				   struct page **hpage,
-				   int node, int referenced, int unmapped,
-				   int enforce_pte_scan_limits)
-{
-	LIST_HEAD(compound_pagelist);
-	pmd_t *pmd, _pmd;
+static int __do_collapse_huge_page(struct mm_struct *mm,
+				   struct vm_area_struct *vma,
+				   unsigned long address, pmd_t *pmd,
+				   struct page *new_page,
+				   int enforce_pte_scan_limits,
+				   int *isolated_out)
+{
+	pmd_t _pmd;
 	pte_t *pte;
 	pgtable_t pgtable;
-	struct page *new_page;
 	spinlock_t *pmd_ptl, *pte_ptl;
-	int isolated = 0, result = 0;
-	struct vm_area_struct *vma;
+	int isolated = 0, result = SCAN_SUCCEED;
 	struct mmu_notifier_range range;
-	gfp_t gfp;
-
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-
-	/* Only allocate from the target node */
-	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
-
-	/*
-	 * Before allocating the hugepage, release the mmap_lock read lock.
-	 * The allocation can take potentially a long time if it involves
-	 * sync compaction, and we do not need to hold the mmap_lock during
-	 * that. We will recheck the vma after taking it again in write mode.
-	 */
-	mmap_read_unlock(mm);
-	new_page = khugepaged_alloc_page(hpage, gfp, node);
-	if (!new_page) {
-		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
-		goto out_nolock;
-	}
-
-	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
-		result = SCAN_CGROUP_CHARGE_FAIL;
-		goto out_nolock;
-	}
-	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
-
-	mmap_read_lock(mm);
-	result = hugepage_vma_revalidate(mm, address, &vma);
-	if (result) {
-		mmap_read_unlock(mm);
-		goto out_nolock;
-	}
-
-	pmd = mm_find_pmd(mm, address);
-	if (!pmd) {
-		result = SCAN_PMD_NULL;
-		mmap_read_unlock(mm);
-		goto out_nolock;
-	}
-
-	/*
-	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
-	 * If it fails, we release mmap_lock and jump out_nolock.
-	 * Continuing to collapse causes inconsistency.
-	 */
-	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
-						     pmd, referenced)) {
-		mmap_read_unlock(mm);
-		goto out_nolock;
-	}
+	LIST_HEAD(compound_pagelist);
 
-	mmap_read_unlock(mm);
-	/*
-	 * Prevent all access to pagetables with the exception of
-	 * gup_fast later handled by the ptep_clear_flush and the VM
-	 * handled by the anon_vma lock + PG_lock.
-	 */
-	mmap_write_lock(mm);
-	result = hugepage_vma_revalidate(mm, address, &vma);
-	if (result)
-		goto out_up_write;
-	/* check if the pmd is still valid */
-	if (mm_find_pmd(mm, address) != pmd)
-		goto out_up_write;
+	VM_BUG_ON(!new_page);
+	mmap_assert_write_locked(mm);
 
 	anon_vma_lock_write(vma->anon_vma);
 
@@ -1176,7 +1114,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		spin_unlock(pmd_ptl);
 		anon_vma_unlock_write(vma->anon_vma);
 		result = SCAN_FAIL;
-		goto out_up_write;
+		goto out;
 	}
 
 	/*
@@ -1208,11 +1146,95 @@ static void collapse_huge_page(struct mm_struct *mm,
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
 	spin_unlock(pmd_ptl);
+out:
+	if (isolated_out)
+		*isolated_out = isolated;
+	return result;
+}
 
-	*hpage = NULL;
 
-	khugepaged_pages_collapsed++;
-	result = SCAN_SUCCEED;
+static void collapse_huge_page(struct mm_struct *mm,
+			       unsigned long address,
+			       struct page **hpage,
+			       int node, int referenced, int unmapped,
+			       int enforce_pte_scan_limits)
+{
+	pmd_t *pmd;
+	struct page *new_page;
+	int isolated = 0, result = 0;
+	struct vm_area_struct *vma;
+	gfp_t gfp;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	/* Only allocate from the target node */
+	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
+
+	/*
+	 * Before allocating the hugepage, release the mmap_lock read lock.
+	 * The allocation can take potentially a long time if it involves
+	 * sync compaction, and we do not need to hold the mmap_lock during
+	 * that. We will recheck the vma after taking it again in write mode.
+	 */
+	mmap_read_unlock(mm);
+	new_page = khugepaged_alloc_page(hpage, gfp, node);
+	if (!new_page) {
+		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
+		goto out_nolock;
+	}
+
+	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
+		result = SCAN_CGROUP_CHARGE_FAIL;
+		goto out_nolock;
+	}
+	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
+
+	mmap_read_lock(mm);
+	result = hugepage_vma_revalidate(mm, address, &vma);
+	if (result) {
+		mmap_read_unlock(mm);
+		goto out_nolock;
+	}
+
+	pmd = mm_find_pmd(mm, address);
+	if (!pmd) {
+		result = SCAN_PMD_NULL;
+		mmap_read_unlock(mm);
+		goto out_nolock;
+	}
+
+	/*
+	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
+	 * If it fails, we release mmap_lock and jump out_nolock.
+	 * Continuing to collapse causes inconsistency.
+	 */
+	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
+						     pmd, referenced)) {
+		mmap_read_unlock(mm);
+		goto out_nolock;
+	}
+
+	mmap_read_unlock(mm);
+	/*
+	 * Prevent all access to pagetables with the exception of
+	 * gup_fast later handled by the ptep_clear_flush and the VM
+	 * handled by the anon_vma lock + PG_lock.
+	 */
+	mmap_write_lock(mm);
+
+	result = hugepage_vma_revalidate(mm, address, &vma);
+	if (result)
+		goto out_up_write;
+	/* check if the pmd is still valid */
+	if (mm_find_pmd(mm, address) != pmd)
+		goto out_up_write;
+
+	result = __do_collapse_huge_page(mm, vma, address, pmd, new_page,
+					 enforce_pte_scan_limits, &isolated);
+	if (result == SCAN_SUCCEED) {
+		*hpage = NULL;
+		khugepaged_pages_collapsed++;
+	}
 out_up_write:
 	mmap_write_unlock(mm);
 out_nolock:
-- 
2.35.1.616.g0bdcbb4464-goog