Re: [RFC 09/11] khugepaged: add mTHP support

On 09/01/25 5:01 am, Nico Pache wrote:
Introduce the ability for khugepaged to collapse to different mTHP sizes.
While scanning a PMD range for potential hugepage collapse, track pages
in MIN_MTHP_ORDER chunks. Each bit represents a fully utilized region of
order MIN_MTHP_ORDER ptes.

With this bitmap we can determine which mTHP sizes would be the most
efficient to collapse to if the PMD collapse is not suitable.
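
Just to restate the bookkeeping for my own understanding, the scan below boils down to something like this per PMD range (a condensed sketch of the hunks in khugepaged_scan_pmd, using the same names as the patch):

	/* one bit per MIN_MTHP_NR-sized chunk of the PMD range */
	bitmap_zero(cc->mthp_bitmap, 1 << (HPAGE_PMD_ORDER - MIN_MTHP_ORDER));
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pte_t pteval = ptep_get(pte + i);

		if (i % MIN_MTHP_NR == 0)
			all_valid = true;
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval)))
			all_valid = false;
		/* chunk fully populated -> mark it as an mTHP collapse candidate */
		if (all_valid && (i + 1) % MIN_MTHP_NR == 0)
			bitmap_set(cc->mthp_bitmap, i / MIN_MTHP_NR, 1);
	}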

Signed-off-by: Nico Pache <npache@xxxxxxxxxx>
---
  mm/khugepaged.c | 111 +++++++++++++++++++++++++++++++++---------------
  1 file changed, 77 insertions(+), 34 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index de1dc6ea3c71..4d3c560f20b4 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1139,13 +1139,14 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
  {
  	LIST_HEAD(compound_pagelist);
  	pmd_t *pmd, _pmd;
-	pte_t *pte;
+	pte_t *pte, mthp_pte;
  	pgtable_t pgtable;
  	struct folio *folio;
  	spinlock_t *pmd_ptl, *pte_ptl;
  	int result = SCAN_FAIL;
  	struct vm_area_struct *vma;
  	struct mmu_notifier_range range;
+	unsigned long _address = address + offset * PAGE_SIZE;
  	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
/* if collapsing mTHPs we may have already released the read_lock, and
@@ -1162,12 +1163,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
  	mmap_read_unlock(mm);
  	*mmap_locked = false;
-	result = alloc_charge_folio(&folio, mm, cc, HPAGE_PMD_ORDER);
+	result = alloc_charge_folio(&folio, mm, cc, order);
  	if (result != SCAN_SUCCEED)
  		goto out_nolock;
  	mmap_read_lock(mm);
-	result = hugepage_vma_revalidate(mm, address, true, &vma, cc, HPAGE_PMD_ORDER);
+	*mmap_locked = true;
+	result = hugepage_vma_revalidate(mm, address, true, &vma, cc, order);
  	if (result != SCAN_SUCCEED) {
  		mmap_read_unlock(mm);
  		goto out_nolock;
@@ -1185,13 +1187,14 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
  		 * released when it fails. So we jump out_nolock directly in
  		 * that case.  Continuing to collapse causes inconsistency.
  		 */
-		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
-				referenced, HPAGE_PMD_ORDER);
+		result = __collapse_huge_page_swapin(mm, vma, _address, pmd,
+				referenced, order);
  		if (result != SCAN_SUCCEED)
  			goto out_nolock;
  	}
  	mmap_read_unlock(mm);
+	*mmap_locked = false;
  	/*
  	 * Prevent all access to pagetables with the exception of
  	 * gup_fast later handled by the ptep_clear_flush and the VM
@@ -1201,7 +1204,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
  	 * mmap_lock.
  	 */
  	mmap_write_lock(mm);
-	result = hugepage_vma_revalidate(mm, address, true, &vma, cc, HPAGE_PMD_ORDER);
+	result = hugepage_vma_revalidate(mm, address, true, &vma, cc, order);
  	if (result != SCAN_SUCCEED)
  		goto out_up_write;
  	/* check if the pmd is still valid */
@@ -1212,11 +1215,12 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
  	vma_start_write(vma);
  	anon_vma_lock_write(vma->anon_vma);
-	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
-				address + HPAGE_PMD_SIZE);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, _address,
+				_address + (PAGE_SIZE << order));

Since we are nuking the PMD in both cases, this does not need to change with the order; it should remain address + HPAGE_PMD_SIZE.
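
i.e. keep the PMD-wide notifier range as in the current code:

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);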

  	mmu_notifier_invalidate_range_start(&range);
  	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
+
  	/*
  	 * This removes any huge TLB entry from the CPU so we won't allow
  	 * huge and small TLB entries for the same virtual address to
@@ -1230,10 +1234,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
  	mmu_notifier_invalidate_range_end(&range);
  	tlb_remove_table_sync_one();
-	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
+	pte = pte_offset_map_lock(mm, &_pmd, _address, &pte_ptl);
  	if (pte) {
-		result = __collapse_huge_page_isolate(vma, address, pte, cc,
-					&compound_pagelist, HPAGE_PMD_ORDER);
+		result = __collapse_huge_page_isolate(vma, _address, pte, cc,
+					&compound_pagelist, order);
  		spin_unlock(pte_ptl);
  	} else {
  		result = SCAN_PMD_NULL;
@@ -1262,8 +1266,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
  	anon_vma_unlock_write(vma->anon_vma);
  	result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
-					   vma, address, pte_ptl,
-					   &compound_pagelist, HPAGE_PMD_ORDER);
+					   vma, _address, pte_ptl,
+					   &compound_pagelist, order);
  	pte_unmap(pte);
  	if (unlikely(result != SCAN_SUCCEED))
  		goto out_up_write;
@@ -1274,20 +1278,37 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
  	 * write.
  	 */
  	__folio_mark_uptodate(folio);
-	pgtable = pmd_pgtable(_pmd);
-
-	_pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
-	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-
-	spin_lock(pmd_ptl);
-	BUG_ON(!pmd_none(*pmd));
-	folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
-	folio_add_lru_vma(folio, vma);
-	pgtable_trans_huge_deposit(mm, pmd, pgtable);
-	set_pmd_at(mm, address, pmd, _pmd);
-	update_mmu_cache_pmd(vma, address, pmd);
-	deferred_split_folio(folio, false);
-	spin_unlock(pmd_ptl);
+	if (order == HPAGE_PMD_ORDER) {
+		pgtable = pmd_pgtable(_pmd);
+		_pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
+		_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
+
+		spin_lock(pmd_ptl);
+		BUG_ON(!pmd_none(*pmd));
+		folio_add_new_anon_rmap(folio, vma, _address, RMAP_EXCLUSIVE);
+		folio_add_lru_vma(folio, vma);
+		pgtable_trans_huge_deposit(mm, pmd, pgtable);
+		set_pmd_at(mm, address, pmd, _pmd);
+		update_mmu_cache_pmd(vma, address, pmd);
+		deferred_split_folio(folio, false);
+		spin_unlock(pmd_ptl);
+	} else { //mTHP
+		mthp_pte = mk_pte(&folio->page, vma->vm_page_prot);
+		mthp_pte = maybe_mkwrite(pte_mkdirty(mthp_pte), vma);
+
+		spin_lock(pmd_ptl);
+		folio_ref_add(folio, (1 << order) - 1);
+		folio_add_new_anon_rmap(folio, vma, _address, RMAP_EXCLUSIVE);
+		folio_add_lru_vma(folio, vma);
+		spin_lock(pte_ptl);
+		set_ptes(vma->vm_mm, _address, pte, mthp_pte, (1 << order));
+		update_mmu_cache_range(NULL, vma, _address, pte, (1 << order));
+		spin_unlock(pte_ptl);
+		smp_wmb(); /* make pte visible before pmd */
+		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
+		deferred_split_folio(folio, false);
+		spin_unlock(pmd_ptl);
+	}

You have nested the locks here: lock(pmd_ptl) -> lock(pte_ptl) -> unlock(pte_ptl) -> unlock(pmd_ptl). In any case, you do not need to hold pmd_ptl while setting the ptes. I am almost done with my v2, and in my view this function should look like this:

/* Similar to the PMD case except we have to batch set the PTEs */
static int vma_collapse_anon_folio(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct *vma, struct collapse_control *cc, pmd_t *pmd,
		struct folio *folio, int order)
{
	LIST_HEAD(compound_pagelist);
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct mmu_notifier_range range;
	pmd_t _pmd;
	pte_t *pte;
	pte_t entry;
	int nr_pages = folio_nr_pages(folio);
	unsigned long haddress = address & HPAGE_PMD_MASK;

	VM_BUG_ON(address & ((PAGE_SIZE << order) - 1));

	mmap_read_unlock(mm);

	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, order, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	vma_start_write(vma);
	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddress,
				haddress + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pmd_ptl = pmd_lock(mm, pmd);
	_pmd = pmdp_collapse_flush(vma, haddress, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
	if (pte) {
		result = __collapse_huge_page_isolate(vma, address, pte, cc,
						      &compound_pagelist, order);
		spin_unlock(pte_ptl);
	} else {
		result = SCAN_PMD_NULL;
	}

	if (unlikely(result != SCAN_SUCCEED)) {
		if (pte)
			pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	anon_vma_unlock_write(vma->anon_vma);

	__folio_mark_uptodate(folio);
	entry = mk_pte(&folio->page, vma->vm_page_prot);
	entry = maybe_mkwrite(pte_mkdirty(entry), vma);

	result = __collapse_huge_page_copy(pte, folio, pmd, *pmd,
					   vma, address, pte_ptl,
					   &compound_pagelist, order);
	pte_unmap(pte);
	if (unlikely(result != SCAN_SUCCEED))
		goto out_up_write;

	folio_ref_add(folio, nr_pages - 1);
	folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
	folio_add_lru_vma(folio, vma);
	spin_lock(pte_ptl);
	set_ptes(mm, address, pte, entry, nr_pages);
	spin_unlock(pte_ptl);
	spin_lock(pmd_ptl);

	/* See pmd_install() */
	smp_wmb();
	pmd_populate(mm, pmd, pmd_pgtable(_pmd));
	update_mmu_cache_pmd(vma, haddress, pmd);
	spin_unlock(pmd_ptl);

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
	return result;
}


The difference is that I take pte_ptl, set the ptes, drop pte_ptl, and only then take pmd_ptl to do the pmd_populate(). Also, instead of the update_mmu_cache_range() the patch does in the mTHP case, we still need an update_mmu_cache_pmd() since we are repopulating the PMD; and IIUC update_mmu_cache_pmd() is a superset of update_mmu_cache_range(), so we can drop the latter altogether.
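
In other words, the tail of the function boils down to this ordering (same code as above, just pulled out to highlight the locking):

	spin_lock(pte_ptl);
	set_ptes(mm, address, pte, entry, nr_pages);
	spin_unlock(pte_ptl);

	spin_lock(pmd_ptl);
	/* See pmd_install(): make the ptes visible before the pmd */
	smp_wmb();
	pmd_populate(mm, pmd, pmd_pgtable(_pmd));
	update_mmu_cache_pmd(vma, haddress, pmd);
	spin_unlock(pmd_ptl);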

  	folio = NULL;
@@ -1367,21 +1388,26 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
  {
  	pmd_t *pmd;
  	pte_t *pte, *_pte;
+	int i;
  	int result = SCAN_FAIL, referenced = 0;
  	int none_or_zero = 0, shared = 0;
  	struct page *page = NULL;
  	struct folio *folio = NULL;
  	unsigned long _address;
+	unsigned long enabled_orders;
  	spinlock_t *ptl;
  	int node = NUMA_NO_NODE, unmapped = 0;
  	bool writable = false;
-
+	bool all_valid = true;
+	unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
  	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
  	result = find_pmd_or_thp_or_none(mm, address, &pmd);
  	if (result != SCAN_SUCCEED)
  		goto out;
+	bitmap_zero(cc->mthp_bitmap, 1 << (HPAGE_PMD_ORDER - MIN_MTHP_ORDER));
+	bitmap_zero(cc->mthp_bitmap_temp, 1 << (HPAGE_PMD_ORDER - MIN_MTHP_ORDER));
  	memset(cc->node_load, 0, sizeof(cc->node_load));
  	nodes_clear(cc->alloc_nmask);
  	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -1390,8 +1416,12 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
  		goto out;
  	}
-	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
-	     _pte++, _address += PAGE_SIZE) {
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		if (i % MIN_MTHP_NR == 0)
+			all_valid = true;
+
+		_pte = pte + i;
+		_address = address + i * PAGE_SIZE;
  		pte_t pteval = ptep_get(_pte);
  		if (is_swap_pte(pteval)) {
  			++unmapped;
@@ -1414,6 +1444,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
  			}
  		}
  		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+			all_valid = false;
  			++none_or_zero;
  			if (!userfaultfd_armed(vma) &&
  			    (!cc->is_khugepaged ||
@@ -1514,7 +1545,15 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
  		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
  								     address)))
  			referenced++;
+
+		/*
+		 * we are reading in MIN_MTHP_NR page chunks. if there are no empty
+		 * pages keep track of it in the bitmap for mTHP collapsing.
+		 */
+		if (all_valid && (i + 1) % MIN_MTHP_NR == 0)
+			bitmap_set(cc->mthp_bitmap, i / MIN_MTHP_NR, 1);
  	}
+
  	if (!writable) {
  		result = SCAN_PAGE_RO;
  	} else if (cc->is_khugepaged &&
@@ -1527,10 +1566,12 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
  out_unmap:
  	pte_unmap_unlock(pte, ptl);
  	if (result == SCAN_SUCCEED) {
-		result = collapse_huge_page(mm, address, referenced,
-					    unmapped, cc, mmap_locked, HPAGE_PMD_ORDER, 0);
-		/* collapse_huge_page will return with the mmap_lock released */
-		*mmap_locked = false;
+		enabled_orders = thp_vma_allowable_orders(vma, vma->vm_flags,
+			tva_flags, THP_ORDERS_ALL_ANON);
+		result = khugepaged_scan_bitmap(mm, address, referenced, unmapped, cc,
+			       mmap_locked, enabled_orders);
+		if (result > 0)
+			result = SCAN_SUCCEED;
  	}
  out:
  	trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
@@ -2477,11 +2518,13 @@ static int khugepaged_collapse_single_pmd(unsigned long addr, struct mm_struct *
  			fput(file);
  			if (result == SCAN_PTE_MAPPED_HUGEPAGE) {
  				mmap_read_lock(mm);
+				*mmap_locked = true;
  				if (khugepaged_test_exit_or_disable(mm))
  					goto end;
  				result = collapse_pte_mapped_thp(mm, addr,
  								 !cc->is_khugepaged);
  				mmap_read_unlock(mm);
+				*mmap_locked = false;
  			}
  		} else {
  			result = khugepaged_scan_pmd(mm, vma, addr,




