Re: [RFC PATCH 15/26] hugetlb: make unmapping compatible with high-granularity mappings

On 24/06/22 11:06 pm, James Houghton wrote:
This enlightens __unmap_hugepage_range to deal with high-granularity
mappings. This doesn't change its API; it still must be called with
hugepage alignment, but it will correctly unmap hugepages that have been
mapped at high granularity.
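
For illustration, a caller still passes a hugepage-aligned [start, end) range
even when the hugepage underneath is mapped with PAGE_SIZE entries. A minimal
caller fragment (sketch only; signature as in this tree, with ref_page and
zap_flags unused here):

	struct hstate *h = hstate_vma(vma);
	/* Round addr down to a hugepage boundary; unmap one hugepage. */
	unsigned long start = addr & huge_page_mask(h);
	unsigned long end = start + huge_page_size(h);

	/* Correct regardless of the granularity the hugepage is mapped at. */
	__unmap_hugepage_range(tlb, vma, start, end, NULL, 0);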

Analogous to the mapcount rules introduced by hugetlb_no_page, we only
drop mapcount in this case if we are unmapping an entire hugepage in one
operation. This is the case when a VMA is destroyed.
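
Concretely, the unmap loop below gates the rmap drop on covering a whole
hugepage in this single pass; roughly:

	/*
	 * Drop the hugepage's mapcount only if this one operation unmaps
	 * the entire hugepage: the current address is hugepage-aligned
	 * and the full hugepage lies within [start, end).
	 */
	if (IS_ALIGNED(address, sz) && address + sz <= end)
		page_remove_rmap(hpage, vma, true);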

Eventually, functionality here can be expanded to allow users to call
MADV_DONTNEED on PAGE_SIZE-aligned sections of a hugepage, but that is
not done here.

Sorry, I may have misunderstood something here, but wouldn't allowing
MADV_DONTNEED at PAGE_SIZE granularity cause fragmentation of the hugetlbfs
pool? That seems to run counter to the purpose of hugetlbfs.
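
For concreteness, the userspace usage the commit message hints at would look
something like the sketch below (hypothetical: this patch does not enable it,
and current kernels reject sub-hugepage MADV_DONTNEED ranges on hugetlb
mappings):

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t huge_sz = 2UL << 20;	/* assumes 2M hugepages */
		char *buf = mmap(NULL, huge_sz, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
				 -1, 0);

		if (buf == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		buf[0] = 1;	/* fault in the hugepage */

		/*
		 * Hypothetical: drop a single 4K page in the middle of
		 * the hugepage. With HGM this could zap just one PTE;
		 * it is also exactly the operation that could fragment
		 * the hugetlbfs pool.
		 */
		if (madvise(buf + 4096, 4096, MADV_DONTNEED))
			perror("madvise");

		return 0;
	}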


Signed-off-by: James Houghton <jthoughton@xxxxxxxxxx>
---
  include/asm-generic/tlb.h |  6 +--
  mm/hugetlb.c              | 85 ++++++++++++++++++++++++++-------------
  2 files changed, 59 insertions(+), 32 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index ff3e82553a76..8daa3ae460d9 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -562,9 +562,9 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
  		__tlb_remove_tlb_entry(tlb, ptep, address);	\
  	} while (0)
 
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+#define tlb_remove_huge_tlb_entry(tlb, hpte, address)	\
  	do {							\
-		unsigned long _sz = huge_page_size(h);		\
+		unsigned long _sz = hugetlb_pte_size(&hpte);	\
  		if (_sz >= P4D_SIZE)				\
  			tlb_flush_p4d_range(tlb, address, _sz);	\
  		else if (_sz >= PUD_SIZE)			\
@@ -573,7 +573,7 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
  			tlb_flush_pmd_range(tlb, address, _sz);	\
  		else						\
  			tlb_flush_pte_range(tlb, address, _sz);	\
-		__tlb_remove_tlb_entry(tlb, ptep, address);	\
+		__tlb_remove_tlb_entry(tlb, hpte.ptep, address);\
  	} while (0)
 
 /**
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index da30621656b8..51fc1d3f122f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5120,24 +5120,20 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
  {
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long address;
-	pte_t *ptep;
+	struct hugetlb_pte hpte;
  	pte_t pte;
  	spinlock_t *ptl;
-	struct page *page;
+	struct page *hpage, *subpage;
  	struct hstate *h = hstate_vma(vma);
  	unsigned long sz = huge_page_size(h);
  	struct mmu_notifier_range range;
  	bool force_flush = false;
+	bool hgm_enabled = hugetlb_hgm_enabled(vma);
 
  	WARN_ON(!is_vm_hugetlb_page(vma));
  	BUG_ON(start & ~huge_page_mask(h));
  	BUG_ON(end & ~huge_page_mask(h));
 
-	/*
-	 * This is a hugetlb vma, all the pte entries should point
-	 * to huge page.
-	 */
-	tlb_change_page_size(tlb, sz);
  	tlb_start_vma(tlb, vma);
 
  	/*
@@ -5148,25 +5144,43 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
  	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
  	mmu_notifier_invalidate_range_start(&range);
  	address = start;
-	for (; address < end; address += sz) {
-		ptep = huge_pte_offset(mm, address, sz);
-		if (!ptep)
+
+	while (address < end) {
+		pte_t *ptep = huge_pte_offset(mm, address, sz);
+
+		if (!ptep) {
+			address += sz;
  			continue;
+		}
+		hugetlb_pte_populate(&hpte, ptep, huge_page_shift(h));
+		if (hgm_enabled) {
+			int ret = huge_pte_alloc_high_granularity(
+					&hpte, mm, vma, address, PAGE_SHIFT,
+					HUGETLB_SPLIT_NEVER,
+					/*write_locked=*/true);

I see that huge_pte_alloc_high_granularity() with HUGETLB_SPLIT_NEVER just
does a huge_tlb_walk. So is HUGETLB_SPLIT_NEVER even required? For those
cases you could do the huge_tlb_walk directly; the name
huge_pte_alloc_high_granularity is confusing when nothing is actually
allocated.
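
Something along these lines, i.e. a plain walk instead of the alloc path
(sketch only; the helper name and signature are assumed from this RFC
series, not verified):

	if (hgm_enabled) {
		/*
		 * No allocation or split can happen with
		 * HUGETLB_SPLIT_NEVER, so walk the page tables directly
		 * down to the smallest existing mapping level instead.
		 */
		int ret = hugetlb_walk_to(mm, &hpte, address, PAGE_SIZE,
					  /*stop_at_none=*/true);
		BUG_ON(ret);
	}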

+			/*
+			 * We will never split anything, so this should always
+			 * succeed.
+			 */
+			BUG_ON(ret);
+		}
 
-		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+		ptl = hugetlb_pte_lock(mm, &hpte);
+		if (!hgm_enabled && huge_pmd_unshare(
+					mm, vma, &address, hpte.ptep)) {
  			spin_unlock(ptl);
  			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
  			force_flush = true;
-			continue;
+			goto next_hpte;
  		}
 
-		pte = huge_ptep_get(ptep);
-		if (huge_pte_none(pte)) {
+		if (hugetlb_pte_none(&hpte)) {
  			spin_unlock(ptl);
-			continue;
+			goto next_hpte;
  		}
 
+		pte = hugetlb_ptep_get(&hpte);
+
  		/*
  		 * Migrating hugepage or HWPoisoned hugepage is already
  		 * unmapped and its refcount is dropped, so just clear pte here.
@@ -5180,24 +5194,27 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
  			 */
  			if (pte_swp_uffd_wp_any(pte) &&
  			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
-				set_huge_pte_at(mm, address, ptep,
+				set_huge_pte_at(mm, address, hpte.ptep,
  						make_pte_marker(PTE_MARKER_UFFD_WP));
  			else
-				huge_pte_clear(mm, address, ptep, sz);
+				huge_pte_clear(mm, address, hpte.ptep,
+						hugetlb_pte_size(&hpte));
  			spin_unlock(ptl);
-			continue;
+			goto next_hpte;
  		}
 
-		page = pte_page(pte);
+		subpage = pte_page(pte);
+		BUG_ON(!subpage);
+		hpage = compound_head(subpage);
  		/*
  		 * If a reference page is supplied, it is because a specific
  		 * page is being unmapped, not a range. Ensure the page we
  		 * are about to unmap is the actual page of interest.
  		 */
  		if (ref_page) {
-			if (page != ref_page) {
+			if (hpage != ref_page) {
  				spin_unlock(ptl);
-				continue;
+				goto next_hpte;
  			}
  			/*
  			 * Mark the VMA as having unmapped its page so that
@@ -5207,25 +5224,35 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
  			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
  		}
 
-		pte = huge_ptep_get_and_clear(mm, address, ptep);
-		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
+		pte = huge_ptep_get_and_clear(mm, address, hpte.ptep);
+		tlb_change_page_size(tlb, hugetlb_pte_size(&hpte));
+		tlb_remove_huge_tlb_entry(tlb, hpte, address);
  		if (huge_pte_dirty(pte))
-			set_page_dirty(page);
+			set_page_dirty(hpage);
  		/* Leave a uffd-wp pte marker if needed */
  		if (huge_pte_uffd_wp(pte) &&
  		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
-			set_huge_pte_at(mm, address, ptep,
+			set_huge_pte_at(mm, address, hpte.ptep,
  					make_pte_marker(PTE_MARKER_UFFD_WP));
-		hugetlb_count_sub(pages_per_huge_page(h), mm);
-		page_remove_rmap(page, vma, true);
+
+		hugetlb_count_sub(hugetlb_pte_size(&hpte)/PAGE_SIZE, mm);
+
+		/*
+		 * If we are unmapping the entire page, remove it from the
+		 * rmap.
+		 */
+		if (IS_ALIGNED(address, sz) && address + sz <= end)
+			page_remove_rmap(hpage, vma, true);
 
  		spin_unlock(ptl);
-		tlb_remove_page_size(tlb, page, huge_page_size(h));
+		tlb_remove_page_size(tlb, subpage, hugetlb_pte_size(&hpte));
  		/*
  		 * Bail out after unmapping reference page if supplied
  		 */
  		if (ref_page)
  			break;
+next_hpte:
+		address += hugetlb_pte_size(&hpte);
  	}
  	mmu_notifier_invalidate_range_end(&range);
  	tlb_end_vma(tlb, vma);



