Re: [PATCH 0/5] Remove some races around folio_test_hugetlb

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On 08.03.24 05:31, Matthew Wilcox wrote:
On Thu, Mar 07, 2024 at 09:14:50PM +0000, Matthew Wilcox wrote:
I suppose we could use a magic value for
page[0].mapcount to indicate hugetlb, but that'd make page_mapcount()
more complex.

Actually, not significantly.  I haven't tested this yet, but it should
work ... thoughts?  It's certainly the simplest implementation of
folio_test_hugetlb() that we've ever had.

Yes, yes, yes! So obvious and so simple!

1) In __folio_rmap_sanity_checks() we now VM_WARN_ON_ONCE we would get a hugetlb folio.

2) In folio_total_mapcount() we'll never read the subpage mapcount because folio_nr_pages_mapped() == 0.


__dump_folio() might require care to just ignore the subpage mapcount as well. Maybe we can even use something like "page_has_type" to never read subpage mapcounts, and read the entire mapcount only for the exception of hugetlb folios.

Apart from that, I did look at users of page_has_type():

1) validate_page_before_insert() now correctly rejects hugetlb folios, insert_page_into_pte_locked() -> folio_add_file_rmap_pte() would nowadays properly complain about that in __folio_rmap_sanity_checks() already.

2) kpagecount_read() will always end up with pcount=0. We either special-case hugetlb folios here as well, or just let this interface bitrot and hopefully go away at some point :)


Very nice!



diff --git a/include/linux/mm.h b/include/linux/mm.h
index f5a97dec5169..32281097f07e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1212,6 +1212,9 @@ static inline int page_mapcount(struct page *page)
  {
  	int mapcount = atomic_read(&page->_mapcount) + 1;
+ /* Head page repurposes mapcount; tail page leaves at -1 */
+	if (PageHeadHuge(page))
+		mapcount = 0;
  	if (unlikely(PageCompound(page)))
  		mapcount += folio_entire_mapcount(page_folio(page));
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 735cddc13d20..6ebb573c7195 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -190,7 +190,6 @@ enum pageflags {
/* At least one page in this folio has the hwpoison flag set */
  	PG_has_hwpoisoned = PG_error,
-	PG_hugetlb = PG_active,
  	PG_large_rmappable = PG_workingset, /* anon or file-backed */
  };
@@ -829,29 +828,6 @@ TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
  #define PG_head_mask ((1UL << PG_head))
 -#ifdef CONFIG_HUGETLB_PAGE
-int PageHuge(struct page *page);
-SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-
-/**
- * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
- * @folio: The folio to test.
- *
- * Context: Any context.  Caller should have a reference on the folio to
- * prevent it from being turned into a tail page.
- * Return: True for hugetlbfs folios, false for anon folios or folios
- * belonging to other filesystems.
- */
-static inline bool folio_test_hugetlb(struct folio *folio)
-{
-	return folio_test_large(folio) &&
-		test_bit(PG_hugetlb, folio_flags(folio, 1));
-}
-#else
-TESTPAGEFLAG_FALSE(Huge, hugetlb)
-#endif
-
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  /*
   * PageHuge() only returns true for hugetlbfs pages, but not for
@@ -907,18 +883,6 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
  	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
  #endif
-/*
- * Check if a page is currently marked HWPoisoned. Note that this check is
- * best effort only and inherently racy: there is no way to synchronize with
- * failing hardware.
- */
-static inline bool is_page_hwpoison(struct page *page)
-{
-	if (PageHWPoison(page))
-		return true;
-	return PageHuge(page) && PageHWPoison(compound_head(page));
-}
-
  /*
   * For pages that are never mapped to userspace (and aren't PageSlab),
   * page_type may be used.  Because it is initialised to -1, we invert the
@@ -935,6 +899,7 @@ static inline bool is_page_hwpoison(struct page *page)
  #define PG_offline	0x00000100
  #define PG_table	0x00000200
  #define PG_guard	0x00000400
+#define PG_hugetlb	0x00000800
#define PageType(page, flag) \
  	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -1026,6 +991,37 @@ PAGE_TYPE_OPS(Table, table, pgtable)
   */
  PAGE_TYPE_OPS(Guard, guard, guard)
+#ifdef CONFIG_HUGETLB_PAGE
+PAGE_TYPE_OPS(HeadHuge, hugetlb, hugetlb)
+#else
+TESTPAGEFLAG_FALSE(HeadHuge, hugetlb)
+#endif
+
+/**
+ * PageHuge - Determine if the page belongs to hugetlbfs
+ * @page: The page to test.
+ *
+ * Context: Any context.
+ * Return: True for hugetlbfs pages, false for anon pages or pages
+ * belonging to other filesystems.
+ */
+static inline bool PageHuge(struct page *page)
+{
+	return folio_test_hugetlb(page_folio(page));
+}
+
+/*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+ * failing hardware.
+ */
+static inline bool is_page_hwpoison(struct page *page)
+{
+	if (PageHWPoison(page))
+		return true;
+	return PageHuge(page) && PageHWPoison(compound_head(page));
+}
+
  extern bool is_free_buddy_page(struct page *page);
PAGEFLAG(Isolated, isolated, PF_ANY);
@@ -1092,7 +1088,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
   */
  #define PAGE_FLAGS_SECOND						\
  	(0xffUL /* order */		| 1UL << PG_has_hwpoisoned |	\
-	 1UL << PG_hugetlb		| 1UL << PG_large_rmappable)
+	 1UL << PG_large_rmappable)
#define PAGE_FLAGS_PRIVATE \
  	(1UL << PG_private | 1UL << PG_private_2)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ed1581b670d4..23b62df21971 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1623,7 +1623,7 @@ static inline void __clear_hugetlb_destructor(struct hstate *h,
  {
  	lockdep_assert_held(&hugetlb_lock);
- folio_clear_hugetlb(folio);
+	__folio_clear_hugetlb(folio);
  }
/*
@@ -1710,7 +1710,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
  		h->surplus_huge_pages_node[nid]++;
  	}
- folio_set_hugetlb(folio);
+	__folio_set_hugetlb(folio);
  	folio_change_private(folio, NULL);
  	/*
  	 * We have to set hugetlb_vmemmap_optimized again as above
@@ -2048,7 +2048,7 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
  {
-	folio_set_hugetlb(folio);
+	__folio_set_hugetlb(folio);
  	INIT_LIST_HEAD(&folio->lru);
  	hugetlb_set_folio_subpool(folio, NULL);
  	set_hugetlb_cgroup(folio, NULL);
@@ -2158,22 +2158,6 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
  	return __prep_compound_gigantic_folio(folio, order, true);
  }
-/*
- * PageHuge() only returns true for hugetlbfs pages, but not for normal or
- * transparent huge pages.  See the PageTransHuge() documentation for more
- * details.
- */
-int PageHuge(struct page *page)
-{
-	struct folio *folio;
-
-	if (!PageCompound(page))
-		return 0;
-	folio = page_folio(page);
-	return folio_test_hugetlb(folio);
-}
-EXPORT_SYMBOL_GPL(PageHuge);
-
  /*
   * Find and lock address space (mapping) in write mode.
   *


--
Cheers,

David / dhildenb





[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux