Signed-off-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
---
 mm/hugetlb.c | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0bf70f27c1ba..ba6d39b71cb1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1489,6 +1489,7 @@ static void __destroy_compound_gigantic_folio(struct folio *folio,
 
 	for (i = 1; i < nr_pages; i++) {
 		p = folio_page(folio, i);
+		p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
 		p->mapping = NULL;
 		clear_compound_head(p);
 		if (!demote)
@@ -1707,8 +1708,6 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
 static void __update_and_free_hugetlb_folio(struct hstate *h,
 					struct folio *folio)
 {
-	int i;
-	struct page *subpage;
 	bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
 
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1750,14 +1749,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
 		spin_unlock_irq(&hugetlb_lock);
 	}
 
-	for (i = 0; i < pages_per_huge_page(h); i++) {
-		subpage = folio_page(folio, i);
-		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
-				1 << PG_referenced | 1 << PG_dirty |
-				1 << PG_active | 1 << PG_private |
-				1 << PG_writeback);
-	}
-
 	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_folio.
-- 
2.41.0
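
For reviewers, a condensed view (not verbatim mainline code) of how the tail-page
loop in __destroy_compound_gigantic_folio() reads once the first hunk is applied.
The closing set_page_refcounted() call lies outside the hunk's context lines and
is recalled from mainline, so treat it as an assumption:

	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		/*
		 * These tail pages are returned to the allocator one page at
		 * a time, so clear the flags the page allocator checks at
		 * free time here instead of in the per-subpage loop removed
		 * from __update_and_free_hugetlb_folio().
		 */
		p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);	/* assumed from mainline context */
	}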