This is more efficient than calling page_folio() ourselves, and removes
a call to PageHuge() which we're trying to get rid of. We do not hold
the hugetlb_lock here, so the folio may be dissolved under us, but we
check for that in hugetlb_isolate_or_dissolve() (which now takes a
folio argument instead of converting to a folio itself).

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/hugetlb.h |  4 ++--
 mm/compaction.c         | 16 ++++++++--------
 mm/hugetlb.c            | 10 +++++-----
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 89f4b90eec68..436086a300f5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -736,7 +736,7 @@ struct huge_bootmem_page {
 	struct hstate *hstate;
 };
 
-int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
+int hugetlb_isolate_or_dissolve(struct folio *folio, struct list_head *list);
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve);
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
@@ -1043,7 +1043,7 @@ static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
 	return NULL;
 }
 
-static inline int isolate_or_dissolve_huge_page(struct page *page,
+static inline int hugetlb_isolate_or_dissolve(struct folio *folio,
 					struct list_head *list)
 {
 	return -ENOMEM;
diff --git a/mm/compaction.c b/mm/compaction.c
index 807b58e6eb68..ad3430c22ede 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1001,17 +1001,18 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			valid_page = page;
 		}
 
-		if (PageHuge(page)) {
+		folio = hugetlb_pfn_folio(low_pfn);
+		if (folio) {
 			/*
 			 * skip hugetlbfs if we are not compacting for pages
 			 * bigger than its order. THPs and other compound pages
 			 * are handled below.
 			 */
 			if (!cc->alloc_contig) {
-				const unsigned int order = compound_order(page);
+				const unsigned int order = folio_order(folio);
 
 				if (order <= MAX_PAGE_ORDER) {
-					low_pfn += (1UL << order) - 1;
+					low_pfn |= (1UL << order) - 1;
 					nr_scanned += (1UL << order) - 1;
 				}
 				goto isolate_fail;
@@ -1022,7 +1023,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 				locked = NULL;
 			}
 
-			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
+			ret = hugetlb_isolate_or_dissolve(folio, &cc->migratepages);
 
 			/*
 			 * Fail isolation in case isolate_or_dissolve_huge_page()
@@ -1032,17 +1033,16 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 				/* Do not report -EBUSY down the chain */
 				if (ret == -EBUSY)
 					ret = 0;
-				low_pfn += compound_nr(page) - 1;
-				nr_scanned += compound_nr(page) - 1;
+				low_pfn += folio_nr_pages(folio) - 1;
+				nr_scanned += folio_nr_pages(folio) - 1;
 				goto isolate_fail;
 			}
 
-			if (PageHuge(page)) {
+			if (folio_test_hugetlb(folio)) {
 				/*
 				 * Hugepage was successfully isolated and placed
 				 * on the cc->migratepages list.
 				 */
-				folio = page_folio(page);
 				low_pfn += folio_nr_pages(folio) - 1;
 				goto isolate_success_no_list;
 			}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2e6ebedb75a8..7832b0730e80 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3160,16 +3160,16 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
 	return ret;
 }
 
-int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
+int hugetlb_isolate_or_dissolve(struct folio *folio, struct list_head *list)
 {
 	struct hstate *h;
-	struct folio *folio = page_folio(page);
 	int ret = -EBUSY;
 
 	/*
-	 * The page might have been dissolved from under our feet, so make sure
-	 * to carefully check the state under the lock.
-	 * Return success when racing as if we dissolved the page ourselves.
+	 * The folio might have been dissolved from under our feet,
+	 * so make sure to carefully check the state under the lock.
+	 * Return success when racing as if we dissolved the folio
+	 * ourselves.
	 */
 	spin_lock_irq(&hugetlb_lock);
 	if (folio_test_hugetlb(folio)) {
-- 
2.43.0
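
For context, the check the commit message refers to sits at the top of
hugetlb_isolate_or_dissolve(). A rough sketch of that path follows, under the
assumption that the body below the lock is otherwise unchanged by this patch:
the comment block and the lock/test lines are taken from the hunk above, while
the folio_hstate() lookup and the early "return 0" on the dissolved path are
illustrative rather than quoted from the tree.

int hugetlb_isolate_or_dissolve(struct folio *folio, struct list_head *list)
{
	struct hstate *h;
	int ret = -EBUSY;

	/*
	 * The folio might have been dissolved from under our feet,
	 * so make sure to carefully check the state under the lock.
	 * Return success when racing as if we dissolved the folio
	 * ourselves.
	 */
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		/* Still a hugetlb folio; safe to look up its hstate. */
		h = folio_hstate(folio);
	} else {
		/* Already dissolved by someone else: report success. */
		spin_unlock_irq(&hugetlb_lock);
		return 0;
	}
	spin_unlock_irq(&hugetlb_lock);

	/* ... isolate the folio, or allocate a replacement and dissolve it ... */
	return ret;
}

Treating the already-dissolved case as success keeps the compaction caller
simple: after the call it only needs folio_test_hugetlb() to distinguish a
folio that was isolated onto cc->migratepages from one that was dissolved.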