Move isolate_hugetlb() after grabbing a reference, and use
isolate_folio_to_list() to unify hugetlb/LRU/non-LRU folio isolation,
which cleans up the code a bit and saves a few calls to
compound_head().

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
 mm/memory_hotplug.c | 48 +++++++++++++++------------------------------
 1 file changed, 16 insertions(+), 32 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ccaf4c480aed..057037766efa 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1773,20 +1773,17 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
 static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
-	struct page *page, *head;
 	LIST_HEAD(source);
+	struct folio *folio;
 	static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		struct folio *folio;
-		bool isolated;
+		struct page *page;
 
 		if (!pfn_valid(pfn))
 			continue;
 		page = pfn_to_page(pfn);
-		folio = page_folio(page);
-		head = &folio->page;
 
 		/*
 		 * HWPoison pages have elevated reference counts so the migration would
@@ -1808,36 +1805,22 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			continue;
 		}
 
-		if (PageHuge(page)) {
-			pfn = page_to_pfn(head) + compound_nr(head) - 1;
-			isolate_hugetlb(folio, &source);
+		folio = folio_get_nontail_page(page);
+		if (!folio)
 			continue;
-		} else if (PageTransHuge(page))
-			pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
-
-		if (!get_page_unless_zero(page))
-			continue;
-		/*
-		 * We can skip free pages. And we can deal with pages on
-		 * LRU and non-lru movable pages.
-		 */
-		if (PageLRU(page))
-			isolated = isolate_lru_page(page);
-		else
-			isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
-		if (isolated) {
-			list_add_tail(&page->lru, &source);
-			if (!__PageMovable(page))
-				inc_node_page_state(page, NR_ISOLATED_ANON +
-						    page_is_file_lru(page));
-		} else {
+		/* Skip free folios, deal with hugetlb, LRU and non-lru movable folios. */
+		if (!isolate_folio_to_list(folio, &source)) {
 			if (__ratelimit(&migrate_rs)) {
 				pr_warn("failed to isolate pfn %lx\n", pfn);
 				dump_page(page, "isolation failed");
 			}
 		}
-		put_page(page);
+
+		if (folio_test_large(folio))
+			pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
+
+		folio_put(folio);
 	}
 
 	if (!list_empty(&source)) {
 		nodemask_t nmask = node_states[N_MEMORY];
@@ -1852,7 +1835,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		 * We have checked that migration range is on a single zone so
 		 * we can use the nid of the first page to all the others.
 		 */
-		mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+		mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
 
 		/*
 		 * try to allocate from a different node but reuse this node
@@ -1865,11 +1848,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		ret = migrate_pages(&source, alloc_migration_target, NULL,
 			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
 		if (ret) {
-			list_for_each_entry(page, &source, lru) {
+			list_for_each_entry(folio, &source, lru) {
 				if (__ratelimit(&migrate_rs)) {
 					pr_warn("migrating pfn %lx failed ret:%d\n",
-						page_to_pfn(page), ret);
-					dump_page(page, "migration failure");
+						folio_pfn(folio), ret);
+					dump_page(&folio->page,
+						  "migration failure");
 				}
 			}
 			putback_movable_pages(&source);
-- 
2.27.0
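
For reference, a sketch of the helper this patch depends on:
isolate_folio_to_list() is introduced earlier in the series, and the
version below is reconstructed from how the call site above uses it,
so treat it as an approximation of the intended semantics rather than
the exact implementation from that patch.

/*
 * Reconstructed sketch, not the series' exact code: unify
 * hugetlb/LRU/non-LRU movable folio isolation by taking the hugetlb
 * path first, otherwise trying the LRU or movable path, then queueing
 * the folio on @list and accounting isolated LRU folios.
 * Returns true if the folio was isolated.
 */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
	bool isolated, lru;

	if (folio_test_hugetlb(folio))
		return isolate_hugetlb(folio, list);

	lru = !__folio_test_movable(folio);
	if (lru)
		isolated = folio_isolate_lru(folio);
	else
		isolated = isolate_movable_page(&folio->page,
						ISOLATE_UNEVICTABLE);

	if (!isolated)
		return false;

	list_add_tail(&folio->lru, list);
	if (lru)
		node_stat_add_folio(folio, NR_ISOLATED_ANON +
				    folio_is_file_lru(folio));

	return true;
}

This mirrors the open-coded sequence removed from do_migrate_range():
hugetlb folios keep their dedicated isolate_hugetlb() path, LRU folios
go through folio_isolate_lru() plus the NR_ISOLATED accounting, and
non-LRU movable folios go through isolate_movable_page().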