With isolate_movable_folio() and folio_isolate_lru(), let's use folios
more widely in do_migrate_range() to save compound_head() calls.

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
 mm/memory_hotplug.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a444e2d7dd2b..bd207772c619 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1774,14 +1774,14 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
 
 static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
+	struct folio *folio;
 	unsigned long pfn;
-	struct page *page, *head;
 	LIST_HEAD(source);
 	static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		struct folio *folio;
+		struct page *page, *head;
 		bool isolated;
 
 		if (!pfn_valid(pfn))
@@ -1818,15 +1818,15 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		 * We can skip free pages. And we can deal with pages on
 		 * LRU and non-lru movable pages.
 		 */
-		if (PageLRU(page))
-			isolated = isolate_lru_page(page);
+		if (folio_test_lru(folio))
+			isolated = folio_isolate_lru(folio);
 		else
-			isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
+			isolated = isolate_movable_folio(folio, ISOLATE_UNEVICTABLE);
 		if (isolated) {
-			list_add_tail(&page->lru, &source);
-			if (!__PageMovable(page))
-				inc_node_page_state(page, NR_ISOLATED_ANON +
-						    page_is_file_lru(page));
+			list_add_tail(&folio->lru, &source);
+			if (!__folio_test_movable(folio))
+				node_stat_add_folio(folio, NR_ISOLATED_ANON +
+						    folio_is_file_lru(folio));
 
 		} else {
 			if (__ratelimit(&migrate_rs)) {
@@ -1834,7 +1834,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 				dump_page(page, "isolation failed");
 			}
 		}
-		put_page(page);
+		folio_put(folio);
 	}
 	if (!list_empty(&source)) {
 		nodemask_t nmask = node_states[N_MEMORY];
@@ -1846,9 +1846,9 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
 		/*
 		 * We have checked that migration range is on a single zone so
-		 * we can use the nid of the first page to all the others.
+		 * we can use the nid of the first folio to all the others.
 		 */
-		mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+		mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
 
 		/*
 		 * try to allocate from a different node but reuse this node
@@ -1861,11 +1861,11 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		ret = migrate_pages(&source, alloc_migration_target, NULL,
 			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
 		if (ret) {
-			list_for_each_entry(page, &source, lru) {
+			list_for_each_entry(folio, &source, lru) {
 				if (__ratelimit(&migrate_rs)) {
 					pr_warn("migrating pfn %lx failed ret:%d\n",
-						page_to_pfn(page), ret);
-					dump_page(page, "migration failure");
+						folio_pfn(folio), ret);
+					dump_page(&folio->page, "migration failure");
 				}
 			}
 			putback_movable_pages(&source);
-- 
2.27.0
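
As a side note for review, here is a minimal user-space sketch of why the
folio conversion saves compound_head() work. The structs and helpers below
are simplified stand-ins for illustration only, not the kernel's actual
definitions: in the kernel, page-based helpers such as PageLRU() resolve
the head page internally on every call, while folio-based helpers operate
on a folio that has already been resolved once.

#include <stdbool.h>
#include <stdio.h>

struct folio { bool lru; };			/* stand-in for struct folio */
struct page { struct folio *head; };		/* tail pages point at their head */

static int compound_head_calls;

/* Model of page_folio()/compound_head(): resolve a page to its folio. */
static struct folio *page_folio_model(struct page *page)
{
	compound_head_calls++;
	return page->head;
}

/* Page-based helper: every call re-resolves the head, like PageLRU(). */
static bool page_lru_model(struct page *page)
{
	return page_folio_model(page)->lru;
}

/* Folio-based helper: the caller already holds the folio, no lookup. */
static bool folio_lru_model(struct folio *folio)
{
	return folio->lru;
}

int main(void)
{
	struct folio f = { .lru = true };
	struct page p = { .head = &f };
	struct folio *folio;

	/* Old pattern: each page-based test redoes the head lookup. */
	page_lru_model(&p);
	page_lru_model(&p);
	page_lru_model(&p);
	printf("page helpers:  %d head lookups\n", compound_head_calls);

	/* New pattern: resolve the folio once, then use folio helpers. */
	compound_head_calls = 0;
	folio = page_folio_model(&p);
	folio_lru_model(folio);
	folio_lru_model(folio);
	folio_lru_model(folio);
	printf("folio helpers: %d head lookup\n", compound_head_calls);

	return 0;
}

The patch applies the same idea in do_migrate_range(): the folio looked up
once per pfn is passed directly to folio_test_lru(), folio_isolate_lru(),
isolate_movable_folio() and the node stat helpers, instead of letting each
page-based helper repeat the head-page lookup.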