It makes sense to call the WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE) from the offlining path, but should avoid triggering it from userspace, i.e., from is_mem_section_removable(). While at it, simplify the code a bit by removing an unnecessary jump label and a local variable, so set_migratetype_isolate() could really return a bool. Suggested-by: Michal Hocko <mhocko@xxxxxxxxxx> Signed-off-by: Qian Cai <cai@xxxxxx> --- mm/page_alloc.c | 11 ++++------- mm/page_isolation.c | 31 ++++++++++++++++++------------- 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 621716a25639..3c4eb750a199 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -8231,7 +8231,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, if (is_migrate_cma(migratetype)) return NULL; - goto unmovable; + return page; } for (; iter < pageblock_nr_pages; iter++) { @@ -8241,7 +8241,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, page = pfn_to_page(pfn + iter); if (PageReserved(page)) - goto unmovable; + return page; /* * If the zone is movable and we have ruled out all reserved * pages then it should be reasonably safe to assume the rest * is movable. */ if (zone_idx(zone) == ZONE_MOVABLE) continue; /* * Hugepages are not in LRU lists, but they're movable. * We need not scan over tail pages because we don't * handle each tail page individually in migration. */ if (PageHuge(page)) { struct page *head = compound_head(page); unsigned int skip_pages; if (!hugepage_migration_supported(page_hstate(head))) - goto unmovable; + return page; skip_pages = compound_nr(head) - (page - head); iter += skip_pages - 1; @@ -8303,12 +8303,9 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, * is set to both of a memory hole page and a _used_ kernel * page at boot. 
*/ - goto unmovable; + return page; } return NULL; -unmovable: - WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); - return pfn_to_page(pfn + iter); } #ifdef CONFIG_CONTIG_ALLOC diff --git a/mm/page_isolation.c b/mm/page_isolation.c index e70586523ca3..97f673d5fefa 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -15,12 +15,12 @@ #define CREATE_TRACE_POINTS #include <trace/events/page_isolation.h> -static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags) +static bool set_migratetype_isolate(struct page *page, int migratetype, + int isol_flags) { - struct page *unmovable = NULL; + struct page *unmovable = ERR_PTR(-EBUSY); struct zone *zone; unsigned long flags; - int ret = -EBUSY; zone = page_zone(page); @@ -49,21 +49,26 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ NULL); __mod_zone_freepage_state(zone, -nr_pages, mt); - ret = 0; } out: spin_unlock_irqrestore(&zone->lock, flags); - if (!ret) + + if (!unmovable) { drain_all_pages(zone); - else if ((isol_flags & REPORT_FAILURE) && unmovable) - /* - * printk() with zone->lock held will guarantee to trigger a - * lockdep splat, so defer it here. - */ - dump_page(unmovable, "unmovable page"); - - return ret; + } else { + if (isol_flags & MEMORY_OFFLINE) + WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); + + if ((isol_flags & REPORT_FAILURE) && !IS_ERR(unmovable)) + /* + * printk() with zone->lock held will likely trigger a + * lockdep splat, so defer it here. + */ + dump_page(unmovable, "unmovable page"); + } + + return !!unmovable; } static void unset_migratetype_isolate(struct page *page, unsigned migratetype) -- 2.21.0 (Apple Git-122.2)