The patch titled
     Subject: mm, memory_hotplug: cleanup memory offline path
has been added to the -mm tree.  Its filename is
     mm-memory_hotplug-cleanup-memory-offline-path.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-memory_hotplug-cleanup-memory-offline-path.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-memory_hotplug-cleanup-memory-offline-path.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Michal Hocko <mhocko@xxxxxxxx>
Subject: mm, memory_hotplug: cleanup memory offline path

check_pages_isolated_cb currently accounts the whole pfn range as being
offlined if test_pages_isolated succeeds on the range.  This is based on
the assumption that all pages in the range are freed, which currently
holds in most cases, but it will no longer be true with later changes,
as pages marked as vmemmap won't be isolated.

Move the offlined pages counting to offline_isolated_pages_cb and rely on
__offline_isolated_pages to return the correct value.
check_pages_isolated_cb will still do its primary job and check the pfn
range.

While we are at it, remove check_pages_isolated and offline_isolated_pages
and use walk_system_ram_range directly, as we do in online_pages.
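
To illustrate, the resulting counting flow boils down to the sketch below
(condensed from the hunks that follow, not a literal copy of the diff; all
symbols are the ones this patch touches):

	static int
	offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
				void *data)
	{
		unsigned long *offlined_pages = (unsigned long *)data;

		/* __offline_isolated_pages() now reports how many pages it freed */
		*offlined_pages += __offline_isolated_pages(start, start + nr_pages);
		return 0;
	}

	/* in __offline_pages(): accumulate the count over all System RAM ranges */
	walk_system_ram_range(start_pfn, end_pfn - start_pfn,
			      &offlined_pages, offline_isolated_pages_cb);
	pr_info("Offlined Pages %ld\n", offlined_pages);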

Link: http://lkml.kernel.org/r/20190408082633.2864-2-osalvador@xxxxxxx
Reviewed-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Michal Hocko <mhocko@xxxxxxxx>
Signed-off-by: Oscar Salvador <osalvador@xxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/memory_hotplug.h |    3 +-
 mm/memory_hotplug.c            |   45 +++++++------------------------
 mm/page_alloc.c                |   11 ++++++-
 3 files changed, 22 insertions(+), 37 deletions(-)

--- a/include/linux/memory_hotplug.h~mm-memory_hotplug-cleanup-memory-offline-path
+++ a/include/linux/memory_hotplug.h
@@ -87,7 +87,8 @@ extern int add_one_highpage(struct page 
 extern int online_pages(unsigned long, unsigned long, int);
 extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 	unsigned long *valid_start, unsigned long *valid_end);
-extern void __offline_isolated_pages(unsigned long, unsigned long);
+extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
+						unsigned long end_pfn);
 
 typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
 
--- a/mm/memory_hotplug.c~mm-memory_hotplug-cleanup-memory-offline-path
+++ a/mm/memory_hotplug.c
@@ -1448,15 +1448,10 @@ static int
 offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
 			void *data)
 {
-	__offline_isolated_pages(start, start + nr_pages);
-	return 0;
-}
+	unsigned long *offlined_pages = (unsigned long *)data;
 
-static void
-offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
-{
-	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
-				offline_isolated_pages_cb);
+	*offlined_pages += __offline_isolated_pages(start, start + nr_pages);
+	return 0;
 }
 
 /*
@@ -1466,26 +1461,7 @@ static int
 check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
 			void *data)
 {
-	int ret;
-	long offlined = *(long *)data;
-	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
-	offlined = nr_pages;
-	if (!ret)
-		*(long *)data += offlined;
-	return ret;
-}
-
-static long
-check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
-{
-	long offlined = 0;
-	int ret;
-
-	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
-			check_pages_isolated_cb);
-	if (ret < 0)
-		offlined = (long)ret;
-	return offlined;
+	return test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
 }
 
 static int __init cmdline_parse_movable_node(char *p)
@@ -1570,7 +1546,7 @@ static int __ref __offline_pages(unsigne
 		  unsigned long end_pfn)
 {
 	unsigned long pfn, nr_pages;
-	long offlined_pages;
+	unsigned long offlined_pages = 0;
 	int ret, node, nr_isolate_pageblock;
 	unsigned long flags;
 	unsigned long valid_start, valid_end;
@@ -1646,14 +1622,15 @@ static int __ref __offline_pages(unsigne
 			goto failed_removal_isolated;
 		}
 		/* check again */
-		offlined_pages = check_pages_isolated(start_pfn, end_pfn);
-	} while (offlined_pages < 0);
+		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+					    NULL, check_pages_isolated_cb);
+	} while (ret);
 
-	pr_info("Offlined Pages %ld\n", offlined_pages);
 	/* Ok, all of our target is isolated.
 	   We cannot do rollback at this point. */
-	offline_isolated_pages(start_pfn, end_pfn);
-
+	walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+			      &offlined_pages, offline_isolated_pages_cb);
+	pr_info("Offlined Pages %ld\n", offlined_pages);
 	/*
 	 * Onlining will reset pagetype flags and makes migrate type
 	 * MOVABLE,  so just need to decrease the number of isolated
--- a/mm/page_alloc.c~mm-memory_hotplug-cleanup-memory-offline-path
+++ a/mm/page_alloc.c
@@ -8433,7 +8433,7 @@ void zone_pcp_reset(struct zone *zone)
  * All pages in the range must be in a single zone and isolated
  * before calling this.
  */
-void
+unsigned long
 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *page;
@@ -8441,12 +8441,15 @@ __offline_isolated_pages(unsigned long s
 	unsigned int order, i;
 	unsigned long pfn;
 	unsigned long flags;
+	unsigned long offlined_pages = 0;
+
 	/* find the first valid pfn */
 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
 		if (pfn_valid(pfn))
 			break;
 	if (pfn == end_pfn)
-		return;
+		return offlined_pages;
+
 	offline_mem_sections(pfn, end_pfn);
 	zone = page_zone(pfn_to_page(pfn));
 	spin_lock_irqsave(&zone->lock, flags);
@@ -8464,12 +8467,14 @@ __offline_isolated_pages(unsigned long s
 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
 			pfn++;
 			SetPageReserved(page);
+			offlined_pages++;
 			continue;
 		}
 
 		BUG_ON(page_count(page));
 		BUG_ON(!PageBuddy(page));
 		order = page_order(page);
+		offlined_pages += 1 << order;
 #ifdef CONFIG_DEBUG_VM
 		pr_info("remove from free list %lx %d %lx\n",
 			pfn, 1 << order, end_pfn);
@@ -8482,6 +8487,8 @@ __offline_isolated_pages(unsigned long s
 		pfn += (1 << order);
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
+
+	return offlined_pages;
 }
 #endif
_

Patches currently in -mm which might be from mhocko@xxxxxxxx are

mm-memory_hotplug-cleanup-memory-offline-path.patch
mm-memory_hotplug-provide-a-more-generic-restrictions-for-memory-hotplug.patch