Qian Cai <cai@xxxxxx> writes:

> 1) offline is busted [1]. It looks like test_pages_in_a_zone() missed the same
> pfn_section_valid() check.
>
> 2) powerpc booting is generating endless warnings [2]. In vmemmap_populated() at
> arch/powerpc/mm/init_64.c, I tried to change PAGES_PER_SECTION to
> PAGES_PER_SUBSECTION, but that alone does not seem to be enough.

Can you check with this change on ppc64? I haven't reviewed this series
yet and have done only limited testing with the change; before merging
this I need to go through the full series again. The vmemmap populate
code on ppc64 needs to handle two translation modes (hash and radix).
With respect to vmemmap, hash doesn't set up a translation in the Linux
page table. Hence we need to make sure we don't try to set up a mapping
for a range which is already covered by an existing mapping.

diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index a4e17a979e45..15c342f0a543 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -88,16 +88,23 @@ static unsigned long __meminit vmemmap_section_start(unsigned long page)
  * which overlaps this vmemmap page is initialised then this page is
  * initialised already.
  */
-static int __meminit vmemmap_populated(unsigned long start, int page_size)
+static bool __meminit vmemmap_populated(unsigned long start, int page_size)
 {
 	unsigned long end = start + page_size;
 	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));
 
-	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
-		if (pfn_valid(page_to_pfn((struct page *)start)))
-			return 1;
+	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page))) {
 
-	return 0;
+		struct mem_section *ms;
+		unsigned long pfn = page_to_pfn((struct page *)start);
+
+		if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+			return false;
+		ms = __nr_to_section(pfn_to_section_nr(pfn));
+		if (valid_section(ms))
+			return true;
+	}
+	return false;
 }
 
 /*
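
For reference, here is a minimal sketch of the subsection-aware check
that point 1) alludes to. It is an illustration, not the actual fix:
the helper name pfn_in_valid_subsection() is hypothetical, but
pfn_section_valid() is the real subsection-granularity test from
include/linux/mmzone.h that test_pages_in_a_zone() appears to be
missing:

/*
 * Illustrative only: with subsection memory hotplug, a section can be
 * valid while the particular subsection holding this pfn is not, so a
 * section-level check alone is insufficient for pfn walkers.
 */
static bool pfn_in_valid_subsection(unsigned long pfn)
{
	struct mem_section *ms;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return false;

	ms = __nr_to_section(pfn_to_section_nr(pfn));
	if (!valid_section(ms))
		return false;

	/* the subsection-level check the offline path also needs */
	return pfn_section_valid(ms, pfn);
}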