This is a note to let you know that I've just added the patch titled

    xtensa: fix high memory/reserved memory collision

to the 4.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     xtensa-fix-high-memory-reserved-memory-collision.patch
and it can be found in the queue-4.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


>From 6ac5a11dc674bc5016ea716e8082fff61f524dc1 Mon Sep 17 00:00:00 2001
From: Max Filippov <jcmvbkbc@xxxxxxxxx>
Date: Tue, 13 Feb 2018 15:31:05 -0800
Subject: xtensa: fix high memory/reserved memory collision

From: Max Filippov <jcmvbkbc@xxxxxxxxx>

commit 6ac5a11dc674bc5016ea716e8082fff61f524dc1 upstream.

Xtensa memory initialization code frees high memory pages without
checking whether they are in the reserved memory regions or not. That
results in an invalid value of totalram_pages and duplicate page usage
by CMA and highmem. It produces a bunch of BUGs at startup looking like
this:

BUG: Bad page state in process swapper  pfn:70800
page:be60c000 count:0 mapcount:-127 mapping:  (null) index:0x1
flags: 0x80000000()
raw: 80000000 00000000 00000001 ffffff80 00000000 be60c014 be60c014 0000000a
page dumped because: nonzero mapcount
Modules linked in:
CPU: 0 PID: 1 Comm: swapper Tainted: G    B            4.16.0-rc1-00015-g7928b2cbe55b-dirty #23
Stack:
 bd839d33 00000000 00000018 ba97b64c a106578c bd839d70 be60c000 00000000
 a1378054 bd86a000 00000003 ba97b64c a1066166 bd839da0 be60c000 ffe00000
 a1066b58 bd839dc0 be504000 00000000 000002f4 bd838000 00000000 0000001e
Call Trace:
 [<a1065734>] bad_page+0xac/0xd0
 [<a106578c>] free_pages_check_bad+0x34/0x4c
 [<a1066166>] __free_pages_ok+0xae/0x14c
 [<a1066b58>] __free_pages+0x30/0x64
 [<a1365de5>] init_cma_reserved_pageblock+0x35/0x44
 [<a13682dc>] cma_init_reserved_areas+0xf4/0x148
 [<a10034b8>] do_one_initcall+0x80/0xf8
 [<a1361c16>] kernel_init_freeable+0xda/0x13c
 [<a125b59d>] kernel_init+0x9/0xd0
 [<a1004304>] ret_from_kernel_thread+0xc/0x18

Only free high memory pages that are not reserved.
Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Max Filippov <jcmvbkbc@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 arch/xtensa/mm/init.c |   70 +++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 63 insertions(+), 7 deletions(-)

--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -318,19 +318,75 @@ void __init zones_init(void)
 	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
+
+	reset_all_zones_managed_pages();
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		if (memblock_is_nomap(mem))
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
+	}
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
 /*
  * Initialize memory pages.
  */
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_HIGHMEM
-	unsigned long tmp;
-
-	reset_all_zones_managed_pages();
-	for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
-		free_highmem_page(pfn_to_page(tmp));
-#endif
+	free_highpages();
 
 	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
 	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);


Patches currently in stable-queue which might be from jcmvbkbc@xxxxxxxxx are

queue-4.4/xtensa-fix-high-memory-reserved-memory-collision.patch
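
For anyone skimming the diff, below is a minimal, self-contained sketch of the
interval-clipping idea that the new free_highpages() relies on: walk a high
memory PFN range and carve out any reserved sub-ranges, freeing only what
remains. It is not part of the queued patch; the reserved[] table, the
free_range() helper and the PFN values are invented for illustration, whereas
the kernel code iterates the real memblock memory and reserved lists.

/* Illustrative userspace sketch only -- not part of the queued patch. */
#include <stdio.h>

struct range { unsigned long start, end; };	/* [start, end) in PFNs */

/* hypothetical reserved regions, sorted by start (e.g. a CMA area) */
static const struct range reserved[] = {
	{ 0x70000, 0x71000 },
};

/* stand-in for free_area_high()/free_highmem_page() */
static void free_range(unsigned long start, unsigned long end)
{
	printf("free PFNs [%#lx, %#lx)\n", start, end);
}

/* free [start, end) except for anything listed in reserved[] */
static void free_highmem_minus_reserved(unsigned long start, unsigned long end)
{
	for (unsigned int i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
		unsigned long res_start = reserved[i].start;
		unsigned long res_end = reserved[i].end;

		if (res_end < start)		/* reservation lies below our window */
			continue;
		if (res_start < start)		/* clip the reservation to the window */
			res_start = start;
		if (res_start > end)
			res_start = end;
		if (res_end > end)
			res_end = end;
		if (res_start != start)		/* free the gap before the reservation */
			free_range(start, res_start);
		start = res_end;		/* resume after the reserved hole */
		if (start == end)
			break;
	}
	if (start < end)			/* free whatever is left at the top */
		free_range(start, end);
}

int main(void)
{
	/* made-up layout: highmem spans PFNs 0x40000..0x80000 */
	free_highmem_minus_reserved(0x40000, 0x80000);
	return 0;
}

On this made-up layout the sketch prints two ranges, [0x40000, 0x70000) and
[0x71000, 0x80000): pages inside the reserved area, such as pfn 0x70800 from
the BUG report above, stay with their reservation instead of also being handed
to the page allocator.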