From: Nicolas Pitre <nicolas.pitre@xxxxxxxxxx>

commit 6f16f4998f98e42e3f2dedf663cfb691ff0324af upstream.

We currently use a temporary 1MB section aligned to a 1MB boundary for
mapping the provided device tree until the final page table is created.
However, if the device tree happens to cross that 1MB boundary, the end
of it remains unmapped and the kernel crashes when it attempts to
access it.  Given no restriction on the location of that DTB, it could
end up with only a few bytes mapped at the end of a section.

Solve this issue by mapping two consecutive sections.

Signed-off-by: Nicolas Pitre <nico@xxxxxxxxxx>
Tested-by: Sascha Hauer <s.hauer@xxxxxxxxxxxxxx>
Tested-by: Tomasz Figa <t.figa@xxxxxxxxxxx>
Signed-off-by: Russell King <rmk+kernel@xxxxxxxxxxxxxxxx>
[bwh: Backported to 3.2:
 - Adjust context
 - The mapping is not conditional; drop the 'ne' suffixes]
Signed-off-by: Ben Hutchings <ben@xxxxxxxxxxxxxxx>
[yangyl: Backported to 3.4: Adjust context]
Signed-off-by: Yang Yingliang <yangyingliang@xxxxxxxxxx>
---
 arch/arm/kernel/head.S | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 3bf0c7f..72a63f1 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -254,6 +254,7 @@ __create_page_tables:
 	/*
 	 * Then map boot params address in r2 or the first 1MB (2MB with LPAE)
 	 * of ram if boot params address is not specified.
+	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
@@ -262,6 +263,8 @@ __create_page_tables:
 	add	r3, r3, #PAGE_OFFSET
 	add	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
 	orr	r6, r7, r0
+	str	r6, [r3], #1 << PMD_ORDER
+	add	r6, r6, #1 << SECTION_SHIFT
 	str	r6, [r3]

 #ifdef CONFIG_DEBUG_LL
--
1.8.0

--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
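
For readers unfamiliar with the failure mode, here is a minimal
stand-alone C sketch (hypothetical, not kernel code; it only assumes
the non-LPAE case where SECTION_SHIFT is 20, i.e. 1MB sections) that
computes how many sections a blob at an arbitrary address actually
spans.  It illustrates why a single temporary section mapping can
leave the tail of the DTB unmapped.

/*
 * Minimal sketch (hypothetical, not kernel code): how many 1MB
 * sections does a blob at [base, base + size) touch?  Assumes the
 * non-LPAE case, i.e. SECTION_SHIFT == 20.
 */
#include <stdio.h>

#define SECTION_SHIFT	20
#define SECTION_SIZE	(1UL << SECTION_SHIFT)
#define SECTION_MASK	(~(SECTION_SIZE - 1))

static unsigned long sections_needed(unsigned long base, unsigned long size)
{
	unsigned long first = base & SECTION_MASK;              /* section of first byte */
	unsigned long last  = (base + size - 1) & SECTION_MASK; /* section of last byte  */

	return ((last - first) >> SECTION_SHIFT) + 1;
}

int main(void)
{
	/* Hypothetical DTB: an 8KB blob starting 4KB before a 1MB boundary. */
	unsigned long dtb_base = 0x60000000UL + SECTION_SIZE - 0x1000UL;
	unsigned long dtb_size = 0x2000UL;

	printf("sections needed: %lu\n", sections_needed(dtb_base, dtb_size));
	/* Prints 2: a one-section mapping would leave the last 4KB unmapped. */
	return 0;
}

As long as the blob itself is smaller than one section, two
consecutive sections always cover it regardless of where it starts,
which is why the added str/add/str sequence in the patch writes a
second section entry unconditionally.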