ARM: mmu32: fix alignment when early remapping .text section

arch_remap_range() has a BUG_ON for unaligned memory region starts.
On AM335x (tested with a Beaglebone Black), _stext for the second-stage
barebox.bin is not page-aligned, so this broke the boot. Use
PAGE_ALIGN_DOWN on the start address to fix this.

Signed-off-by: Ahmad Fatoum <a.fatoum@xxxxxxxxxxxxxx>
---
 arch/arm/cpu/mmu_32.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index dc4b0e414d57..852c5626dedd 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -583,7 +583,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize)
 	/* maps main memory as cachable */
 	arch_remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
 	arch_remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_UNCACHED);
-	arch_remap_range(_stext, PAGE_ALIGN(_etext - _stext), MAP_CACHED);
+	arch_remap_range((void *)PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);

 	__mmu_cache_on();
 }
-- 
2.39.2
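
For illustration, a standalone sketch of the arithmetic behind the one-liner:
with a non-page-aligned _stext, the old call handed an unaligned start to
arch_remap_range(), while PAGE_ALIGN_DOWN rounds it to the preceding page
boundary; the size argument was already rounded up with PAGE_ALIGN. PAGE_SIZE,
the macro definitions and the example addresses below are assumptions made for
the sketch, not copied from barebox headers:

/*
 * Illustrative sketch only. PAGE_SIZE, the alignment macros and the
 * example addresses are assumptions modelled on the usual Linux-style
 * helpers, not taken from barebox.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		((uintptr_t)4096)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP(x, a)		ALIGN_DOWN((x) + (a) - 1, (a))
#define PAGE_ALIGN(x)		ALIGN_UP((x), PAGE_SIZE)
#define PAGE_ALIGN_DOWN(x)	ALIGN_DOWN((x), PAGE_SIZE)

int main(void)
{
	/* hypothetical second-stage .text placement with an unaligned start */
	uintptr_t stext = 0x8ff00040;
	uintptr_t etext = 0x8ff2a300;

	/* before the patch: the start passed to arch_remap_range() is unaligned */
	printf("old start 0x%" PRIxPTR " is %saligned\n", stext,
	       stext % PAGE_SIZE ? "un" : "");

	/* after the patch: start rounded down, size (as before) rounded up */
	uintptr_t new_start = PAGE_ALIGN_DOWN(stext);
	uintptr_t new_size  = PAGE_ALIGN(etext - stext);

	assert(new_start % PAGE_SIZE == 0);
	assert(new_size % PAGE_SIZE == 0);
	/* with these example values the rounded region still covers [_stext, _etext) */
	assert(new_start <= stext && new_start + new_size >= etext);

	printf("new start 0x%" PRIxPTR ", size 0x%" PRIxPTR "\n",
	       new_start, new_size);
	return 0;
}

Built as an ordinary userspace program, this prints the before/after start
addresses and checks that the rounded-down start is page-aligned, which is
the condition the BUG_ON in arch_remap_range() enforces.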