The early MMU code now uses pages to map the OP-TEE area non-executable. This mapping is overwritten with sections in barebox proper. Refrain from doing so by using arch_remap_range() and bypassing reserved areas. Signed-off-by: Sascha Hauer <s.hauer@xxxxxxxxxxxxxx> --- arch/arm/cpu/mmu_32.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c index 705d27a045..47711bed35 100644 --- a/arch/arm/cpu/mmu_32.c +++ b/arch/arm/cpu/mmu_32.c @@ -522,9 +522,17 @@ void __mmu_init(bool mmu_on) vectors_init(); for_each_memory_bank(bank) { - create_sections(bank->start, bank->start + bank->size - 1, - PMD_SECT_DEF_CACHED); - __mmu_cache_flush(); + struct resource *rsv; + resource_size_t pos; + + pos = bank->start; + + for_each_reserved_region(bank, rsv) { + arch_remap_range((void *)pos, rsv->start - pos, MAP_CACHED); + pos = rsv->end + 1; + } + + arch_remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED); } } -- 2.39.2