Ensure that reserved map entries with the no-map flag are marked as uncached and non-executable during the early MMU initialization. Signed-off-by: Rouven Czerwinski <r.czerwinski@xxxxxxxxxxxxxx> --- arch/arm/cpu/mmu-early.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/arm/cpu/mmu-early.c b/arch/arm/cpu/mmu-early.c index b985aa455f..9c3f01326b 100644 --- a/arch/arm/cpu/mmu-early.c +++ b/arch/arm/cpu/mmu-early.c @@ -3,9 +3,11 @@ #include <errno.h> #include <linux/sizes.h> #include <asm/memory.h> +#include <asm-generic/memory_layout.h> #include <asm/system.h> #include <asm/cache.h> #include <asm-generic/sections.h> +#include <pbl.h> #include "mmu.h" @@ -24,7 +26,10 @@ static inline void map_region(unsigned long start, unsigned long size, void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned long _ttb) { + const struct pbl_reserved_memory *res_mem; + int i; ttb = (uint32_t *)_ttb; + res_mem = get_pbl_reserved_memory(); arm_set_cache_functions(); @@ -58,6 +63,14 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, /* maps main memory as cachable */ map_region(membase, memsize, PMD_SECT_DEF_CACHED); + for (i = 0; i < get_pbl_reserved_memory_num(); i++) { + if (res_mem->flags & FDT_RES_MEM_FLAG_NOMAP) + map_region(res_mem->base, res_mem->size, + PMD_SECT_DEF_UNCACHED | PMD_SECT_XN); + res_mem++; + } + + /* * With HAB enabled we call into the ROM code later in imx6_hab_get_status(). * Map the ROM cached which has the effect that the XN bit is not set. -- 2.32.0 _______________________________________________ barebox mailing list barebox@xxxxxxxxxxxxxxxxxxx http://lists.infradead.org/mailman/listinfo/barebox