Ensure that reserved memory entries carrying the no-map flag are mapped
uncached and non-executable (XN) during early MMU initialization.

Signed-off-by: Rouven Czerwinski <r.czerwinski@xxxxxxxxxxxxxx>
---
 arch/arm/cpu/mmu-early.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/arch/arm/cpu/mmu-early.c b/arch/arm/cpu/mmu-early.c
index b985aa455f..63cf61b2aa 100644
--- a/arch/arm/cpu/mmu-early.c
+++ b/arch/arm/cpu/mmu-early.c
@@ -3,9 +3,11 @@
 #include <errno.h>
 #include <linux/sizes.h>
 #include <asm/memory.h>
+#include <asm-generic/memory_layout.h>
 #include <asm/system.h>
 #include <asm/cache.h>
 #include <asm-generic/sections.h>
+#include <pbl.h>
 
 #include "mmu.h"
 
@@ -24,6 +26,8 @@ static inline void map_region(unsigned long start, unsigned long size,
 void mmu_early_enable(unsigned long membase, unsigned long memsize,
 		      unsigned long _ttb)
 {
+	struct pbl_reserved_memory *res_mem = get_pbl_reserved_memory();
+	int i;
 	ttb = (uint32_t *)_ttb;
 
 	arm_set_cache_functions();
@@ -58,6 +62,14 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize,
 	/* maps main memory as cachable */
 	map_region(membase, memsize, PMD_SECT_DEF_CACHED);
 
+	for (i = 0; i < get_pbl_reserved_memory_num(); i++) {
+		if (res_mem->flag & FDT_RES_MEM_FLAG_NOMAP)
+			map_region(res_mem->base, res_mem->size,
+				   PMD_SECT_DEF_UNCACHED | PMD_SECT_XN);
+		res_mem++;
+	}
+
+
 	/*
 	 * With HAB enabled we call into the ROM code later in imx6_hab_get_status().
 	 * Map the ROM cached which has the effect that the XN bit is not set.
-- 
2.31.1
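
For readers following along, here is a minimal sketch of the reserved-memory
interface the new loop relies on. The helper names and the base/size/flag
fields are taken from the hunk above; the actual declarations are assumed to
come from <pbl.h> (as the added include suggests), and the exact layout shown
here is illustrative only:

/*
 * Illustrative sketch, not the real <pbl.h> definition: the assumed shape
 * of the records walked by the loop in mmu_early_enable().
 */
struct pbl_reserved_memory {
	unsigned long base;	/* physical start of the reserved region */
	unsigned long size;	/* region size in bytes */
	unsigned long flag;	/* e.g. FDT_RES_MEM_FLAG_NOMAP for "no-map" nodes */
};

/* Accessors used above: first entry of the table and the number of entries. */
struct pbl_reserved_memory *get_pbl_reserved_memory(void);
int get_pbl_reserved_memory_num(void);

The likely motivation: a region marked no-map in /reserved-memory must not be
accessed by the OS, so mapping it uncached and with the XN bit set in the
early page tables helps keep speculative instruction fetches and cache-line
allocations away from that range before barebox proper takes over.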