Use the information from the reserved memory entries to modify the
mapping of the memory regions, marking them uncacheable and
non-executable. This also keeps the processor from speculating into
these regions, avoiding hard-to-debug scenarios where boots fail for
unknown reasons.

Signed-off-by: Rouven Czerwinski <r.czerwinski@xxxxxxxxxxxxxx>
---
 arch/arm/cpu/mmu.c | 34 ++++++++++++++++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
index 6388e1bf14..73f4cf5b36 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu.c
@@ -17,6 +17,7 @@
 #include <memory.h>
 #include <asm/system_info.h>
 #include <asm/sections.h>
+#include <of.h>
 
 #include "mmu.h"
 
@@ -407,6 +408,36 @@ static void vectors_init(void)
 	create_vector_table(ARM_LOW_VECTORS);
 }
 
+static void create_sections_with_intersect(struct memory_bank *bank)
+{
+	struct of_reserve_map *res_map;
+	unsigned long j_end;
+	unsigned long end;
+	unsigned long j;
+	u32 pmd_flags;
+	int i;
+
+	res_map = of_get_reserve_map();
+	if (!res_map)
+		return;
+
+	end = bank->start + bank->size - 1;
+
+	for (j = bank->start; j < end; j += PGDIR_SIZE) {
+		pmd_flags = PMD_SECT_DEF_CACHED;
+		j_end = j + PGDIR_SIZE - 1;
+
+		for (i = 0; i < res_map->num_entries; i++) {
+			if ((BIT(i) & res_map->xn) &&
+			    j_end >= res_map->start[i] &&
+			    j_end <= res_map->end[i])
+				pmd_flags = PMD_SECT_DEF_UNCACHED | PMD_SECT_XN;
+		}
+
+		create_sections(ttb, j, j_end, pmd_flags);
+	}
+}
+
 /*
  * Prepare MMU for usage enable it.
  */
@@ -468,8 +499,7 @@ void __mmu_init(bool mmu_on)
 	vectors_init();
 
 	for_each_memory_bank(bank) {
-		create_sections(ttb, bank->start, bank->start + bank->size - 1,
-				PMD_SECT_DEF_CACHED);
+		create_sections_with_intersect(bank);
 		__mmu_cache_flush();
 	}
 
-- 
2.32.0
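
For illustration only (not part of the patch): a minimal, self-contained
host-side sketch of the classification the new loop performs. The
reserve_map structure, addresses and sizes below are made-up stand-ins,
not the barebox of_reserve_map; only the intersection check mirrors the
patch, where a 1 MiB section is mapped uncached and XN when its end
address falls inside a reserved entry whose XN bit is set.

/*
 * Host-side sketch of the per-section classification done in
 * create_sections_with_intersect(). Names and addresses are
 * hypothetical; SECTION_SIZE stands in for PGDIR_SIZE.
 */
#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE	0x100000UL	/* 1 MiB section */

struct reserve_map {
	int num_entries;
	uint32_t xn;			/* bit i set: entry i is no-execute */
	unsigned long start[4];
	unsigned long end[4];
};

/* Return 1 if the section ending at sec_end lies inside an XN entry */
static int section_is_xn(const struct reserve_map *map, unsigned long sec_end)
{
	int i;

	for (i = 0; i < map->num_entries; i++) {
		if ((map->xn & (1U << i)) &&
		    sec_end >= map->start[i] && sec_end <= map->end[i])
			return 1;
	}

	return 0;
}

int main(void)
{
	/* One hypothetical reserved, no-execute region at 0x8f000000 */
	struct reserve_map map = {
		.num_entries = 1,
		.xn = 1U << 0,
		.start = { 0x8f000000 },
		.end   = { 0x8f0fffff },
	};
	unsigned long bank_start = 0x80000000, bank_size = 0x10000000;
	unsigned long j, end = bank_start + bank_size - 1;

	/* Walk the bank in 1 MiB steps, as the patch does with PGDIR_SIZE */
	for (j = bank_start; j < end; j += SECTION_SIZE) {
		unsigned long j_end = j + SECTION_SIZE - 1;

		if (section_is_xn(&map, j_end))
			printf("0x%08lx..0x%08lx -> uncached + XN\n", j, j_end);
	}

	return 0;
}

Compiled natively, this prints only the section 0x8f000000..0x8f0fffff
as uncached + XN; the rest of the bank keeps the default cached mapping.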