Use the information from the reserved memory entries to modify the mapping
of the affected memory regions, marking them as uncacheable and
non-executable. This also keeps the processor from speculating into these
regions and avoids hard-to-debug scenarios where boots fail for no apparent
reason.

Signed-off-by: Rouven Czerwinski <r.czerwinski@xxxxxxxxxxxxxx>
---
 arch/arm/cpu/mmu.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 58 insertions(+), 2 deletions(-)

diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
index 6af228505d..71d6cad1ef 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu.c
@@ -17,6 +17,7 @@
 #include <memory.h>
 #include <asm/system_info.h>
 #include <asm/sections.h>
+#include <of.h>
 
 #include "mmu.h"
 
@@ -387,6 +388,57 @@ static void vectors_init(void)
 	create_vector_table(ARM_LOW_VECTORS);
 }
 
+static void create_sections_with_intersect(struct memory_bank *bank)
+{
+	int i;
+	unsigned flag;
+	struct of_reserve_map *res_map;
+	unsigned long j_end;
+	unsigned long end;
+	unsigned long j;
+
+	res_map = of_get_reserve_map();
+	if (!res_map)
+		return;
+
+	end = bank->start + bank->size - 1;
+	j = bank->start;
+
+	while (j < end) {
+		flag = PMD_SECT_DEF_CACHED;
+		j_end = j + PGDIR_SIZE - 1;
+
+		for (i = 0; i < res_map->num_entries; i++) {
+			if ((BIT(i) & res_map->xn) && j <= res_map->end[i] &&
+			    j_end >= res_map->start[i])
+				flag = PMD_SECT_DEF_UNCACHED | PMD_SECT_XN;
+		}
+
+		create_sections(ttb, j, j_end, flag);
+		j += PGDIR_SIZE;
+	}
+}
+
+static bool intersects_reserved_map_xn(struct memory_bank *bank)
+{
+	struct of_reserve_map *res_map;
+	unsigned long bank_end;
+	int i;
+
+	res_map = of_get_reserve_map();
+
+	if (!res_map)
+		return false;
+
+	bank_end = bank->start + bank->size - 1;
+	for (i = 0; i < res_map->num_entries; i++) {
+		if (res_map->start[i] <= bank_end && res_map->end[i] >= bank->start)
+			return true;
+	}
+	return false;
+}
+
+
 /*
  * Prepare MMU for usage enable it.
  */
@@ -448,8 +500,12 @@ void __mmu_init(bool mmu_on)
 	vectors_init();
 
 	for_each_memory_bank(bank) {
-		create_sections(ttb, bank->start, bank->start + bank->size - 1,
-				PMD_SECT_DEF_CACHED);
+		if (intersects_reserved_map_xn(bank)) {
+			create_sections_with_intersect(bank);
+		} else {
+			create_sections(ttb, bank->start, bank->start + bank->size - 1,
+					PMD_SECT_DEF_CACHED);
+		}
 		__mmu_cache_flush();
 	}
 
-- 
2.31.1


_______________________________________________
barebox mailing list
barebox@xxxxxxxxxxxxxxxxxxx
http://lists.infradead.org/mailman/listinfo/barebox
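
As a side note, here is a minimal standalone sketch of the section/reserved-range
overlap test that the remapping above relies on. The struct range type, the
ranges_overlap() helper and the hard-coded bank and reserved addresses are
illustrative stand-ins only, not the barebox of_get_reserve_map() interface;
the snippet builds as a plain hosted C program.

#include <stdbool.h>
#include <stdio.h>

#define SZ_1M		0x00100000UL
#define PGDIR_SIZE	SZ_1M	/* one first-level section covers 1 MiB */

struct range {
	unsigned long start;	/* first byte of the region */
	unsigned long end;	/* last byte of the region (inclusive) */
};

/* Two inclusive ranges overlap iff each starts no later than the other ends. */
static bool ranges_overlap(const struct range *a, const struct range *b)
{
	return a->start <= b->end && b->start <= a->end;
}

int main(void)
{
	/* made-up reserved entry: 1 MiB at 0x4f000000 */
	struct range reserved = { 0x4f000000UL, 0x4f0fffffUL };
	/* made-up 512 MiB memory bank starting at 0x40000000 */
	unsigned long bank_start = 0x40000000UL;
	unsigned long bank_size = 0x20000000UL;
	unsigned long sec;

	/* walk the bank in section-sized steps, as the MMU setup does */
	for (sec = bank_start; sec < bank_start + bank_size; sec += PGDIR_SIZE) {
		struct range section = { sec, sec + PGDIR_SIZE - 1 };

		if (ranges_overlap(&section, &reserved))
			printf("section at 0x%08lx -> map uncached + XN\n", sec);
	}

	return 0;
}

Both ends are treated as inclusive here, which matches the "- 1" end
arithmetic used for the section and bank bounds in the patch.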