Now that all the handling is in place to deal with read-only page
tables at runtime, do a pass over the kernel page tables at boot to
remap read-only all the page table pages that were allocated early.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
 arch/arm64/mm/mmu.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 971501535757..b1212f6d48f2 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -559,8 +559,23 @@ static void __init map_mem(pgd_t *pgdp)
 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
 }
 
+static void mark_pgtables_ro(const pmd_t *pmdp, int level, int num_entries)
+{
+	while (num_entries--) {
+		if (pmd_valid(*pmdp) && pmd_table(*pmdp)) {
+			pmd_t *next = __va(__pmd_to_phys(*pmdp));
+
+			if (level < 2)
+				mark_pgtables_ro(next, level + 1, PTRS_PER_PMD);
+			set_pgtable_ro(next);
+		}
+		pmdp++;
+	}
+}
+
 void mark_rodata_ro(void)
 {
+	int pgd_level = 4 - CONFIG_PGTABLE_LEVELS;
 	unsigned long section_size;
 
 	/*
@@ -571,6 +586,11 @@ void mark_rodata_ro(void)
 	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
 			    section_size, PAGE_KERNEL_RO);
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	mark_pgtables_ro((pmd_t *)&tramp_pg_dir, pgd_level, PTRS_PER_PGD);
+#endif
+	mark_pgtables_ro((pmd_t *)&swapper_pg_dir, pgd_level, PTRS_PER_PGD);
+
 	debug_checkwx();
 }
 
-- 
2.30.2
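
For illustration, here is a minimal, self-contained user-space model
of the recursion above, assuming a 4-level configuration (pgd_level ==
0). The struct layout, descriptor bits, and helper names below are
invented for the sketch; only the 'level < 2' bound and the order in
which table pages are visited mirror the patch:

#include <stdio.h>
#include <stdint.h>

#define ENTRIES	4		/* tiny tables instead of PTRS_PER_PMD */
#define VALID	(1u << 0)	/* stand-in for the descriptor valid bit */
#define TABLE	(1u << 1)	/* stand-in for the descriptor table bit */

struct table {
	uint32_t desc[ENTRIES];
	struct table *child[ENTRIES];	/* stand-in for __pmd_to_phys()/__va() */
};

/* stand-in for set_pgtable_ro(): just report which page would go RO */
static void set_table_ro(struct table *t, int level)
{
	printf("level-%d table %p -> read-only\n", level, (void *)t);
}

static void mark_tables_ro(const struct table *t, int level)
{
	for (int i = 0; i < ENTRIES; i++) {
		/* only valid table descriptors point at another table page */
		if ((t->desc[i] & (VALID | TABLE)) != (VALID | TABLE))
			continue;

		/*
		 * Levels 0 and 1 may contain further table descriptors;
		 * level-2 entries point at leaf (PTE) pages, which hold no
		 * table pointers, so the descent stops there -- the
		 * 'level < 2' bound in the patch.
		 */
		if (level < 2)
			mark_tables_ro(t->child[i], level + 1);
		set_table_ro(t->child[i], level + 1);
	}
}

int main(void)
{
	/* one table page per level: pgd -> pud -> pmd -> pte */
	static struct table pte, pmd, pud, pgd;

	pgd.desc[0] = VALID | TABLE; pgd.child[0] = &pud;
	pud.desc[0] = VALID | TABLE; pud.child[0] = &pmd;
	pmd.desc[0] = VALID | TABLE; pmd.child[0] = &pte;

	/* like mark_pgtables_ro((pmd_t *)&swapper_pg_dir, 0, PTRS_PER_PGD) */
	mark_tables_ro(&pgd, 0);
	return 0;
}

Running this reports the PTE, PMD, and PUD pages read-only in that
order and leaves the root table untouched, matching how
mark_pgtables_ro() marks each next-level table only after descending
into it and never marks the pgdir it was handed. With fewer than four
translation levels, pgd_level simply starts at 1 or 2, and the same
bound stops the descent at the last level whose entries can carry
table descriptors.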