In preparation for changing the way we initialize the permanent ID map,
update cpu_replace_ttbr1() so we can use it with the initial ID map as
well.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
 arch/arm64/include/asm/mmu_context.h | 13 +++++++++----
 arch/arm64/kernel/cpufeature.c       |  2 +-
 arch/arm64/kernel/suspend.c          |  2 +-
 arch/arm64/mm/kasan_init.c           |  4 ++--
 arch/arm64/mm/mmu.c                  |  2 +-
 5 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 7b387c3b312a..c7ccd82db1d2 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -105,13 +105,18 @@ static inline void cpu_uninstall_idmap(void)
 	cpu_switch_mm(mm->pgd, mm);
 }
 
-static inline void cpu_install_idmap(void)
+static inline void __cpu_install_idmap(pgd_t *idmap)
 {
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
 	cpu_set_idmap_tcr_t0sz();
 
-	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
+	cpu_switch_mm(lm_alias(idmap), &init_mm);
+}
+
+static inline void cpu_install_idmap(void)
+{
+	__cpu_install_idmap(idmap_pg_dir);
 }
 
 /*
@@ -142,7 +147,7 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
  */
-static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
+static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 {
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
@@ -165,7 +170,7 @@ static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
 
 	replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1));
 
-	cpu_install_idmap();
+	__cpu_install_idmap(idmap);
 	replace_phys(ttbr1);
 	cpu_uninstall_idmap();
 }
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d72c4b4d389c..1661766f50f3 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -3108,7 +3108,7 @@ subsys_initcall_sync(init_32bit_el0_mask);
 
 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
 {
-	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
 }
 
 /*
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 19ee7c33769d..40bf1551d1ad 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -52,7 +52,7 @@ void notrace __cpu_suspend_exit(void)
 
 	/* Restore CnP bit in TTBR1_EL1 */
 	if (system_supports_cnp())
-		cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+		cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
 
 	/*
 	 * PSTATE was not saved over suspend/resume, re-enable any detected
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index c12cd700598f..e969e68de005 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -236,7 +236,7 @@ static void __init kasan_init_shadow(void)
 	 */
 	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
 	dsb(ishst);
-	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
+	cpu_replace_ttbr1(lm_alias(tmp_pg_dir), idmap_pg_dir);
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
@@ -280,7 +280,7 @@ static void __init kasan_init_shadow(void)
 				PAGE_KERNEL_RO));
 
 	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
-	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
 }
 
 static void __init kasan_init_depth(void)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0618ece00b7e..de171114a979 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -789,7 +789,7 @@ void __init paging_init(void)
 
 	pgd_clear_fixmap();
 
-	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
 	init_mm.pgd = swapper_pg_dir;
 
 	memblock_phys_free(__pa_symbol(init_pg_dir),
-- 
2.30.2
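
(Side note, not part of the patch itself: with the extra parameter, all existing
callers keep passing idmap_pg_dir, while a later caller could hand
cpu_replace_ttbr1() a different ID map root, provided that ID map covers
idmap_cpu_replace_ttbr1. A minimal sketch, assuming a hypothetical
early_idmap_pg_dir page table:

	/* Sketch only; early_idmap_pg_dir is a hypothetical ID map root */
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), early_idmap_pg_dir);

Per the hunks above, cpu_replace_ttbr1() installs the given ID map via
__cpu_install_idmap(), calls the physical alias of idmap_cpu_replace_ttbr1 to
switch TTBR1_EL1, and then uninstalls the ID map again.)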