If we enter with the MMU and caches enabled, the caller may not have
performed any cache maintenance. So clean the ID mapped page to the
PoC, and invalidate the I-cache so we can safely execute from it after
disabling the MMU and caches.

Note that this means primary_entry() itself needs to be moved into the
ID map as well, as we will return from init_kernel_el() with the MMU
and caches off.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
 arch/arm64/kernel/head.S | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ec57a29f3f43..2f1dcc0c7594 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -75,7 +75,7 @@
 
 	__EFI_PE_HEADER
 
-	__INIT
+	.section ".idmap.text","awx"
 
 	/*
 	 * The following callee saved general purpose registers are used on the
@@ -91,6 +91,18 @@ SYM_CODE_START(primary_entry)
 	bl	record_mmu_state
 	bl	preserve_boot_args
 	bl	create_idmap
+
+	/*
+	 * If we entered with the MMU and caches on, clean the ID mapped part
+	 * of the primary boot code to the PoC and invalidate it from the
+	 * I-cache so we can safely turn them off.
+	 */
+	cbz	x22, 0f
+	adrp	x0, __idmap_text_start
+	adr_l	x1, __idmap_text_end
+	bl	dcache_clean_poc
+	ic	ialluis
+0:
 	bl	init_kernel_el			// w0=cpu_boot_mode
 	mov	x20, x0
 
@@ -104,6 +116,7 @@ SYM_CODE_START(primary_entry)
 	b	__primary_switch
SYM_CODE_END(primary_entry)
 
+	__INIT
 SYM_CODE_START_LOCAL(record_mmu_state)
 	mrs	x22, CurrentEL
 	cmp	x22, #CurrentEL_EL2
-- 
2.30.2
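
(As an aside, not part of the patch: the maintenance sequence added
above corresponds roughly to the C sketch below. dcache_clean_poc()
and icache_inval_all_pou() are the existing arm64 helpers declared in
arch/arm64/include/asm/cacheflush.h; the wrapper name and the boolean
standing in for the x22 flag recorded by record_mmu_state() are
invented for illustration only -- the real code has to be assembly
that runs from the ID map itself.)

#include <linux/types.h>
#include <asm/cacheflush.h>

extern char __idmap_text_start[], __idmap_text_end[];

/* Hypothetical C rendering of the new assembly, for illustration. */
static void clean_idmap_for_mmu_off(bool entered_with_mmu_on)
{
	/* x22 == 0 means the MMU and caches were off at entry */
	if (!entered_with_mmu_on)
		return;

	/* Clean the ID mapped boot code to the Point of Coherency */
	dcache_clean_poc((unsigned long)__idmap_text_start,
			 (unsigned long)__idmap_text_end);

	/* Invalidate stale I-cache contents, like "ic ialluis" */
	icache_inval_all_pou();
}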