If we enter with the MMU on, there is no need for explicit cache
invalidation for stores to memory, as they will be coherent with the
caches. Let's take advantage of this, and create the ID map with the
MMU still enabled if that is how we entered, and avoid any cache
invalidation calls in that case.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
 arch/arm64/kernel/head.S | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 643797b21a1c..5de2ba3539a8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -89,9 +89,9 @@ SYM_CODE_START(primary_entry)
 	bl	record_mmu_state
 	bl	preserve_boot_args
+	bl	create_idmap
 	bl	init_kernel_el			// w0=cpu_boot_mode
 	mov	x20, x0
-	bl	create_idmap
 
 	/*
 	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -134,11 +134,13 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
 	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
 	stp	x2, x3, [x0, #16]
 
+	cbnz	x19, 0f				// skip cache invalidation if MMU is on
 	dmb	sy				// needed before dc ivac with
 						// MMU off
 
 	add	x1, x0, #0x20			// 4 x 8 bytes
 	b	dcache_inval_poc		// tail call
+0:	ret
 SYM_CODE_END(preserve_boot_args)
 
 SYM_FUNC_START_LOCAL(clear_page_tables)
@@ -375,12 +377,13 @@ SYM_FUNC_START_LOCAL(create_idmap)
 	 * accesses (MMU disabled), invalidate those tables again to
 	 * remove any speculatively loaded cache lines.
 	 */
+	cbnz	x19, 0f				// skip cache invalidation if MMU is on
 	dmb	sy
 
 	adrp	x0, init_idmap_pg_dir
 	adrp	x1, init_idmap_pg_end
 	bl	dcache_inval_poc
-	ret	x28
+0:	ret	x28
SYM_FUNC_END(create_idmap)
 
 SYM_FUNC_START_LOCAL(create_kernel_mapping)
-- 
2.35.1
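
For reference, both hunks rely on the same guard: x19 is assumed to be
set to a nonzero value by record_mmu_state (called at the top of
primary_entry) when the boot CPU entered the kernel with the MMU, and
therefore the D-cache, enabled, so the invalidation to the PoC can be
skipped. A minimal standalone sketch of that pattern, under that
assumption and not the exact head.S code:

	cbnz	x19, 0f			// MMU was on at entry: the table
					// stores are already coherent
	dmb	sy			// order the stores before dc ivac
	adrp	x0, init_idmap_pg_dir	// x0 = start of region to invalidate
	adrp	x1, init_idmap_pg_end	// x1 = end of region
	bl	dcache_inval_poc	// invalidate [x0, x1) to the PoC
0:	ret	x28			// return via the copy of lr in x28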