[RFC PATCH v2 12/18] arm64: head: avoid cache invalidation when entering with the MMU on

If we enter with the MMU on, there is no need for explicit cache
invalidation for stores to memory, as they will be coherent with the
caches.

Let's take advantage of this and create the ID map with the MMU still
enabled if that is how we entered, avoiding any cache invalidation
calls in that case.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
 arch/arm64/kernel/head.S | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)
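
A minimal sketch of the pattern the hunks below apply (illustration
only, not part of the patch itself): x22 is assumed to still hold the
value set by record_mmu_state at the start of primary_entry, which is
non-zero if we entered with the MMU on.

	str	x5, [x6]		// plain store; coherent if the MMU and caches are on
	cbnz	x22, 1f			// entered with the MMU on: skip the cache maintenance
	dmb	sy			// MMU off: order the store before the dc ivac
	dc	ivac, x6		// invalidate the potentially stale cache line
1: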

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 836237289ffb..db315129f15d 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -89,9 +89,9 @@
 SYM_CODE_START(primary_entry)
 	bl	record_mmu_state
 	bl	preserve_boot_args
+	bl	create_idmap
 	bl	init_kernel_el			// w0=cpu_boot_mode
 	bl	set_cpu_boot_mode_flag
-	bl	create_idmap
 
 	/*
 	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -124,11 +124,13 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
 	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
 	stp	x2, x3, [x0, #16]
 
+	cbnz	x22, 0f				// skip cache invalidation if MMU is on
 	dmb	sy				// needed before dc ivac with
 						// MMU off
 
 	add	x1, x0, #0x20			// 4 x 8 bytes
 	b	dcache_inval_poc		// tail call
+0:	ret
 SYM_CODE_END(preserve_boot_args)
 
 SYM_FUNC_START_LOCAL(clear_page_tables)
@@ -292,8 +294,10 @@ SYM_FUNC_START_LOCAL(create_idmap)
 1:
 	adr_l	x6, vabits_actual
 	str	x5, [x6]
+	cbnz	x22, 2f			// skip cache invalidation if MMU is on
 	dmb	sy
 	dc	ivac, x6		// Invalidate potentially stale cache line
+2:
 #endif
 	/*
 	 * VA_BITS may be too small to allow for an ID mapping to be created
@@ -311,13 +315,14 @@ SYM_FUNC_START_LOCAL(create_idmap)
 	adrp	x5, _end
 	clz	x5, x5
 	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
-	b.ge	1f			// .. then skip VA range extension
+	b.ge	4f			// .. then skip VA range extension
 
 	adr_l	x6, idmap_t0sz
 	str	x5, [x6]
+	cbnz	x22, 3f			// skip cache invalidation if MMU is on
 	dmb	sy
 	dc	ivac, x6		// Invalidate potentially stale cache line
-
+3:
 #if (VA_BITS < 48)
 #define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
 #define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
@@ -343,7 +348,7 @@ SYM_FUNC_START_LOCAL(create_idmap)
 	 */
 	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
 #endif
-1:
+4:
 	adr_l	x6, _end			// __pa(_end)
 	mov	x7, SWAPPER_MM_MMUFLAGS
 
@@ -354,11 +359,13 @@ SYM_FUNC_START_LOCAL(create_idmap)
 	 * accesses (MMU disabled), invalidate those tables again to
 	 * remove any speculatively loaded cache lines.
 	 */
+	cbnz	x22, 5f			// skip cache invalidation if MMU is on
 	dmb	sy
 
 	adrp	x0, idmap_pg_dir
 	adrp	x1, idmap_pg_end
 	b	dcache_inval_poc		// tail call
+5:	ret
 SYM_FUNC_END(create_idmap)
 
 SYM_FUNC_START_LOCAL(create_kernel_mapping)
-- 
2.30.2



