Create a macro load_ttbr1 to avoid having to repeat the same instruction
sequence 3 times in a subsequent patch. No functional change intended.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
 arch/arm64/kernel/head.S | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index eae147fabbee..e52429f9a135 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -697,6 +697,13 @@ SYM_FUNC_END(__secondary_too_slow)
 	dc	ivac, \tmp1			// Invalidate potentially stale cache line
 	.endm
 
+	.macro load_ttbr1, reg, tmp
+	phys_to_ttbr \reg, \reg
+	offset_ttbr1 \reg, \tmp
+	msr	ttbr1_el1, \reg
+	isb
+	.endm
+
 /*
  * Enable the MMU.
  *
@@ -718,12 +725,9 @@ SYM_FUNC_START(__enable_mmu)
 	cmp	x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
 	b.gt	__no_granule_support
 	update_early_cpu_boot_status 0, x3, x4
-	phys_to_ttbr	x1, x1
 	phys_to_ttbr	x2, x2
 	msr	ttbr0_el1, x2			// load TTBR0
-	offset_ttbr1 x1, x3
-	msr	ttbr1_el1, x1			// load TTBR1
-	isb
+	load_ttbr1 x1, x3
 
 	set_sctlr_el1	x0
 
-- 
2.30.2
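
For reference, a minimal sketch (not part of the patch) of what the new call
site in __enable_mmu expands to at assembly time, assuming the existing
phys_to_ttbr and offset_ttbr1 helpers keep their current behaviour:

	// load_ttbr1 x1, x3 substitutes \reg = x1 and \tmp = x3, yielding
	// the same sequence that was previously open-coded in __enable_mmu:
	phys_to_ttbr	x1, x1			// convert the page table's physical address into TTBR format
	offset_ttbr1	x1, x3			// apply any required TTBR1 baddr offset
	msr	ttbr1_el1, x1			// load TTBR1
	isb					// synchronise before the MMU is enabled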