Hi Alexandru,
On 2020-01-02 13:46, Alexandru Elisei wrote:
Add a function to disable VHE and another one to re-enable VHE. Both
functions work under the assumption that the CPU had VHE mode enabled
at boot.
Minimal support to run with VHE disabled has been added to the TLB
invalidate functions and to the exception handling code.
Since we're touching the assembly enable/disable MMU code, let's take this
opportunity to replace a magic number with the proper define.
I've been using this test case to debug my NV code... only to realize
after a few hours of banging my head on the wall that it is the test
that needed debugging, see below... ;-)
Signed-off-by: Alexandru Elisei <alexandru.elisei@xxxxxxx>
---
lib/arm64/asm/mmu.h | 11 ++-
lib/arm64/asm/pgtable-hwdef.h | 53 ++++++++---
lib/arm64/asm/processor.h | 19 +++-
lib/arm64/processor.c | 37 +++++++-
arm/cstart64.S | 204 ++++++++++++++++++++++++++++++++++++++++--
5 files changed, 300 insertions(+), 24 deletions(-)
[...]
--- a/arm/cstart64.S
+++ b/arm/cstart64.S
@@ -104,6 +104,13 @@ exceptions_init:
.text
+exceptions_init_nvhe:
+ adrp x0, vector_table_nvhe
+ add x0, x0, :lo12:vector_table_nvhe
+ msr vbar_el2, x0
+ isb
+ ret
+
.globl get_mmu_off
get_mmu_off:
adrp x0, auxinfo
@@ -203,7 +210,7 @@ asm_mmu_enable:
TCR_IRGN_WBWA | TCR_ORGN_WBWA | \
TCR_SHARED
mrs x2, id_aa64mmfr0_el1
- bfi x1, x2, #32, #3
+ bfi x1, x2, #TCR_EL1_IPS_SHIFT, #3
msr tcr_el1, x1
/* MAIR */
@@ -228,6 +235,41 @@ asm_mmu_enable:
ret
+asm_mmu_enable_nvhe:
Note the "_nvhe" suffix, which implies that...
+ tlbi alle2
+ dsb nsh
+
+ /* TCR */
+ ldr x1, =TCR_EL2_RES1 | \
+ TCR_T0SZ(VA_BITS) | \
+ TCR_TG0_64K | \
+ TCR_IRGN0_WBWA | TCR_ORGN0_WBWA | \
+ TCR_SH0_IS
+ mrs x2, id_aa64mmfr0_el1
+ bfi x1, x2, #TCR_EL2_PS_SHIFT, #3
+ msr tcr_el2, x1
+
+ /* Same MAIR and TTBR0 as in VHE mode */
+ ldr x1, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
+ MAIR(0x04, MT_DEVICE_nGnRE) | \
+ MAIR(0x0c, MT_DEVICE_GRE) | \
+ MAIR(0x44, MT_NORMAL_NC) | \
+ MAIR(0xff, MT_NORMAL)
+ msr mair_el1, x1
... this should be mair_el2...
+
+ msr ttbr0_el1, x0
... and this should be ttbr0_el2.
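Something along these lines, i.e. a sketch that just switches to the EL2
copies, reusing the MAIR value in x1 and the TTBR0 value passed in x0:

	/* nVHE path: program the EL2 registers, not the EL1 ones */
	msr	mair_el2, x1
	msr	ttbr0_el2, x0
	isb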
+ isb
+
+ /* SCTLR */
+ ldr x1, =SCTLR_EL2_RES1 | \
+ SCTLR_EL2_C | \
+ SCTLR_EL2_I | \
+ SCTLR_EL2_M
+ msr sctlr_el2, x1
+ isb
+
+ ret
+
/* Taken with small changes from arch/arm64/include/asm/assembler.h */
.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2
adrp \tmp1, dcache_line_size
@@ -242,21 +284,61 @@ asm_mmu_enable:
dsb \domain
.endm
+clean_inval_cache:
+ adrp x0, __phys_offset
+ ldr x0, [x0, :lo12:__phys_offset]
+ adrp x1, __phys_end
+ ldr x1, [x1, :lo12:__phys_end]
+ dcache_by_line_op civac, sy, x0, x1, x2, x3
+ isb
+ ret
+
.globl asm_mmu_disable
asm_mmu_disable:
mrs x0, sctlr_el1
bic x0, x0, SCTLR_EL1_M
msr sctlr_el1, x0
isb
+ b clean_inval_cache
- /* Clean + invalidate the entire memory */
- adrp x0, __phys_offset
- ldr x0, [x0, :lo12:__phys_offset]
- adrp x1, __phys_end
- ldr x1, [x1, :lo12:__phys_end]
- dcache_by_line_op civac, sy, x0, x1, x2, x3
+asm_mmu_disable_nvhe:
+ mrs x0, sctlr_el2
+ bic x0, x0, SCTLR_EL2_M
+ msr sctlr_el2, x0
+ isb
+ b clean_inval_cache
+
+.globl asm_disable_vhe
+asm_disable_vhe:
+ str x30, [sp, #-16]!
+
+ bl asm_mmu_disable
+ msr hcr_el2, xzr
+ isb
At this stage, VHE is off...
+ bl exceptions_init_nvhe
+ /* Make asm_mmu_enable_nvhe happy by having TTBR0 value in x0. */
+ mrs x0, ttbr0_el1
... so this is going to sample the wrong TTBR. It really should be
TTBR0_EL2!
+ isb
nit: this ISB is useless, as you will have a dependency on x0 anyway.
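Putting the two remarks together, that part of asm_disable_vhe would simply
read (a sketch, only touching the lines quoted above):

	bl	exceptions_init_nvhe
	/* Make asm_mmu_enable_nvhe happy by having TTBR0 value in x0. */
	mrs	x0, ttbr0_el2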
With these fixes (and a few more terrible hacks to synchronize HCR_EL2
on ARMv8.4-NV), I can run this test reliably.
Thanks,
M.
--
Jazz is not dead. It just smells funny...