As all the VNCR-capable system registers are nicely separated from
the rest of the crowd, let's turn HCR_EL2.NV2 on and get the ball
rolling.

Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_arm.h     |  1 +
 arch/arm64/include/asm/kvm_emulate.h | 23 +++++++++++++----------
 arch/arm64/include/asm/sysreg.h      |  1 +
 arch/arm64/kvm/hyp/vhe/switch.c      | 14 +++++++++++++-
 4 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 0d88a7c51dec..37cd86aac727 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -14,6 +14,7 @@
 /* Hyp Configuration Register (HCR) bits */
 #define HCR_ATA		(UL(1) << 56)
 #define HCR_FWB		(UL(1) << 46)
+#define HCR_NV2		(UL(1) << 45)
 #define HCR_AT		(UL(1) << 44)
 #define HCR_NV1		(UL(1) << 43)
 #define HCR_NV		(UL(1) << 42)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 44b395854430..3afe937b81f1 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -242,21 +242,24 @@ static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
 
 static inline u64 __fixup_spsr_el2_write(struct kvm_cpu_context *ctxt, u64 val)
 {
-	if (!__vcpu_el2_e2h_is_set(ctxt)) {
-		/*
-		 * Clear the .M field when writing SPSR to the CPU, so that we
-		 * can detect when the CPU clobbered our SPSR copy during a
-		 * local exception.
-		 */
-		val &= ~0xc;
-	}
+	struct kvm_vcpu *vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+	if (enhanced_nested_virt_in_use(vcpu) || __vcpu_el2_e2h_is_set(ctxt))
+		return val;
 
-	return val;
+	/*
+	 * Clear the .M field when writing SPSR to the CPU, so that we
+	 * can detect when the CPU clobbered our SPSR copy during a
+	 * local exception.
+	 */
+	return val & ~0xc;
 }
 
 static inline u64 __fixup_spsr_el2_read(const struct kvm_cpu_context *ctxt, u64 val)
 {
-	if (__vcpu_el2_e2h_is_set(ctxt))
+	struct kvm_vcpu *vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+	if (enhanced_nested_virt_in_use(vcpu) || __vcpu_el2_e2h_is_set(ctxt))
 		return val;
 
 	/*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d047954400d4..4cb55cf3c1d9 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -487,6 +487,7 @@
 #define SYS_TCR_EL2			sys_reg(3, 4, 2, 0, 2)
 #define SYS_VTTBR_EL2			sys_reg(3, 4, 2, 1, 0)
 #define SYS_VTCR_EL2			sys_reg(3, 4, 2, 1, 2)
+#define SYS_VNCR_EL2			sys_reg(3, 4, 2, 2, 0)
 
 #define SYS_DACR32_EL2			sys_reg(3, 4, 3, 0, 0)
 
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 4d80596e32a5..6d57d09b8503 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -48,7 +48,13 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 		 * the EL1 virtual memory control register accesses
 		 * as well as the AT S1 operations.
 		 */
-		hcr |= HCR_TVM | HCR_TRVM | HCR_AT | HCR_TTLB | HCR_NV1;
+		if (enhanced_nested_virt_in_use(vcpu)) {
+			hcr &= ~HCR_TVM;
+		} else {
+			hcr |= HCR_TVM | HCR_TRVM | HCR_TTLB;
+		}
+
+		hcr |= HCR_AT | HCR_NV1;
 	} else {
 		/*
 		 * For a guest hypervisor on v8.1 (VHE), allow to
@@ -80,6 +86,12 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 			if (!vcpu_el2_tge_is_set(vcpu))
 				hcr |= HCR_AT | HCR_TTLB;
 		}
+
+		if (enhanced_nested_virt_in_use(vcpu)) {
+			hcr |= HCR_AT | HCR_TTLB | HCR_NV2;
+			write_sysreg_s(vcpu->arch.ctxt.vncr_array,
+				       SYS_VNCR_EL2);
+		}
 	} else if (nested_virt_in_use(vcpu)) {
 		hcr |= __vcpu_sys_reg(vcpu, HCR_EL2);
 	}
--
2.29.2
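
For anyone not steeped in ARMv8.4-NV: with HCR_EL2.NV2 set, the CPU no
longer traps a guest hypervisor's accesses to the VNCR-capable EL2
registers. Each such access is instead turned into a plain memory access
at a fixed, architected offset from the address programmed into
VNCR_EL2. KVM points that register at a per-vcpu page (the vncr_array
used above), so the L1 hypervisor reads and writes its EL2 register
state straight to memory, and KVM picks the values up from the page
whenever it needs them. A minimal sketch of the software view, where
vncr_offset_of() is a hypothetical helper standing in for the
architected per-register offsets (not reproduced here):

	/*
	 * Sketch only, not part of the patch. vncr_offset_of() is a
	 * hypothetical lookup returning the architected byte offset of
	 * a VNCR-capable register within the VNCR page.
	 */
	static u64 vncr_read(struct kvm_vcpu *vcpu, u32 reg)
	{
		/* the page whose address is written to VNCR_EL2 */
		u64 *vncr = vcpu->arch.ctxt.vncr_array;

		return vncr[vncr_offset_of(reg) / sizeof(u64)];
	}

	static void vncr_write(struct kvm_vcpu *vcpu, u32 reg, u64 val)
	{
		u64 *vncr = vcpu->arch.ctxt.vncr_array;

		vncr[vncr_offset_of(reg) / sizeof(u64)] = val;
	}

This is also why the SPSR fixups above can bail out early when NV2 is in
use: SPSR_EL2 lives in the VNCR page rather than in the real register,
so the CPU can no longer clobber KVM's copy during a local exception.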