From: Jintack Lim <jintack@xxxxxxxxxxxxxxx>

Forward ELR_EL1, SPSR_EL1 and VBAR_EL1 traps to the virtual EL2 if
the virtual HCR_EL2.{NV,NV1}={1,1}. This is for recursive nested
virtualization.

Signed-off-by: Jintack Lim <jintack@xxxxxxxxxxxxxxx>
Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_arm.h    |  1 +
 arch/arm64/include/asm/kvm_nested.h |  1 +
 arch/arm64/kvm/emulate-nested.c     |  5 +++++
 arch/arm64/kvm/sys_regs.c           | 21 ++++++++++++++++++++-
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index e47faadbfde1..8e2b0bf1f484 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -20,6 +20,7 @@
 #define HCR_AMVOFFEN	(UL(1) << 51)
 #define HCR_FIEN	(UL(1) << 47)
 #define HCR_FWB		(UL(1) << 46)
+#define HCR_NV1		(UL(1) << 43)
 #define HCR_NV		(UL(1) << 42)
 #define HCR_API		(UL(1) << 41)
 #define HCR_APK		(UL(1) << 40)
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 04096113cdf0..4b7f9f35ece9 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -61,6 +61,7 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
 
 extern bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit);
 extern bool forward_nv_traps(struct kvm_vcpu *vcpu);
+extern bool forward_nv1_traps(struct kvm_vcpu *vcpu);
 
 struct sys_reg_params;
 struct sys_reg_desc;
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 75cf6f15accc..7c06844a5113 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -34,6 +34,11 @@ bool forward_nv_traps(struct kvm_vcpu *vcpu)
 	return forward_traps(vcpu, HCR_NV);
 }
 
+bool forward_nv1_traps(struct kvm_vcpu *vcpu)
+{
+	return forward_traps(vcpu, HCR_NV1);
+}
+
 static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
 {
 	u64 mode = spsr & PSR_MODE_MASK;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 56cc60eede59..d40e5181bf74 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -357,6 +357,16 @@ static bool access_rw(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static bool access_vbar_el1(struct kvm_vcpu *vcpu,
+			    struct sys_reg_params *p,
+			    const struct sys_reg_desc *r)
+{
+	if (forward_nv1_traps(vcpu))
+		return false;
+
+	return access_rw(vcpu, p, r);
+}
+
 /*
  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
  */
@@ -1919,6 +1929,9 @@ static bool access_elr(struct kvm_vcpu *vcpu,
 	if (el12_reg(p) && forward_nv_traps(vcpu))
 		return false;
 
+	if (!el12_reg(p) && forward_nv1_traps(vcpu))
+		return false;
+
 	if (p->is_write)
 		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
 	else
@@ -1934,6 +1947,9 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
 	if (el12_reg(p) && forward_nv_traps(vcpu))
 		return false;
 
+	if (!el12_reg(p) && forward_nv1_traps(vcpu))
+		return false;
+
 	if (p->is_write)
 		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
 	else
@@ -1949,6 +1965,9 @@ static bool access_spsr_el2(struct kvm_vcpu *vcpu,
 	if (el12_reg(p) && forward_nv_traps(vcpu))
 		return false;
 
+	if (!el12_reg(p) && forward_nv1_traps(vcpu))
+		return false;
+
 	if (p->is_write)
 		vcpu_write_sys_reg(vcpu, p->regval, SPSR_EL2);
 	else
@@ -2163,7 +2182,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
 	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
 
-	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
+	{ SYS_DESC(SYS_VBAR_EL1), access_vbar_el1, reset_val, VBAR_EL1, 0 },
 
 	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
 
 	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
-- 
2.34.1
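[Editor's note, not part of the patch: forward_nv1_traps() simply reuses
forward_traps() with HCR_NV1. The sketch below is only an illustration of
the behaviour forward_traps() is assumed to have elsewhere in the NV
series; the helper names used (vcpu_has_nv(), vcpu_is_el2(),
kvm_inject_nested_sync()) are assumptions drawn from that series, not
something introduced by this patch.]

/*
 * Illustrative sketch only, under the assumptions stated above: if the
 * guest hypervisor has set the given control bit in its (virtual)
 * HCR_EL2 and the vCPU is currently running below virtual EL2, the
 * trapped access is reflected to the virtual EL2 instead of being
 * emulated by the host.
 */
static bool example_forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
{
	/* Nothing to forward if nested virt is not in use for this vCPU. */
	if (!vcpu_has_nv(vcpu))
		return false;

	/* Check the bit in the virtual HCR_EL2 owned by the guest hypervisor. */
	if (!(__vcpu_sys_reg(vcpu, HCR_EL2) & control_bit))
		return false;

	/* Accesses from virtual EL2 itself are handled by the host as usual. */
	if (vcpu_is_el2(vcpu))
		return false;

	/* Reinject the exception into the virtual EL2 (assumed helper). */
	kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
	return true;
}

With that behaviour in mind, the hunks above make the non-EL12 accessors
(ELR_EL1, SPSR_EL1, VBAR_EL1) return false when the virtual NV1 trap
fires, so the access is re-delivered to the guest hypervisor rather than
emulated on its behalf.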