Honor the trap forwarding bits for both ERET and SMC, using a new
helper that checks for common conditions.

Reviewed-by: Joey Gouly <joey.gouly@xxxxxxx>
Co-developed-by: Jintack Lim <jintack.lim@xxxxxxxxxx>
Signed-off-by: Jintack Lim <jintack.lim@xxxxxxxxxx>
Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_nested.h |  1 +
 arch/arm64/kvm/emulate-nested.c     | 27 +++++++++++++++++++++++++++
 arch/arm64/kvm/handle_exit.c        |  7 +++++++
 3 files changed, 35 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index c77d795556e1..dbc4e3a67356 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -60,6 +60,7 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
 	return ttbr0 & ~GENMASK_ULL(63, 48);
 }
 
+extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
 int kvm_init_nv_sysregs(struct kvm *kvm);
 
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 4697ba41b3a9..2d80e81ae650 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2117,6 +2117,26 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
 	return true;
 }
 
+static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+	bool control_bit_set;
+
+	if (!vcpu_has_nv(vcpu))
+		return false;
+
+	control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit;
+	if (!is_hyp_ctxt(vcpu) && control_bit_set) {
+		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+		return true;
+	}
+	return false;
+}
+
+bool forward_smc_trap(struct kvm_vcpu *vcpu)
+{
+	return forward_traps(vcpu, HCR_TSC);
+}
+
 static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
 {
 	u64 mode = spsr & PSR_MODE_MASK;
@@ -2155,6 +2175,13 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
 	u64 spsr, elr, mode;
 	bool direct_eret;
 
+	/*
+	 * Forward this trap to the virtual EL2 if the virtual
+	 * HCR_EL2.NV bit is set and this is coming from !EL2.
+	 */
+	if (forward_traps(vcpu, HCR_NV))
+		return;
+
 	/*
 	 * Going through the whole put/load motions is a waste of time
 	 * if this is a VHE guest hypervisor returning to its own
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 15221e481ccd..6a88ec024e2f 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -55,6 +55,13 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
 
 static int handle_smc(struct kvm_vcpu *vcpu)
 {
+	/*
+	 * Forward this trapped smc instruction to the virtual EL2 if
+	 * the guest has asked for it.
+	 */
+	if (forward_smc_trap(vcpu))
+		return 1;
+
 	/*
 	 * "If an SMC instruction executed at Non-secure EL1 is
 	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
-- 
2.39.2