HFGITR_EL2 allows the trapping of SVC instructions to EL2. Allow these
traps to be forwarded to the virtual EL2.

Take this opportunity to deny any 32bit activity when NV is enabled.

Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/kvm/arm.c         |  4 ++++
 arch/arm64/kvm/handle_exit.c | 12 ++++++++++++
 2 files changed, 16 insertions(+)

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 72dc53a75d1c..8b51570a76f8 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -36,6 +36,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
 #include <asm/kvm_pkvm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/sections.h>
@@ -818,6 +819,9 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
 	if (likely(!vcpu_mode_is_32bit(vcpu)))
 		return false;
 
+	if (vcpu_has_nv(vcpu))
+		return true;
+
 	return !kvm_supports_32bit_el0();
 }
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 6dcd6604b6bc..3b86d534b995 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -226,6 +226,17 @@ static int kvm_handle_eret(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int handle_svc(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * So far, SVC traps only occur for NV via HFGITR_EL2. An SVC
+	 * from a 32bit guest would be caught by vcpu_mode_is_bad_32bit(),
+	 * so we should only have to deal with a 64bit exception.
+	 */
+	kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
 	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
@@ -239,6 +250,7 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[ESR_ELx_EC_SMC32]	= handle_smc,
 	[ESR_ELx_EC_HVC64]	= handle_hvc,
 	[ESR_ELx_EC_SMC64]	= handle_smc,
+	[ESR_ELx_EC_SVC64]	= handle_svc,
 	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
 	[ESR_ELx_EC_SVE]	= handle_sve,
 	[ESR_ELx_EC_ERET]	= kvm_handle_eret,
-- 
2.34.1