From: Will Deacon <will@xxxxxxxxxx>

Allow the VMM to hook into and handle a subset of guest hypercalls
advertised by the host. For now, no such hypercalls exist, and so the
new capability returns 0 when queried.

Signed-off-by: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h |  2 ++
 arch/arm64/kvm/arm.c              | 25 +++++++++++++++++++++++++
 arch/arm64/kvm/hypercalls.c       | 19 +++++++++++++++++++
 3 files changed, 46 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 55de71791233..f6187526685a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -325,6 +325,8 @@ struct kvm_arch {
 	 * the associated pKVM instance in the hypervisor.
 	 */
 	struct kvm_protected_vm pkvm;
+
+	u64 hypercall_exit_enabled;
 };
 
 struct kvm_vcpu_fault_info {
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c0e683bde111..cd6c4df27c7b 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -60,6 +60,9 @@ static bool vgic_present, kvm_arm_initialised;
 static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
+/* KVM "vendor" hypercalls which may be forwarded to userspace on request. */
+#define KVM_EXIT_HYPERCALL_VALID_MASK	(0)
+
 bool is_kvm_arm_initialised(void)
 {
 	return kvm_arm_initialised;
@@ -123,6 +126,19 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 		}
 		mutex_unlock(&kvm->slots_lock);
 		break;
+	case KVM_CAP_EXIT_HYPERCALL:
+		if (cap->flags)
+			return -EINVAL;
+
+		if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK)
+			return -EINVAL;
+
+		if (cap->args[1] || cap->args[2] || cap->args[3])
+			return -EINVAL;
+
+		WRITE_ONCE(kvm->arch.hypercall_exit_enabled, cap->args[0]);
+		r = 0;
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -334,6 +350,9 @@ static int kvm_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
 		r = BIT(0);
 		break;
+	case KVM_CAP_EXIT_HYPERCALL:
+		r = KVM_EXIT_HYPERCALL_VALID_MASK;
+		break;
 	default:
 		r = 0;
 	}
@@ -1071,6 +1090,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		ret = kvm_handle_mmio_return(vcpu);
 		if (ret <= 0)
 			return ret;
+	} else if (run->exit_reason == KVM_EXIT_HYPERCALL) {
+		smccc_set_retval(vcpu,
+				 vcpu->run->hypercall.ret,
+				 vcpu->run->hypercall.args[0],
+				 vcpu->run->hypercall.args[1],
+				 vcpu->run->hypercall.args[2]);
 	}
 
 	vcpu_load(vcpu);
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 89b5b61bc9f7..5e04be7c026a 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -132,6 +132,25 @@ static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
 	}
 }
 
+static int __maybe_unused kvm_vcpu_exit_hcall(struct kvm_vcpu *vcpu, u32 nr, u32 nr_args)
+{
+	u64 mask = vcpu->kvm->arch.hypercall_exit_enabled;
+	u32 i;
+
+	if (nr_args > 6 || !(mask & BIT(nr))) {
+		smccc_set_retval(vcpu, SMCCC_RET_INVALID_PARAMETER, 0, 0, 0);
+		return 1;
+	}
+
+	vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
+	vcpu->run->hypercall.nr = nr;
+
+	for (i = 0; i < nr_args; ++i)
+		vcpu->run->hypercall.args[i] = vcpu_get_reg(vcpu, i + 1);
+
+	return 0;
+}
+
 #define SMC32_ARCH_RANGE_BEGIN	ARM_SMCCC_VERSION_FUNC_ID
 #define SMC32_ARCH_RANGE_END	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
 						   ARM_SMCCC_SMC_32,		\
-- 
2.44.0.rc1.240.g4c46232300-goog