This supports the SDEI_EVENT_SIGNAL hypercall. It is used by the guest
to inject an SDEI event, whose number must be zero, to the specified
vCPU. As routing mode and affinity aren't supported yet, the calling
vCPU is assumed to be the target. SDEI event 0x0 is a private event
with normal priority and is usually used for testing.

Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
 arch/arm64/kvm/sdei.c | 64 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 63 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index a24270378305..ba2ca65c871b 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -726,6 +726,66 @@ static int do_inject_event(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static unsigned long hypercall_signal(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_exposed_event *exposed_event;
+	struct kvm_sdei_registered_event *registered_event;
+	unsigned long event_num = smccc_get_arg1(vcpu);
+	int index;
+	unsigned long ret = SDEI_SUCCESS;
+
+	/* @event_num must be zero */
+	if (!kvm_sdei_is_default(event_num)) {
+		ret = SDEI_INVALID_PARAMETERS;
+		goto out;
+	}
+
+	spin_lock(&ksdei->lock);
+
+	/* Check if the registered event exists */
+	registered_event = find_registered_event(kvm, event_num);
+	if (!registered_event) {
+		ret = SDEI_INVALID_PARAMETERS;
+		goto unlock_kvm;
+	}
+
+	/*
+	 * Check if the event has been registered and enabled. The
+	 * @target_pe parameter isn't checked for now and the event
+	 * is assumed to be injected into the current vcpu. This
+	 * should be fixed when the routing mode and affinity are
+	 * supported.
+	 */
+	exposed_event = registered_event->exposed_event;
+	index = kvm_sdei_vcpu_index(vcpu, exposed_event);
+	if (!kvm_sdei_is_registered(registered_event, index) ||
+	    !kvm_sdei_is_enabled(registered_event, index) ||
+	    kvm_sdei_is_unregister_pending(registered_event, index)) {
+		ret = SDEI_INVALID_PARAMETERS;
+		goto unlock_kvm;
+	}
+
+	/* Check if the vcpu has been masked off */
+	spin_lock(&vsdei->lock);
+	if (vsdei->state.masked) {
+		ret = SDEI_INVALID_PARAMETERS;
+		goto unlock_vcpu;
+	}
+
+	/* Inject the event */
+	if (do_inject_event(vcpu, registered_event, false))
+		ret = SDEI_INVALID_PARAMETERS;
+
+unlock_vcpu:
+	spin_unlock(&vsdei->lock);
+unlock_kvm:
+	spin_unlock(&ksdei->lock);
+out:
+	return ret;
+}
+
 static unsigned long hypercall_reset(struct kvm_vcpu *vcpu, bool private)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -827,9 +887,11 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
 		break;
 	case SDEI_1_0_FN_SDEI_INTERRUPT_BIND:
 	case SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE:
-	case SDEI_1_1_FN_SDEI_EVENT_SIGNAL:
 		ret = SDEI_NOT_SUPPORTED;
 		break;
+	case SDEI_1_1_FN_SDEI_EVENT_SIGNAL:
+		ret = hypercall_signal(vcpu);
+		break;
 	case SDEI_1_0_FN_SDEI_PRIVATE_RESET:
 		ret = hypercall_reset(vcpu, true);
 		break;
-- 
2.23.0
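
For reference, a minimal, hypothetical guest-side sketch (not part of this
patch) of how the new handler could be exercised: it signals the
software-signaled event (number 0) to the calling PE through the SMCCC HVC
conduit and checks the returned status. It assumes the helpers from
include/linux/arm-smccc.h, the SDEI definitions from
include/uapi/linux/arm_sdei.h, and that SDEI_1_1_FN_SDEI_EVENT_SIGNAL is
introduced earlier in this series; the function name sdei_signal_self() is
made up for illustration.

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <uapi/linux/arm_sdei.h>

/*
 * Hypothetical guest-side helper: signal SDEI event 0 to the calling
 * vCPU. The target PE argument is passed as zero because the handler
 * above ignores it and always injects into the calling vCPU.
 */
static int sdei_signal_self(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_hvc(SDEI_1_1_FN_SDEI_EVENT_SIGNAL,
			  0,	/* event number, must be 0 */
			  0,	/* target PE, currently ignored */
			  &res);

	return res.a0 == SDEI_SUCCESS ? 0 : -EINVAL;
}

In a real guest the call would normally go through the SDEI firmware
driver's registered conduit rather than a raw HVC; the raw form is used
here only to keep the sketch self-contained.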