This supports event injection, delivery and cancellation. The event is injected and cancelled by kvm_sdei_{inject, cancel}_event(). For event delivery, kvm_sdei_deliver_event() is added to accommodate the KVM_REQ_SDEI request. The KVM_REQ_SDEI request can be raised in several situations: * PE is unmasked * Event is enabled * Completion of currently running event or handler on receiving EVENT_COMPLETE or EVENT_COMPLETE_AND_RESUME hypercall, which will be supported in the subsequent patch. Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx> --- arch/arm64/include/asm/kvm_sdei.h | 4 + arch/arm64/kvm/arm.c | 3 + arch/arm64/kvm/sdei.c | 123 ++++++++++++++++++++++++++++++ 3 files changed, 130 insertions(+) diff --git a/arch/arm64/include/asm/kvm_sdei.h b/arch/arm64/include/asm/kvm_sdei.h index 609338b17478..735d9ac1a5a2 100644 --- a/arch/arm64/include/asm/kvm_sdei.h +++ b/arch/arm64/include/asm/kvm_sdei.h @@ -64,6 +64,10 @@ struct kvm_sdei_vcpu { /* APIs */ int kvm_sdei_call(struct kvm_vcpu *vcpu); +int kvm_sdei_inject_event(struct kvm_vcpu *vcpu, + unsigned int num, bool immediate); +int kvm_sdei_cancel_event(struct kvm_vcpu *vcpu, unsigned int num); +void kvm_sdei_deliver_event(struct kvm_vcpu *vcpu); void kvm_sdei_create_vcpu(struct kvm_vcpu *vcpu); void kvm_sdei_destroy_vcpu(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e9516f951e7b..06cb5e38634e 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -720,6 +720,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) kvm_reset_vcpu(vcpu); + if (kvm_check_request(KVM_REQ_SDEI, vcpu)) + kvm_sdei_deliver_event(vcpu); + /* * Clear IRQ_PENDING requests that were made to guarantee * that a VCPU sees new virtual interrupts. 
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c index 42ba6f97b168..36a72c1750fc 100644 --- a/arch/arm64/kvm/sdei.c +++ b/arch/arm64/kvm/sdei.c @@ -266,6 +266,129 @@ int kvm_sdei_call(struct kvm_vcpu *vcpu) return 1; } +int kvm_sdei_inject_event(struct kvm_vcpu *vcpu, + unsigned int num, + bool immediate) +{ + struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei; + + if (!vsdei) + return -EPERM; + + if (num >= KVM_NR_SDEI_EVENTS || !test_bit(num, &vsdei->registered)) + return -ENOENT; + + /* + * The event may be expected to be delivered immediately. There + * are several cases we can't do this: + * + * (1) The PE has been masked from any events. + * (2) The event isn't enabled yet. + * (3) There are any pending or running events. + */ + if (immediate && + ((vcpu->arch.flags & KVM_ARM64_SDEI_MASKED) || + !test_bit(num, &vsdei->enabled) || + vsdei->pending || vsdei->running)) + return -EBUSY; + + set_bit(num, &vsdei->pending); + if (!(vcpu->arch.flags & KVM_ARM64_SDEI_MASKED) && + test_bit(num, &vsdei->enabled)) + kvm_make_request(KVM_REQ_SDEI, vcpu); + + return 0; +} + +int kvm_sdei_cancel_event(struct kvm_vcpu *vcpu, unsigned int num) +{ + struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei; + + if (!vsdei) + return -EPERM; + + if (num >= KVM_NR_SDEI_EVENTS || !test_bit(num, &vsdei->registered)) + return -ENOENT; + + if (test_bit(num, &vsdei->running)) + return -EBUSY; + + clear_bit(num, &vsdei->pending); + + return 0; +} + +void kvm_sdei_deliver_event(struct kvm_vcpu *vcpu) +{ + struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei; + struct kvm_sdei_event_context *ctxt = &vsdei->ctxt; + unsigned int num, i; + unsigned long pstate; + + if (!vsdei || (vcpu->arch.flags & KVM_ARM64_SDEI_MASKED)) + return; + + /* + * All supported events have normal priority. So the currently + * running event can't be preempted by any one else. 
+ */ + if (vsdei->running) + return; + + /* Select next pending event to be delivered */ + num = 0; + while (num < KVM_NR_SDEI_EVENTS) { + num = find_next_bit(&vsdei->pending, KVM_NR_SDEI_EVENTS, num); + if (test_bit(num, &vsdei->enabled)) + break; + } + + if (num >= KVM_NR_SDEI_EVENTS) + return; + + /* + * Save the interrupted context. We might have pending request + * to adjust PC. Lets adjust it now so that the resume address + * is correct when COMPLETE or COMPLETE_AND_RESUME hypercall + * is handled. + */ + __kvm_adjust_pc(vcpu); + ctxt->pc = *vcpu_pc(vcpu); + ctxt->pstate = *vcpu_cpsr(vcpu); + for (i = 0; i < ARRAY_SIZE(ctxt->regs); i++) + ctxt->regs[i] = vcpu_get_reg(vcpu, i); + + /* + * Inject event. The following registers are modified according + * to the specification. + * + * x0: event number + * x1: argument specified when the event is registered + * x2: PC of the interrupted context + * x3: PSTATE of the interrupted context + * PC: event handler + * PSTATE: Cleared nRW bit, but D/A/I/F bits are set + */ + for (i = 0; i < ARRAY_SIZE(ctxt->regs); i++) + vcpu_set_reg(vcpu, i, 0); + + vcpu_set_reg(vcpu, 0, num); + vcpu_set_reg(vcpu, 1, vsdei->handlers[num].ep_arg); + vcpu_set_reg(vcpu, 2, ctxt->pc); + vcpu_set_reg(vcpu, 3, ctxt->pstate); + + pstate = ctxt->pstate; + pstate &= ~(PSR_MODE32_BIT | PSR_MODE_MASK); + pstate |= (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h); + + *vcpu_cpsr(vcpu) = pstate; + *vcpu_pc(vcpu) = vsdei->handlers[num].ep_addr; + + /* Update event states */ + clear_bit(num, &vsdei->pending); + set_bit(num, &vsdei->running); +} + void kvm_sdei_create_vcpu(struct kvm_vcpu *vcpu) { struct kvm_sdei_vcpu *vsdei; -- 2.23.0 _______________________________________________ kvmarm mailing list kvmarm@xxxxxxxxxxxxxxxxxxxxx https://lists.cs.columbia.edu/mailman/listinfo/kvmarm