[PATCH 15/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercalls

This supports the SDEI_1_0_FN_SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME}
hypercalls by implementing kvm_sdei_hypercall_complete(). If there is
a valid context, the following registers are restored, as sketched in
the example below. Otherwise, an error code is returned.

   * x0 -> x17
   * PC and PState

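As an illustration only (not part of this patch), a guest's SDEI event
handler would hand control back through the COMPLETE hypercall. The
helper below is hypothetical; arm_smccc_1_1_hvc(), SDEI_EV_HANDLED and
the function ID come from include/linux/arm-smccc.h and
include/uapi/linux/arm_sdei.h:

   #include <linux/arm-smccc.h>
   #include <uapi/linux/arm_sdei.h>

   static void sdei_handler_finish(void)
   {
   	struct arm_smccc_res res;

   	/*
   	 * Hand control back to the hypervisor. On success this does
   	 * not return: KVM restores x0 -> x17, PC and PState from the
   	 * context saved at dispatch time.
   	 */
   	arm_smccc_1_1_hvc(SDEI_1_0_FN_SDEI_EVENT_COMPLETE,
   			  SDEI_EV_HANDLED, &res);
   }
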
If it's a KVM private event, which originates from KVM itself, the
notifier is executed. Additionally, an IRQ exception is injected if
the request is to resume the guest through
SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME. This behaviour is defined
in the SDEI specification (v1.0).
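
A hypothetical private-event notifier, matching the shape of the call
site in kvm_sdei_hypercall_complete() below (the callback type and
KVM_SDEI_STATE_COMPLETED are introduced earlier in this series):

   static void sdei_priv_notifier(struct kvm_vcpu *vcpu,
   				  unsigned long num, unsigned int state)
   {
   	/* Runs once the guest signals completion of the event */
   	if (state == KVM_SDEI_STATE_COMPLETED)
   		pr_debug("SDEI event %lu completed on vCPU%d\n",
   			 num, vcpu->vcpu_id);
   }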

Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_emulate.h |  2 +
 arch/arm64/kvm/aarch32.c             |  8 +++
 arch/arm64/kvm/inject_fault.c        | 30 ++++++++++
 arch/arm64/kvm/sdei.c                | 88 +++++++++++++++++++++++++++-
 4 files changed, 127 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 4d0f8ea600ba..bb7aee5927a5 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -29,10 +29,12 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
 void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
 
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_irq(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_undef32(struct kvm_vcpu *vcpu);
+void kvm_inject_irq32(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
diff --git a/arch/arm64/kvm/aarch32.c b/arch/arm64/kvm/aarch32.c
index 40a62a99fbf8..73e9059cf2e8 100644
--- a/arch/arm64/kvm/aarch32.c
+++ b/arch/arm64/kvm/aarch32.c
@@ -181,6 +181,14 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu)
 	post_fault_synchronize(vcpu, loaded);
 }
 
+void kvm_inject_irq32(struct kvm_vcpu *vcpu)
+{
+	bool loaded = pre_fault_synchronize(vcpu);
+
+	prepare_fault32(vcpu, PSR_AA32_MODE_IRQ, 4);
+	post_fault_synchronize(vcpu, loaded);
+}
+
 /*
  * Modelled after TakeDataAbortException() and TakePrefetchAbortException
  * pseudocode.
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index e21fdd93027a..84e50b002cd0 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -168,6 +168,22 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
 }
 
+static void inject_irq64(struct kvm_vcpu *vcpu)
+{
+	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
+
+	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_irq);
+
+	/*
+	 * Build an unknown exception, depending on the instruction
+	 * set.
+	 */
+	if (kvm_vcpu_trap_il_is32bit(vcpu))
+		esr |= ESR_ELx_IL;
+
+	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+}
+
 /**
  * kvm_inject_dabt - inject a data abort into the guest
  * @vcpu: The VCPU to receive the data abort
@@ -214,6 +230,20 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 		inject_undef64(vcpu);
 }
 
+/**
+ * kvm_inject_irq - inject an IRQ into the guest
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_irq(struct kvm_vcpu *vcpu)
+{
+	if (vcpu_el1_is_32bit(vcpu))
+		kvm_inject_irq32(vcpu);
+	else
+		inject_irq64(vcpu);
+}
+
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
 {
 	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index 52d0f0809a37..cf6908e87edd 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -590,6 +590,77 @@ static unsigned long kvm_sdei_hypercall_enable(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
+static unsigned long kvm_sdei_hypercall_complete(struct kvm_vcpu *vcpu,
+						 bool resume)
+{
+	struct kvm_sdei_event *event = NULL;
+	struct kvm_sdei_kvm_event *kevent = NULL;
+	struct kvm_sdei_vcpu_event *vevent = NULL;
+	struct user_pt_regs *regs;
+	int i;
+
+	spin_lock(&vcpu->arch.sdei_lock);
+
+	if (!vcpu->arch.sdei_critical_event &&
+	    !vcpu->arch.sdei_normal_event) {
+		spin_unlock(&vcpu->arch.sdei_lock);
+		return SDEI_DENIED;
+	}
+
+	if (vcpu->arch.sdei_critical_event) {
+		vevent = vcpu->arch.sdei_critical_event;
+		regs = &vcpu->arch.sdei_critical_regs;
+		vcpu->arch.sdei_critical_event = NULL;
+	} else if (vcpu->arch.sdei_normal_event) {
+		vevent = vcpu->arch.sdei_normal_event;
+		regs = &vcpu->arch.sdei_normal_regs;
+		vcpu->arch.sdei_normal_event = NULL;
+	}
+
+	/* Restore registers: x0 -> x17, PC, PState */
+	for (i = 0; i < 18; i++)
+		vcpu_set_reg(vcpu, i, regs->regs[i]);
+
+	*vcpu_cpsr(vcpu) = regs->pstate;
+	*vcpu_pc(vcpu) = regs->pc;
+
+	/* Notifier for KVM private event */
+	kevent = vevent->event;
+	event = kevent->event;
+	if (event->priv && event->priv->notifier) {
+		event->priv->notifier(vcpu, event->priv->num,
+				      KVM_SDEI_STATE_COMPLETED);
+	}
+
+	/* Inject interrupt if needed */
+	if (resume)
+		kvm_inject_irq(vcpu);
+
+	/* Release vCPU event if needed */
+	vevent->users--;
+	if (!vevent->users) {
+		list_del(&vevent->link);
+		kfree(vevent);
+	}
+
+	/* Queue request if pending events exist */
+	if (!list_empty(&vcpu->arch.sdei_events))
+		kvm_make_request(KVM_REQ_SDEI, vcpu);
+
+	spin_unlock(&vcpu->arch.sdei_lock);
+
+	/*
+	 * Update the status of the KVM event. We can't do this
+	 * while holding the vCPU lock; otherwise, we might run
+	 * into a nested locking issue.
+	 */
+	spin_lock(&event->lock);
+	kevent->users--;
+	spin_unlock(&event->lock);
+
+	return SDEI_SUCCESS;
+}
+
 static unsigned long kvm_sdei_hypercall_unregister(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -988,6 +1059,7 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
 {
 	u32 function = smccc_get_function(vcpu);
 	unsigned long ret;
+	bool has_result = true;
 
 	switch (function) {
 	case SDEI_1_0_FN_SDEI_VERSION:
@@ -1003,8 +1075,16 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
 		ret = kvm_sdei_hypercall_enable(vcpu, false);
 		break;
 	case SDEI_1_0_FN_SDEI_EVENT_CONTEXT:
+		ret = SDEI_NOT_SUPPORTED;
+		break;
 	case SDEI_1_0_FN_SDEI_EVENT_COMPLETE:
+		has_result = false;
+		ret = kvm_sdei_hypercall_complete(vcpu, false);
+		break;
 	case SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME:
+		has_result = false;
+		ret = kvm_sdei_hypercall_complete(vcpu, true);
+		break;
 	case SDEI_1_0_FN_SDEI_EVENT_UNREGISTER:
 		ret = kvm_sdei_hypercall_unregister(vcpu);
 		break;
@@ -1037,7 +1117,13 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
 		ret = SDEI_NOT_SUPPORTED;
 	}
 
-	smccc_set_retval(vcpu, ret, 0, 0, 0);
+	/*
+	 * For the COMPLETE and COMPLETE_AND_RESUME hypercalls,
+	 * we don't set a return value; doing so would corrupt
+	 * the just-restored context (x0).
+	 */
+	if (has_result)
+		smccc_set_retval(vcpu, ret, 0, 0, 0);
 
 	return 1;
 }
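
For context, the restore loop in kvm_sdei_hypercall_complete() mirrors
a save done at dispatch time. A minimal sketch of that counterpart,
assuming the user_pt_regs fields used above (the real save lives in an
earlier patch of this series):

   static void kvm_sdei_save_context(struct kvm_vcpu *vcpu,
   				     struct user_pt_regs *regs)
   {
   	int i;

   	/* Mirror of the restore: x0 -> x17, PC and PState */
   	for (i = 0; i < 18; i++)
   		regs->regs[i] = vcpu_get_reg(vcpu, i);

   	regs->pc = *vcpu_pc(vcpu);
   	regs->pstate = *vcpu_cpsr(vcpu);
   }
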
-- 
2.23.0
