[PATCH 14/37] KVM: arm64: nVHE: Use __kvm_vcpu_run for the host vcpu

Keeping the assumption that transitions will only be from the host to a
guest and vice versa, make __kvm_vcpu_run switch to the requested vcpu
and run it.

The context is only switched when the requested vcpu is not already
running, allowing the host to be run repeatedly when servicing HVCs.
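As a rough sketch of the resulting flow, the lazy switch behaves like the
standalone C model below (all types and helpers are made-up stand-ins for
illustration, not the hyp code; -1 stands in for ARM_EXCEPTION_IRQ, and the
real entry path loops on fixup_guest_exit() rather than returning straight
away):

  #include <stdbool.h>
  #include <stdio.h>

  struct vcpu { bool is_host; const char *name; };

  /* Per-CPU in the real code; one global is enough for the model. */
  static struct vcpu *running_vcpu;

  static void switch_context(struct vcpu *next)
  {
          /* Save running_vcpu's state, restore next's state. */
          printf("switching %s -> %s\n", running_vcpu->name, next->name);
          running_vcpu = next;
  }

  static int vcpu_run(struct vcpu *vcpu)
  {
          /*
           * Only switch when the requested vcpu is not already running,
           * keeping repeated host entries cheap while servicing HVCs.
           */
          if (running_vcpu != vcpu) {
                  /*
                   * Transitions are still assumed to have the host on one
                   * side; bail out, as if interrupted, for guest-to-guest.
                   */
                  if (!running_vcpu->is_host && !vcpu->is_host)
                          return -1;
                  switch_context(vcpu);
          }
          /* Enter the vcpu here and return its exit code. */
          return 0;
  }

  int main(void)
  {
          struct vcpu host = { .is_host = true, .name = "host" };
          struct vcpu guest = { .is_host = false, .name = "guest" };

          running_vcpu = &host;      /* the host is already loaded */
          vcpu_run(&host);           /* no switch needed */
          vcpu_run(&guest);          /* switches host -> guest */
          vcpu_run(&host);           /* switches guest -> host */
          return 0;
  }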

Signed-off-by: Andrew Scull <ascull@xxxxxxxxxx>
---
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 22 ++-----
 arch/arm64/kvm/hyp/nvhe/switch.c   | 95 ++++++++++++++++++++++--------
 2 files changed, 74 insertions(+), 43 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 2d621bf5ac3e..213977634601 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -115,10 +115,8 @@ void __noreturn kvm_hyp_main(void)
 {
 	/* Set tpidr_el2 for use by HYP */
 	struct kvm_vcpu *host_vcpu;
-	struct kvm_cpu_context *hyp_ctxt;
 
 	host_vcpu = __hyp_this_cpu_ptr(kvm_host_vcpu);
-	hyp_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 
 	kvm_init_host_cpu_context(&host_vcpu->arch.ctxt);
 
@@ -131,24 +129,14 @@ void __noreturn kvm_hyp_main(void)
 	 */
 	smccc_set_retval(host_vcpu, SMCCC_RET_SUCCESS, 0, 0, 0);
 
+	/* The host is already loaded so note it as the running vcpu. */
+	*__hyp_this_cpu_ptr(kvm_hyp_running_vcpu) = host_vcpu;
+
 	while (true) {
 		u64 exit_code;
 
-		/*
-		 * Set the running cpu for the vectors to pass to __guest_exit
-		 * so it can get the cpu context.
-		 */
-		*__hyp_this_cpu_ptr(kvm_hyp_running_vcpu) = host_vcpu;
-
-		/*
-		 * Enter the host now that we feel like we're in charge.
-		 *
-		 * This should merge with __kvm_vcpu_run as host becomes more
-		 * vcpu-like.
-		 */
-		do {
-			exit_code = __guest_enter(host_vcpu, hyp_ctxt);
-		} while (fixup_guest_exit(host_vcpu, &exit_code));
+		/* Enter the host now that we feel like we're in charge. */
+		exit_code = __kvm_vcpu_run(host_vcpu);
 
 		switch (ARM_EXCEPTION_CODE(exit_code)) {
 		case ARM_EXCEPTION_TRAP:
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 81cdf33f92bc..36140686e1d8 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -119,7 +119,7 @@ static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 /**
  * Disable host events, enable guest events
  */
-static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
+static void __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
 {
 	struct kvm_host_data *host;
 	struct kvm_pmu_events *pmu;
@@ -132,8 +132,6 @@ static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
 
 	if (pmu->events_guest)
 		write_sysreg(pmu->events_guest, pmcntenset_el0);
-
-	return (pmu->events_host || pmu->events_guest);
 }
 
 /**
@@ -154,13 +152,10 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 		write_sysreg(pmu->events_host, pmcntenset_el0);
 }
 
-/* Switch to the guest for legacy non-VHE systems */
-int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+static void __kvm_vcpu_switch_to_guest(struct kvm_cpu_context *host_ctxt,
+				       struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *host_ctxt;
-	struct kvm_cpu_context *guest_ctxt;
-	bool pmu_switch_needed;
-	u64 exit_code;
+	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 
 	/*
 	 * Having IRQs masked via PMR when entering the guest means the GIC
@@ -173,11 +168,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		pmr_sync();
 	}
 
-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
-	*__hyp_this_cpu_ptr(kvm_hyp_running_vcpu) = vcpu;
-	guest_ctxt = &vcpu->arch.ctxt;
-
-	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
+	__pmu_switch_to_guest(host_ctxt);
 
 	__sysreg_save_state_nvhe(host_ctxt);
 
@@ -199,17 +190,13 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__timer_enable_traps(vcpu);
 
 	__debug_switch_to_guest(vcpu);
+}
 
-	__set_vcpu_arch_workaround_state(vcpu);
-
-	do {
-		/* Jump in the fire! */
-		exit_code = __guest_enter(vcpu, host_ctxt);
-
-		/* And we're baaack! */
-	} while (fixup_guest_exit(vcpu, &exit_code));
-
-	__set_hyp_arch_workaround_state(vcpu);
+static void __kvm_vcpu_switch_to_host(struct kvm_cpu_context *host_ctxt,
+				      struct kvm_vcpu *host_vcpu,
+				      struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 
 	__sysreg_save_state_nvhe(guest_ctxt);
 	__sysreg32_save_state(vcpu);
@@ -230,12 +217,68 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	__debug_switch_to_host(vcpu);
 
-	if (pmu_switch_needed)
-		__pmu_switch_to_host(host_ctxt);
+	__pmu_switch_to_host(host_ctxt);
 
 	/* Returning to host will clear PSR.I, remask PMR if needed */
 	if (system_uses_irq_prio_masking())
 		gic_write_pmr(GIC_PRIO_IRQOFF);
+}
+
+static void __vcpu_switch_to(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *running_vcpu;
+
+	/*
+	 * Restoration is not yet pure so it still makes use of the previously
+	 * running vcpu.
+	 */
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	running_vcpu = __hyp_this_cpu_read(kvm_hyp_running_vcpu);
+
+	if (vcpu->arch.ctxt.is_host)
+		__kvm_vcpu_switch_to_host(host_ctxt, vcpu, running_vcpu);
+	else
+		__kvm_vcpu_switch_to_guest(host_ctxt, vcpu);
+
+	*__hyp_this_cpu_ptr(kvm_hyp_running_vcpu) = vcpu;
+}
+
+/* Switch to the guest for legacy non-VHE systems */
+int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *running_vcpu;
+	u64 exit_code;
+
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	running_vcpu = __hyp_this_cpu_read(kvm_hyp_running_vcpu);
+
+	if (running_vcpu != vcpu) {
+		if (!running_vcpu->arch.ctxt.is_host &&
+		    !vcpu->arch.ctxt.is_host) {
+			/*
+			 * There are still assumptions that the switch will
+			 * always be between a guest and the host so double
+			 * check that is the case. If it isn't, pretending
+			 * there was an interrupt is a harmless way to bail.
+			 */
+			return ARM_EXCEPTION_IRQ;
+		}
+
+		__vcpu_switch_to(vcpu);
+	}
+
+	__set_vcpu_arch_workaround_state(vcpu);
+
+	do {
+		/* Jump in the fire! */
+		exit_code = __guest_enter(vcpu, host_ctxt);
+
+		/* And we're baaack! */
+	} while (fixup_guest_exit(vcpu, &exit_code));
+
+	__set_hyp_arch_workaround_state(vcpu);
 
 	return exit_code;
 }
-- 
2.27.0.389.gc38d7665816-goog
