[PATCH 07/10] KVM: arm64: Separate host and hyp vcpu FP flags

The FP flags on the vcpu are used both as arguments to hyp and to track
state while hyp runs. In protected mode, this kind of state must be kept
safe from tampering by the host. Begin the separation with the FP flags.

Since protected mode is only available for nVHE and nVHE does not yet
support SVE, the SVE flags are left untouched.

Signed-off-by: Andrew Scull <ascull@xxxxxxxxxx>
---
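
Note: for illustration only, below is a minimal user-space sketch of the
host/hyp split this state separation is building towards: hyp works on a
private copy of the run state, so changes the host makes to its copy after
the vcpu has entered hyp have no effect. The names shadow_vcpu,
pkvm_flush_run_state and pkvm_sync_run_state are assumptions made up for
the example, not the actual pKVM interfaces introduced by this series.

/* Sketch of hyp keeping a private copy of per-vcpu run state. */
#include <stdint.h>
#include <stdio.h>

#define KVM_ARM64_RUN_FP_ENABLED	(1 << 0) /* guest FP regs loaded */
#define KVM_ARM64_RUN_FP_HOST		(1 << 1) /* host FP regs loaded */

struct kvm_vcpu_arch_run {
	uint64_t flags;
};

struct vcpu {
	struct kvm_vcpu_arch_run run;	/* host-writable copy */
};

struct shadow_vcpu {
	struct kvm_vcpu_arch_run run;	/* hyp-private copy */
};

/* On entry, hyp snapshots the host-provided run state ... */
static void pkvm_flush_run_state(struct shadow_vcpu *shadow,
				 const struct vcpu *host)
{
	shadow->run = host->run;
}

/* ... and only writes back what it chooses on exit. */
static void pkvm_sync_run_state(struct vcpu *host,
				const struct shadow_vcpu *shadow)
{
	host->run.flags = shadow->run.flags;
}

int main(void)
{
	struct vcpu host_vcpu = { .run.flags = KVM_ARM64_RUN_FP_HOST };
	struct shadow_vcpu shadow = { 0 };

	pkvm_flush_run_state(&shadow, &host_vcpu);

	/* The host fiddling with its copy mid-run changes nothing. */
	host_vcpu.run.flags = 0;

	/* Hyp tracks FP ownership on its private copy while the guest runs. */
	shadow.run.flags &= ~KVM_ARM64_RUN_FP_HOST;
	shadow.run.flags |= KVM_ARM64_RUN_FP_ENABLED;

	pkvm_sync_run_state(&host_vcpu, &shadow);
	printf("run flags after exit: 0x%llx\n",
	       (unsigned long long)host_vcpu.run.flags);
	return 0;
}
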
 arch/arm64/include/asm/kvm_host.h       | 33 ++++++++++++++++++-------
 arch/arm64/kvm/fpsimd.c                 | 24 +++++++++---------
 arch/arm64/kvm/hyp/include/hyp/switch.h |  6 ++---
 arch/arm64/kvm/hyp/nvhe/switch.c        |  4 +--
 arch/arm64/kvm/hyp/vhe/switch.c         |  4 +--
 5 files changed, 44 insertions(+), 27 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a01194371ae5..8c5242d4ed73 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -268,6 +268,16 @@ struct vcpu_reset_state {
 	bool		reset;
 };
 
+/*
+ * State that affects the behaviour of hyp when running a vcpu. In protected
+ * mode, the hypervisor will have a private copy of this state so that the host
+ * cannot interfere with the hyp while it is running.
+ */
+struct kvm_vcpu_arch_run {
+	/* Miscellaneous vcpu run state flags */
+	u64 flags;
+};
+
 struct kvm_vcpu_arch {
 	struct kvm_cpu_context ctxt;
 	void *sve_state;
@@ -289,6 +299,9 @@ struct kvm_vcpu_arch {
 	/* Miscellaneous vcpu state flags */
 	u64 flags;
 
+	/* State to manage running of the vcpu by hyp */
+	struct kvm_vcpu_arch_run run;
+
 	/*
 	 * We maintain more than a single set of debug registers to support
 	 * debugging the guest from the host and to maintain separate host and
@@ -390,15 +403,17 @@ struct kvm_vcpu_arch {
 
 /* vcpu_arch flags field values: */
 #define KVM_ARM64_DEBUG_DIRTY		(1 << 0) /* vcpu is using debug */
-#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
-#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
-#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
-#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
-#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
-#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
-#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
-#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
-#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
+#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 1) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 2) /* SVE enabled for EL0 */
+#define KVM_ARM64_GUEST_HAS_SVE		(1 << 3) /* SVE exposed to guest */
+#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 4) /* SVE config completed */
+#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 5) /* PTRAUTH exposed to guest */
+#define KVM_ARM64_PENDING_EXCEPTION	(1 << 6) /* Exception pending */
+#define KVM_ARM64_EXCEPT_MASK		(7 << 7) /* Target EL/MODE */
+
+/* vcpu_arch_run flags field values: */
+#define KVM_ARM64_RUN_FP_ENABLED	(1 << 0) /* guest FP regs loaded */
+#define KVM_ARM64_RUN_FP_HOST		(1 << 1) /* host FP regs loaded */
 
 /*
  * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index dcc5bfad5408..74a4b55d1b37 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -43,8 +43,9 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
  * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
  * The actual loading is done by the FPSIMD access trap taken to hyp.
  *
- * Here, we just set the correct metadata to indicate that the FPSIMD
- * state in the cpu regs (if any) belongs to current on the host.
+ * Here, we just set the correct metadata to indicate that the FPSIMD state in
+ * the cpu regs (if any) belongs to current on the host and will need to be
+ * saved before replacing it.
  *
  * TIF_SVE is backed up here, since it may get clobbered with guest state.
  * This flag is restored by kvm_arch_vcpu_put_fp(vcpu).
@@ -55,9 +56,10 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 
 	BUG_ON(!current->mm);
 
-	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
-			      KVM_ARM64_FP_HOST |
-			      KVM_ARM64_HOST_SVE_IN_USE |
+	vcpu->arch.run.flags &= ~(KVM_ARM64_RUN_FP_ENABLED |
+				  KVM_ARM64_RUN_FP_HOST);
+
+	vcpu->arch.flags &= ~(KVM_ARM64_HOST_SVE_IN_USE |
 			      KVM_ARM64_HOST_SVE_ENABLED);
 
 	if (!system_supports_fpsimd())
@@ -70,9 +72,9 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 		clear_thread_flag(TIF_FOREIGN_FPSTATE);
 		update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
 
-		vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
+		vcpu->arch.run.flags |= KVM_ARM64_RUN_FP_ENABLED;
 	} else {
-		vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+		vcpu->arch.run.flags |= KVM_ARM64_RUN_FP_HOST;
 	}
 
 	if (test_thread_flag(TIF_SVE))
@@ -99,8 +101,8 @@ void kvm_arch_vcpu_sync_fp_before_hyp(struct kvm_vcpu *vcpu)
 		return;
 
 	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
-		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
-				      KVM_ARM64_FP_HOST);
+		vcpu->arch.run.flags &= ~(KVM_ARM64_RUN_FP_ENABLED |
+					  KVM_ARM64_RUN_FP_HOST);
 }
 
 /*
@@ -113,7 +115,7 @@ void kvm_arch_vcpu_sync_fp_after_hyp(struct kvm_vcpu *vcpu)
 {
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+	if (vcpu->arch.run.flags & KVM_ARM64_RUN_FP_ENABLED) {
 		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
 					 vcpu->arch.sve_state,
 					 vcpu->arch.sve_max_vl);
@@ -137,7 +139,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 
 	local_irq_save(flags);
 
-	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+	if (vcpu->arch.run.flags & KVM_ARM64_RUN_FP_ENABLED) {
 		fpsimd_thread_switch(current);
 
 		if (guest_has_sve)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 1afee8557ddf..3f299c7d42cd 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -227,7 +227,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
 	isb();
 
-	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
+	if (vcpu->arch.run.flags & KVM_ARM64_RUN_FP_HOST) {
 		/*
 		 * In the SVE case, VHE is assumed: it is enforced by
 		 * Kconfig and kvm_arch_init().
@@ -243,7 +243,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
 		}
 
-		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
+		vcpu->arch.run.flags &= ~KVM_ARM64_RUN_FP_HOST;
 	}
 
 	if (sve_guest) {
@@ -261,7 +261,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.fpsimd_cpu = smp_processor_id();
 
-	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
+	vcpu->arch.run.flags |= KVM_ARM64_RUN_FP_ENABLED;
 
 	return true;
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6fc1e0a5adaa..f0a32c993ac4 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -41,7 +41,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 
 	val = CPTR_EL2_DEFAULT;
 	val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
-	if (!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED)) {
+	if (!(vcpu->arch.run.flags & KVM_ARM64_RUN_FP_ENABLED)) {
 		val |= CPTR_EL2_TFP;
 		__activate_traps_fpsimd32(vcpu);
 	}
@@ -230,7 +230,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	__sysreg_restore_state_nvhe(host_ctxt);
 
-	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
+	if (vcpu->arch.run.flags & KVM_ARM64_RUN_FP_ENABLED)
 		__fpsimd_save_fpexc32(vcpu);
 
 	/*
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index f6f60a537b3e..5bb6a2cf574d 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -54,7 +54,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 
 	val |= CPTR_EL2_TAM;
 
-	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+	if (vcpu->arch.run.flags & KVM_ARM64_RUN_FP_ENABLED) {
 		if (vcpu_has_sve(vcpu))
 			val |= CPACR_EL1_ZEN;
 	} else {
@@ -151,7 +151,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
 	sysreg_restore_host_state_vhe(host_ctxt);
 
-	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
+	if (vcpu->arch.run.flags & KVM_ARM64_RUN_FP_ENABLED)
 		__fpsimd_save_fpexc32(vcpu);
 
 	__debug_switch_to_host(vcpu);
-- 
2.30.1.766.gb4fecdf3b7-goog

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


