[PATCH 09/10] KVM: arm64: Use hyp-private run struct in protected mode

The run struct affects how hyp handles the guest's state, so in protected
mode it must be kept safe from the host. Achieve this by copying the
relevant values into hyp-private memory while a vcpu is running.

In traditional, non-protected mode there is no need to protect the values
from the host, so the run struct in host memory is used directly.
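
For reference only (not part of the patch), the copy-in/run/copy-back
pattern can be sketched as a standalone C snippet; the structure and
function names below only loosely mirror the ones in the diff and are
purely illustrative:

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's vcpu/run structures. */
    struct run_state { unsigned long flags; };
    struct vcpu { struct run_state run; };

    /* Snapshot the host-provided run state into private memory and
     * scrub the host copy so any accidental later use stands out. */
    static void sync_before_run(struct vcpu *vcpu, struct run_state *priv)
    {
        priv->flags = vcpu->run.flags;
        vcpu->run.flags = 0;
    }

    /* Write the private run state back so the host can observe it. */
    static void sync_after_run(struct vcpu *vcpu, const struct run_state *priv)
    {
        vcpu->run.flags = priv->flags;
    }

    int main(void)
    {
        struct vcpu vcpu = { .run = { .flags = 0x1 } };
        struct run_state priv;

        sync_before_run(&vcpu, &priv);
        /* ... guest runs here; hyp reads and writes only 'priv' ... */
        sync_after_run(&vcpu, &priv);

        printf("run flags seen by host: %#lx\n", vcpu.run.flags);
        return 0;
    }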

Signed-off-by: Andrew Scull <ascull@xxxxxxxxxx>
---
 arch/arm64/kvm/hyp/nvhe/switch.c | 33 +++++++++++++++++++++++++++++---
 1 file changed, 30 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 076c2200324f..a0fbaf0ee309 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -165,9 +165,26 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 		write_sysreg(pmu->events_host, pmcntenset_el0);
 }
 
+/* Snapshot run state from the host into private memory and sanitize it. */
+void __sync_vcpu_before_run(struct kvm_vcpu *vcpu, struct kvm_vcpu_arch_run *run)
+{
+	run->flags = vcpu->arch.run.flags;
+
+	/* Clear host state to make misuse apparent. */
+	vcpu->arch.run.flags = 0;
+}
+
+/* Sanitize the run state before writing it back to the host. */
+void __sync_vcpu_after_run(struct kvm_vcpu *vcpu, struct kvm_vcpu_arch_run *run)
+{
+	vcpu->arch.run.flags = run->flags;
+}
+
 /* Switch to the guest for legacy non-VHE systems */
 int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
+	struct kvm_vcpu_arch_run protected_run;
+	struct kvm_vcpu_arch_run *run;
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
 	bool pmu_switch_needed;
@@ -184,6 +201,13 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		pmr_sync();
 	}
 
+	if (is_protected_kvm_enabled()) {
+		run = &protected_run;
+		__sync_vcpu_before_run(vcpu, run);
+	} else {
+		run = &vcpu->arch.run;
+	}
+
 	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
@@ -206,7 +230,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg_restore_state_nvhe(guest_ctxt);
 
 	__load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
-	__activate_traps(vcpu, &vcpu->arch.run);
+	__activate_traps(vcpu, run);
 
 	__hyp_vgic_restore_state(vcpu);
 	__timer_enable_traps(vcpu);
@@ -218,7 +242,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		exit_code = __guest_enter(vcpu);
 
 		/* And we're baaack! */
-	} while (fixup_guest_exit(vcpu, &vcpu->arch.run, &exit_code));
+	} while (fixup_guest_exit(vcpu, run, &exit_code));
 
 	__sysreg_save_state_nvhe(guest_ctxt);
 	__sysreg32_save_state(vcpu);
@@ -230,7 +254,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	__sysreg_restore_state_nvhe(host_ctxt);
 
-	if (vcpu->arch.run.flags & KVM_ARM64_RUN_FP_ENABLED)
+	if (run->flags & KVM_ARM64_RUN_FP_ENABLED)
 		__fpsimd_save_fpexc32(vcpu);
 
 	/*
@@ -248,6 +272,9 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	host_ctxt->__hyp_running_vcpu = NULL;
 
+	if (is_protected_kvm_enabled())
+		__sync_vcpu_after_run(vcpu, run);
+
 	return exit_code;
 }
 
-- 
2.30.1.766.gb4fecdf3b7-goog
