[RFC PATCH part-7 08/12] pkvm: x86: Donate shadow vm & vcpu pages to hypervisor

From: Shaoqin Huang <shaoqin.huang@xxxxxxxxx>

The shadow vm/vcpu pages are allocated in KVM-high but are then managed by
the pKVM hypervisor, so ownership of these pages must be transferred from
the host VM to the pKVM hypervisor via __pkvm_host_donate_hyp().

The donation is done during shadow vm/vcpu initialization; on shadow
vm/vcpu teardown, the pages are returned to the host VM via
__pkvm_hyp_donate_host().
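
The intended pairing can be sketched roughly as below. This is only an
illustration of the pattern used in the patch (the init_shadow_obj() /
teardown_shadow_obj() helper names are made up for the sketch; locking,
handle allocation and most error handling are omitted):

	/* Sketch: donate pages to the hypervisor before using them. */
	static int init_shadow_obj(u64 pa, size_t size)
	{
		void *obj;

		/* Move page ownership: host VM -> pKVM hypervisor. */
		if (__pkvm_host_donate_hyp(pa, size))
			return -EINVAL;

		obj = pkvm_phys_to_virt(pa);
		memset(obj, 0, size);
		/* ... hypervisor-side initialization ... */
		return 0;
	}

	/* Sketch: wipe and return the pages on teardown. */
	static void teardown_shadow_obj(void *obj, size_t size)
	{
		/* Clear hypervisor state before handing pages back. */
		memset(obj, 0, size);
		/* Return page ownership: pKVM hypervisor -> host VM. */
		WARN_ON(__pkvm_hyp_donate_host(pkvm_virt_to_phys(obj), size));
	}

Any failure after a successful donation must undo it, which is why the
init error paths below donate the pages back before returning.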

Signed-off-by: Shaoqin Huang <shaoqin.huang@xxxxxxxxx>
Signed-off-by: Chuanxiao Dong <chuanxiao.dong@xxxxxxxxx>
---
 arch/x86/kvm/vmx/pkvm/hyp/pkvm.c     | 50 ++++++++++++++++++++++++----
 arch/x86/kvm/vmx/pkvm/hyp/pkvm_hyp.h |  6 ++++
 2 files changed, 49 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/vmx/pkvm/hyp/pkvm.c b/arch/x86/kvm/vmx/pkvm/hyp/pkvm.c
index 63004ed6e90e..8a7305e9a68b 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/pkvm.c
+++ b/arch/x86/kvm/vmx/pkvm/hyp/pkvm.c
@@ -96,6 +96,7 @@ int __pkvm_init_shadow_vm(unsigned long kvm_va,
 			  size_t shadow_size)
 {
 	struct pkvm_shadow_vm *vm;
+	int shadow_vm_handle;
 
 	if (!PAGE_ALIGNED(shadow_pa) ||
 		!PAGE_ALIGNED(shadow_size) ||
@@ -103,29 +104,48 @@ int __pkvm_init_shadow_vm(unsigned long kvm_va,
 					   + pkvm_shadow_vcpu_array_size())))
 		return -EINVAL;
 
+	if (__pkvm_host_donate_hyp(shadow_pa, shadow_size))
+		return -EINVAL;
+
 	vm = pkvm_phys_to_virt(shadow_pa);
 
 	memset(vm, 0, shadow_size);
 	pkvm_spin_lock_init(&vm->lock);
 
 	vm->host_kvm_va = kvm_va;
+	vm->shadow_size = shadow_size;
 
 	if (pkvm_shadow_ept_init(&vm->sept_desc))
-		return -EINVAL;
+		goto undonate;
+
+	shadow_vm_handle = allocate_shadow_vm_handle(vm);
+	if (shadow_vm_handle < 0)
+		goto deinit_shadow_ept;
+
+	return shadow_vm_handle;
 
-	return allocate_shadow_vm_handle(vm);
+deinit_shadow_ept:
+	pkvm_shadow_ept_deinit(&vm->sept_desc);
+undonate:
+	memset(vm, 0, shadow_size);
+	__pkvm_hyp_donate_host(shadow_pa, shadow_size);
+	return -EINVAL;
 }
 
 unsigned long __pkvm_teardown_shadow_vm(int shadow_vm_handle)
 {
 	struct pkvm_shadow_vm *vm = free_shadow_vm_handle(shadow_vm_handle);
+	unsigned long shadow_size;
 
 	if (!vm)
 		return 0;
 
 	pkvm_shadow_ept_deinit(&vm->sept_desc);
 
-	memset(vm, 0, sizeof(struct pkvm_shadow_vm) + pkvm_shadow_vcpu_array_size());
+	shadow_size = vm->shadow_size;
+	memset(vm, 0, shadow_size);
+
+	WARN_ON(__pkvm_hyp_donate_host(pkvm_virt_to_phys(vm), shadow_size));
 
 	return pkvm_virt_to_phys(vm);
 }
@@ -321,32 +341,44 @@ s64 __pkvm_init_shadow_vcpu(struct kvm_vcpu *hvcpu, int shadow_vm_handle,
 		(pkvm_hyp->vmcs_config.size > PAGE_SIZE))
 		return -EINVAL;
 
+	if (__pkvm_host_donate_hyp(shadow_pa, shadow_size))
+		return -EINVAL;
+
 	shadow_vcpu = pkvm_phys_to_virt(shadow_pa);
 	memset(shadow_vcpu, 0, shadow_size);
+	shadow_vcpu->shadow_size = shadow_size;
 
 	ret = read_gva(hvcpu, vcpu_va, &shadow_vcpu->vmx, sizeof(struct vcpu_vmx), &e);
 	if (ret < 0)
-		return -EINVAL;
+		goto undonate;
 
 	vmcs12_va = (unsigned long)shadow_vcpu->vmx.vmcs01.vmcs;
 	if (gva2gpa(hvcpu, vmcs12_va, (gpa_t *)&shadow_vcpu->vmcs12_pa, 0, &e))
-		return -EINVAL;
+		goto undonate;
 
 	vm = get_shadow_vm(shadow_vm_handle);
 	if (!vm)
-		return -EINVAL;
+		goto undonate;
 
 	shadow_vcpu_handle = attach_shadow_vcpu_to_vm(vm, shadow_vcpu);
 
 	put_shadow_vm(shadow_vm_handle);
 
+	if (shadow_vcpu_handle < 0)
+		goto undonate;
+
 	return shadow_vcpu_handle;
+undonate:
+	memset(shadow_vcpu, 0, shadow_size);
+	__pkvm_hyp_donate_host(shadow_pa, shadow_size);
+	return -EINVAL;
 }
 
 unsigned long __pkvm_teardown_shadow_vcpu(s64 shadow_vcpu_handle)
 {
 	int shadow_vm_handle = to_shadow_vm_handle(shadow_vcpu_handle);
 	struct shadow_vcpu_state *shadow_vcpu;
+	unsigned long shadow_size;
 	struct pkvm_shadow_vm *vm = get_shadow_vm(shadow_vm_handle);
 
 	if (!vm)
@@ -359,7 +391,11 @@ unsigned long __pkvm_teardown_shadow_vcpu(s64 shadow_vcpu_handle)
 	if (!shadow_vcpu)
 		return 0;
 
-	memset(shadow_vcpu, 0, sizeof(struct shadow_vcpu_state));
+	shadow_size = shadow_vcpu->shadow_size;
+	memset(shadow_vcpu, 0, shadow_size);
+	WARN_ON(__pkvm_hyp_donate_host(pkvm_virt_to_phys(shadow_vcpu),
+				       shadow_size));
+
 	return pkvm_virt_to_phys(shadow_vcpu);
 }
 
diff --git a/arch/x86/kvm/vmx/pkvm/hyp/pkvm_hyp.h b/arch/x86/kvm/vmx/pkvm/hyp/pkvm_hyp.h
index bf5719eefa0e..0a57c19ce4a5 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/pkvm_hyp.h
+++ b/arch/x86/kvm/vmx/pkvm/hyp/pkvm_hyp.h
@@ -38,6 +38,9 @@ struct shadow_vcpu_state {
 
 	struct pkvm_shadow_vm *vm;
 
+	/* The donated size of shadow_vcpu. */
+	unsigned long shadow_size;
+
 	struct hlist_node hnode;
 	unsigned long vmcs12_pa;
 	bool vmcs02_inited;
@@ -93,6 +96,9 @@ struct pkvm_shadow_vm {
 	/* The host's kvm va. */
 	unsigned long host_kvm_va;
 
+	/* The donated size of shadow_vm. */
+	unsigned long shadow_size;
+
 	/*
 	 * VM's shadow EPT. All vCPU shares one mapping.
 	 * FIXME: a potential security issue if some vCPUs are
-- 
2.25.1



