[PATCH v2 4/4] nested vmx: use a list to store the launched vmcs12 for L1 VMM

The launch state is not a member of the VMCS area itself, so use a
separate per-vCPU list to track which vmcs12s have been launched instead.

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
 arch/x86/kvm/vmx.c |   86 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 81 insertions(+), 5 deletions(-)
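
For reference, a minimal self-contained sketch of the same bookkeeping: a
side list keyed by the vmcs12 physical address records which vmcs12s are in
the launched state, so the flag no longer lives in the guest-visible vmcs12
page. This is an illustrative user-space sketch only, not kernel code: a
hand-rolled singly-linked list and malloc/free stand in for the kernel's
list_head and kzalloc/kfree, and the names and sample address are made up.

	#include <stdlib.h>
	#include <stdio.h>

	struct vmcs12_node {
		unsigned long vmcs12_pa;	/* key: physical address of the vmcs12 page */
		struct vmcs12_node *next;
	};

	/* VMLAUNCH/VMRESUME pre-check: has this vmcs12 been launched? */
	static int vmcs12_launched(struct vmcs12_node *pool, unsigned long pa)
	{
		for (; pool; pool = pool->next)
			if (pool->vmcs12_pa == pa)
				return 1;
		return 0;
	}

	/* Successful VMLAUNCH: remember that this vmcs12 is now launched. */
	static int set_vmcs12_launched(struct vmcs12_node **pool, unsigned long pa)
	{
		struct vmcs12_node *n;

		if (vmcs12_launched(*pool, pa))
			return 0;
		n = malloc(sizeof(*n));
		if (!n)
			return -1;	/* -ENOMEM in the kernel version */
		n->vmcs12_pa = pa;
		n->next = *pool;
		*pool = n;
		return 0;
	}

	/* VMCLEAR: forget the launch state of this vmcs12. */
	static void clear_vmcs12_launched(struct vmcs12_node **pool, unsigned long pa)
	{
		for (; *pool; pool = &(*pool)->next) {
			if ((*pool)->vmcs12_pa == pa) {
				struct vmcs12_node *victim = *pool;
				*pool = victim->next;
				free(victim);
				return;
			}
		}
	}

	int main(void)
	{
		struct vmcs12_node *pool = NULL;
		unsigned long pa = 0x1000;	/* made-up vmcs12 physical address */

		set_vmcs12_launched(&pool, pa);
		printf("launched: %d\n", vmcs12_launched(pool, pa));	/* 1 */
		clear_vmcs12_launched(&pool, pa);
		printf("launched: %d\n", vmcs12_launched(pool, pa));	/* 0 */
		return 0;
	}

In the patch below the same three operations are driven by VMCLEAR (clear),
a successful VMLAUNCH (set), and the VMLAUNCH/VMRESUME check (lookup),
keyed by __pa() of the mapped vmcs12 page.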

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 20de88b..3be9265 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -177,8 +177,7 @@ struct __packed vmcs12 {
 	u32 revision_id;
 	u32 abort;
 
-	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
-	u32 padding[7]; /* room for future expansion */
+	u32 padding[8]; /* room for future expansion */
 
 	u64 io_bitmap_a;
 	u64 io_bitmap_b;
@@ -339,6 +338,11 @@ struct vmcs02_list {
 	struct loaded_vmcs vmcs02;
 };
 
+struct vmcs12_list {
+	unsigned long vmcs12_pa;
+	struct list_head node;
+};
+
 /*
  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -364,6 +368,8 @@ struct nested_vmx {
 	 * we must keep them pinned while L2 runs.
 	 */
 	struct page *apic_access_page;
+	/* vmcs12_pool tracks which vmcs12 pages have been launched. */
+	struct list_head vmcs12_pool;
 };
 
 struct vcpu_vmx {
@@ -619,6 +625,58 @@ static void nested_release_page_clean(struct page *page)
 	kvm_release_page_clean(page);
 }
 
+static int vmcs12_launched(struct list_head *vmcs12_pool,
+			       unsigned long vmcs12_pa)
+{
+	struct vmcs12_list *iter;
+	struct list_head *pos;
+	int launched = 0;
+
+	list_for_each(pos, vmcs12_pool) {
+		iter = list_entry(pos, struct vmcs12_list, node);
+		if (vmcs12_pa == iter->vmcs12_pa) {
+			launched = 1;
+			break;
+		}
+	}
+
+	return launched;
+}
+
+static int set_vmcs12_launched(struct list_head *vmcs12_pool,
+			   unsigned long vmcs12_pa)
+{
+	struct vmcs12_list *vmcs12;
+
+	if (vmcs12_launched(vmcs12_pool, vmcs12_pa))
+		return 0;
+
+	vmcs12 = kzalloc(sizeof(struct vmcs12_list), GFP_KERNEL);
+	if (!vmcs12)
+		return -ENOMEM;
+
+	vmcs12->vmcs12_pa = vmcs12_pa;
+	list_add(&vmcs12->node, vmcs12_pool);
+
+	return 0;
+}
+
+static void clear_vmcs12_launched(struct list_head *vmcs12_pool,
+			       unsigned long vmcs12_pa)
+{
+	struct vmcs12_list *iter;
+	struct list_head *pos;
+
+	list_for_each(pos, vmcs12_pool) {
+		iter = list_entry(pos, struct vmcs12_list, node);
+		if (vmcs12_pa == iter->vmcs12_pa) {
+			list_del(&iter->node);
+			kfree(iter);
+			break;
+		}
+	}
+}
+
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
@@ -5116,6 +5174,18 @@ static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
 }
 
 /*
+ * Free the vmcs12 list.
+ */
+static void nested_free_vmcs12_list(struct vcpu_vmx *vmx)
+{
+	struct vmcs12_list *item, *n;
+	list_for_each_entry_safe(item, n, &vmx->nested.vmcs12_pool, node) {
+		list_del(&item->node);
+		kfree(item);
+	}
+}
+
+/*
  * Emulate the VMXON instruction.
  * Currently, we just remember that VMX is active, and do not save or even
  * inspect the argument to VMXON (the so-called "VMXON pointer") because we
@@ -5212,6 +5282,7 @@ static void free_nested(struct vcpu_vmx *vmx)
 	}
 
 	nested_free_all_saved_vmcss(vmx);
+	nested_free_vmcs12_list(vmx);
 }
 
 /* Emulate the VMXOFF instruction */
@@ -5364,7 +5435,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 	vmcs12 = kmap(page);
-	vmcs12->launch_state = 0;
+	clear_vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12));
 	kunmap(page);
 	nested_release_page(page);
 
@@ -6460,6 +6531,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmx->nested.current_vmptr = -1ull;
 	vmx->nested.current_vmcs12 = NULL;
+	INIT_LIST_HEAD(&vmx->nested.vmcs12_pool);
 
 	return &vmx->vcpu;
 
@@ -6839,6 +6911,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int cpu;
 	struct loaded_vmcs *vmcs02;
+	int is_launched;
 
 	if (!nested_vmx_check_permission(vcpu) ||
 	    !nested_vmx_check_vmcs12(vcpu))
@@ -6857,7 +6930,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * for misconfigurations which will anyway be caught by the processor
 	 * when using the merged vmcs02.
 	 */
-	if (vmcs12->launch_state == launch) {
+	is_launched =
+		vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12));
+	if (is_launched == launch) {
 		nested_vmx_failValid(vcpu,
 			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
 			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
@@ -6946,7 +7021,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	vcpu->cpu = cpu;
 	put_cpu();
 
-	vmcs12->launch_state = 1;
+	if (set_vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12)) < 0)
+		return -ENOMEM;
 
 	prepare_vmcs02(vcpu);
 
-- 
1.7.1
