On 10/17/2010 12:07 PM, Nadav Har'El wrote:
In this patch we add a list of L0 (hardware) VMCSs, which we use to hold one
hardware VMCS for each active vmcs12 (i.e., for each L2 guest).
We call each of these L0 VMCSs a "vmcs02", as it is the VMCS that L0 uses
to run its nested guest L2.
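
For reference, the fields used in this hunk (list, vmcs12_addr, vmcs02)
suggest a list node along these lines. This is only a sketch inferred from
the code below; the actual struct definition lives elsewhere in the patch:

struct vmcs_list {
	struct list_head list;	/* links into vmx->nested.vmcs02_list */
	gpa_t vmcs12_addr;	/* guest-physical address of the vmcs12 */
	struct vmcs *vmcs02;	/* hardware VMCS L0 uses to run this L2 */
};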
+
+/*
+ * Allocate an L0 VMCS (vmcs02) for the current L1 VMCS (vmcs12), if one
+ * does not already exist. The allocation is done in L0 memory, so to avoid
+ * denial-of-service attacks by guests, we limit the number of concurrently
+ * allocated vmcs02s. A well-behaved L1 will VMCLEAR unused vmcs12s and not
+ * trigger this limit.
+ */
+static const int NESTED_MAX_VMCS = 256;
Make this a #define, at the top of the file.
+static int nested_create_current_vmcs(struct kvm_vcpu *vcpu)
+{
+	struct vmcs_list *new_l2_guest;
+	struct vmcs *vmcs02;
+
+	if (nested_get_current_vmcs(vcpu))
+		return 0; /* nothing to do - we already have a VMCS */
+
+	if (to_vmx(vcpu)->nested.vmcs02_num >= NESTED_MAX_VMCS)
+		return -ENOMEM;
Why not just free_l1_state()?

You could have just nested_get_current_vmcs(), which creates the vmcs if
necessary and returns the old one if cached.
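
Something like the following sketch, which just folds the allocation logic
from this patch into the lookup (the list-walking loop is an assumption,
since nested_get_current_vmcs() itself isn't shown in this hunk):

static struct vmcs *nested_get_current_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs_list *item;
	struct vmcs *vmcs02;

	/* cached: return the vmcs02 already paired with this vmcs12 */
	list_for_each_entry(item, &vmx->nested.vmcs02_list, list)
		if (item->vmcs12_addr == vmx->nested.current_vmptr)
			return item->vmcs02;

	/* miss: create one, subject to the same NESTED_MAX_VMCS limit */
	if (vmx->nested.vmcs02_num >= NESTED_MAX_VMCS)
		return NULL;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return NULL;

	vmcs02 = alloc_vmcs();
	if (!vmcs02) {
		kfree(item);
		return NULL;
	}

	item->vmcs12_addr = vmx->nested.current_vmptr;
	item->vmcs02 = vmcs02;
	list_add(&item->list, &vmx->nested.vmcs02_list);
	vmx->nested.vmcs02_num++;
	return vmcs02;
}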
+
+	new_l2_guest = kmalloc(sizeof(struct vmcs_list), GFP_KERNEL);
+	if (!new_l2_guest)
+		return -ENOMEM;
+
+	vmcs02 = alloc_vmcs();
+	if (!vmcs02) {
+		kfree(new_l2_guest);
+		return -ENOMEM;
+	}
+
+	new_l2_guest->vmcs12_addr = to_vmx(vcpu)->nested.current_vmptr;
+	new_l2_guest->vmcs02 = vmcs02;
+	list_add(&new_l2_guest->list, &to_vmx(vcpu)->nested.vmcs02_list);
+	to_vmx(vcpu)->nested.vmcs02_num++;
+	return 0;
+}
+
@@ -4409,6 +4503,8 @@ static void vmx_free_vcpu(struct kvm_vcp
 		kunmap(to_vmx(vcpu)->nested.current_vmcs12_page);
 		nested_release_page(to_vmx(vcpu)->nested.current_vmcs12_page);
 	}
+	if (vmx->nested.vmxon)
+		free_l1_state(vcpu);
Can be called unconditionally.
 	vmx_free_vmcs(vcpu);
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
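
Relatedly, the teardown side presumably walks the same list. A hypothetical
sketch of that part of free_l1_state() (the real body is in another patch
of this series):

static void free_vmcs02_list(struct vcpu_vmx *vmx)
{
	struct vmcs_list *item, *n;

	list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_list, list) {
		free_vmcs(item->vmcs02);
		list_del(&item->list);
		kfree(item);
	}
	vmx->nested.vmcs02_num = 0;
}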
--
error compiling committee.c: too many arguments to function