> From: Nadav Har'El
> Sent: Tuesday, May 24, 2011 2:51 AM
>
> > >+	vmcs_init(vmx->loaded_vmcs->vmcs);
> > >+	vmx->loaded_vmcs->cpu = -1;
> > >+	vmx->loaded_vmcs->launched = 0;
> >
> > Perhaps a loaded_vmcs_init() to encapsulate initialization of these
> > three fields, you'll probably reuse it later.
>
> It's good you pointed this out, because it made me suddenly realise that I
> forgot to VMCLEAR the new vmcs02's I allocate. In practice it never made a
> difference, but better safe than sorry.

Yes, that's what the spec requires. You need VMCLEAR on any new VMCS, since
it does implementation-specific initialization in that VMCS region.

>
> I had to restructure some of the code a bit to be able to properly use this
> new function (in 3 places - __loaded_vmcs_clear, nested_get_current_vmcs02,
> vmx_create_vcpu).
>
> > Please repost separately after the fix, I'd like to apply it before the
> > rest of the series.
>
> I am adding a new version of this patch at the end of this mail.
>
> > (regarding interrupts, I think we can do that work post-merge. But I'd
> > like to see Kevin's comments addressed)
>
> I replied to his comments. Done some of the things he asked, and asked for
> more info on why/where he believes the current code is incorrect where I
> didn't understand what problems he pointed to, and am now waiting for him
> to reply.

As I replied in another thread, I believe this has been explained clearly
by Nadav.

>
>
> ------- 8< ------ 8< ---------- 8< ---------- 8< ----------- 8< -----------
>
> Subject: [PATCH 01/31] nVMX: Keep list of loaded VMCSs, instead of vcpus.
>
> In VMX, before we bring down a CPU we must VMCLEAR all VMCSs loaded on it
> because (at least in theory) the processor might not have written all of its
> content back to memory. Since a patch from June 26, 2008, this is done using
> a per-cpu "vcpus_on_cpu" linked list of vcpus loaded on each CPU.
>
> The problem is that with nested VMX, we no longer have the concept of a
> vcpu being loaded on a cpu: A vcpu has multiple VMCSs (one for L1, a pool for
> L2s), and each of those may have been last loaded on a different cpu.
>
> So instead of linking the vcpus, we link the VMCSs, using a new structure
> loaded_vmcs. This structure contains the VMCS, and the information pertaining
> to its loading on a specific cpu (namely, the cpu number, and whether it
> was already launched on this cpu once). In nested we will also use the same
> structure to hold L2 VMCSs, and vmx->loaded_vmcs is a pointer to the
> currently active VMCS.
>
> Signed-off-by: Nadav Har'El <nyh@xxxxxxxxxx>
> ---
>  arch/x86/kvm/vmx.c |  150 ++++++++++++++++++++++++-------------------
>  1 file changed, 86 insertions(+), 64 deletions(-)
>
> --- .before/arch/x86/kvm/vmx.c	2011-05-23 21:46:14.000000000 +0300
> +++ .after/arch/x86/kvm/vmx.c	2011-05-23 21:46:14.000000000 +0300
> @@ -116,6 +116,18 @@ struct vmcs {
>  	char data[0];
>  };
>
> +/*
> + * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
> + * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
> + * loaded on this CPU (so we can clear them if the CPU goes down).
> + */
> +struct loaded_vmcs {
> +	struct vmcs *vmcs;
> +	int cpu;
> +	int launched;
> +	struct list_head loaded_vmcss_on_cpu_link;
> +};
> +
>  struct shared_msr_entry {
>  	unsigned index;
>  	u64 data;
> @@ -124,9 +136,7 @@ struct shared_msr_entry {
>
>  struct vcpu_vmx {
>  	struct kvm_vcpu vcpu;
> -	struct list_head local_vcpus_link;
>  	unsigned long host_rsp;
> -	int launched;
>  	u8 fail;
>  	u8 cpl;
>  	bool nmi_known_unmasked;
> @@ -140,7 +150,14 @@ struct vcpu_vmx {
>  	u64 msr_host_kernel_gs_base;
>  	u64 msr_guest_kernel_gs_base;
>  #endif
> -	struct vmcs *vmcs;
> +	/*
> +	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
> +	 * non-nested (L1) guest, it always points to vmcs01. For a nested
> +	 * guest (L2), it points to a different VMCS.
> +	 */
> +	struct loaded_vmcs vmcs01;
> +	struct loaded_vmcs *loaded_vmcs;
> +	bool __launched; /* temporary, used in vmx_vcpu_run */
>  	struct msr_autoload {
>  		unsigned nr;
>  		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
> @@ -200,7 +217,11 @@ static int vmx_set_tss_addr(struct kvm *
>
>  static DEFINE_PER_CPU(struct vmcs *, vmxarea);
>  static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
> -static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
> +/*
> + * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
> + * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
> + */
> +static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
>  static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
>
>  static unsigned long *vmx_io_bitmap_a;
> @@ -501,6 +522,13 @@ static void vmcs_clear(struct vmcs *vmcs
>  		       vmcs, phys_addr);
>  }
>
> +static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
> +{
> +	vmcs_clear(loaded_vmcs->vmcs);
> +	loaded_vmcs->cpu = -1;
> +	loaded_vmcs->launched = 0;
> +}
> +

Call it vmcs_init instead, since you now remove the original vmcs_init
invocation? That name would better reflect the necessity of adding VMCLEAR
for a new vmcs.

>  static void vmcs_load(struct vmcs *vmcs)
>  {
>  	u64 phys_addr = __pa(vmcs);
> @@ -514,25 +542,24 @@ static void vmcs_load(struct vmcs *vmcs)
>  		       vmcs, phys_addr);
>  }
>
> -static void __vcpu_clear(void *arg)
> +static void __loaded_vmcs_clear(void *arg)
>  {
> -	struct vcpu_vmx *vmx = arg;
> +	struct loaded_vmcs *loaded_vmcs = arg;
>  	int cpu = raw_smp_processor_id();
>
> -	if (vmx->vcpu.cpu == cpu)
> -		vmcs_clear(vmx->vmcs);
> -	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
> +	if (loaded_vmcs->cpu != cpu)
> +		return; /* cpu migration can race with cpu offline */

What do you mean by "cpu migration" here? Why does 'cpu offline' matter here
with regard to the cpu change?
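
If the point is that loaded_vmcs_clear() below reads loaded_vmcs->cpu without
holding any lock, so the vmcs may already have been cleared on that cpu by the
time this handler runs (e.g. by vmclear_local_loaded_vmcss() on the offline
path, or because the vmcs has since been loaded elsewhere), then an explicit
comment would help. Just a sketch of what I mean - the body is unchanged from
your patch, only the comment reflects my guess at the intent:

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	/*
	 * The caller read loaded_vmcs->cpu without any lock. If this vmcs
	 * has meanwhile been cleared on this cpu (for example by
	 * vmclear_local_loaded_vmcss() while the cpu is being brought down),
	 * there is nothing left to do here, and repeating the list_del()
	 * below would corrupt the per-cpu list.
	 */
	if (loaded_vmcs->cpu != cpu)
		return;
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
	loaded_vmcs_init(loaded_vmcs);
}
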
> +	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
>  		per_cpu(current_vmcs, cpu) = NULL;
> -	list_del(&vmx->local_vcpus_link);
> -	vmx->vcpu.cpu = -1;
> -	vmx->launched = 0;
> +	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
> +	loaded_vmcs_init(loaded_vmcs);
>  }
>
> -static void vcpu_clear(struct vcpu_vmx *vmx)
> +static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
>  {
> -	if (vmx->vcpu.cpu == -1)
> -		return;
> -	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
> +	if (loaded_vmcs->cpu != -1)
> +		smp_call_function_single(
> +			loaded_vmcs->cpu, __loaded_vmcs_clear, loaded_vmcs, 1);
>  }
>
>  static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
> @@ -971,22 +998,22 @@ static void vmx_vcpu_load(struct kvm_vcp
>
>  	if (!vmm_exclusive)
>  		kvm_cpu_vmxon(phys_addr);
> -	else if (vcpu->cpu != cpu)
> -		vcpu_clear(vmx);
> +	else if (vmx->loaded_vmcs->cpu != cpu)
> +		loaded_vmcs_clear(vmx->loaded_vmcs);
>
> -	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
> -		per_cpu(current_vmcs, cpu) = vmx->vmcs;
> -		vmcs_load(vmx->vmcs);
> +	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
> +		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
> +		vmcs_load(vmx->loaded_vmcs->vmcs);
>  	}
>
> -	if (vcpu->cpu != cpu) {
> +	if (vmx->loaded_vmcs->cpu != cpu) {
>  		struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
>  		unsigned long sysenter_esp;
>
>  		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>  		local_irq_disable();
> -		list_add(&vmx->local_vcpus_link,
> -			 &per_cpu(vcpus_on_cpu, cpu));
> +		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
> +			 &per_cpu(loaded_vmcss_on_cpu, cpu));
>  		local_irq_enable();
>
>  		/*
> @@ -998,6 +1025,7 @@ static void vmx_vcpu_load(struct kvm_vcp
>
>  		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
>  		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
> +		vmx->loaded_vmcs->cpu = cpu;
>  	}
>  }
>
> @@ -1005,7 +1033,8 @@ static void vmx_vcpu_put(struct kvm_vcpu
>  {
>  	__vmx_load_host_state(to_vmx(vcpu));
>  	if (!vmm_exclusive) {
> -		__vcpu_clear(to_vmx(vcpu));
> +		__loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
> +		vcpu->cpu = -1;
>  		kvm_cpu_vmxoff();
>  	}
>  }
> @@ -1469,7 +1498,7 @@ static int hardware_enable(void *garbage
>  	if (read_cr4() & X86_CR4_VMXE)
>  		return -EBUSY;
>
> -	INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
> +	INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
>  	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
>
>  	test_bits = FEATURE_CONTROL_LOCKED;
> @@ -1493,14 +1522,14 @@ static int hardware_enable(void *garbage
>  	return 0;
>  }
>
> -static void vmclear_local_vcpus(void)
> +static void vmclear_local_loaded_vmcss(void)
>  {
>  	int cpu = raw_smp_processor_id();
> -	struct vcpu_vmx *vmx, *n;
> +	struct loaded_vmcs *v, *n;
>
> -	list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
> -				 local_vcpus_link)
> -		__vcpu_clear(vmx);
> +	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
> +				 loaded_vmcss_on_cpu_link)
> +		__loaded_vmcs_clear(v);
>  }
>
>
> @@ -1515,7 +1544,7 @@ static void kvm_cpu_vmxoff(void)
>  static void hardware_disable(void *garbage)
>  {
>  	if (vmm_exclusive) {
> -		vmclear_local_vcpus();
> +		vmclear_local_loaded_vmcss();
>  		kvm_cpu_vmxoff();
>  	}
>  	write_cr4(read_cr4() & ~X86_CR4_VMXE);
> @@ -1696,6 +1725,18 @@ static void free_vmcs(struct vmcs *vmcs)
>  	free_pages((unsigned long)vmcs, vmcs_config.order);
>  }
>
> +/*
> + * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
> + */
> +static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
> +{
> +	if (!loaded_vmcs->vmcs)
> +		return;
> +	loaded_vmcs_clear(loaded_vmcs);
> +	free_vmcs(loaded_vmcs->vmcs);
> +	loaded_vmcs->vmcs = NULL;
> +}

I'd prefer not to do the cleanup work through loaded_vmcs, since it's just a
pointer to a loaded vmcs structure. Though you can carefully arrange for the
nested vmcs cleanup to happen before it, that is not very clean and a bit
error prone when judged from this function alone. It's clearer to directly
clean up vmcs01, and if you want, an assertion could be added to make sure
loaded_vmcs doesn't point to any stale vmcs02 structure after the nested
cleanup step.

Thanks,
Kevin
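
P.S. To make that last point concrete, the vcpu-free path I have in mind is
roughly the following (only a rough sketch, assuming the nested teardown has
already freed every vmcs02 and left loaded_vmcs pointing back at vmcs01; the
WARN_ON is the assertion I mentioned):

	/* nested cleanup must have run by now, so no vmcs02 can be live */
	WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
	if (vmx->vmcs01.vmcs) {
		loaded_vmcs_clear(&vmx->vmcs01);
		free_vmcs(vmx->vmcs01.vmcs);
		vmx->vmcs01.vmcs = NULL;
	}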