On Wed, Apr 17, 2013 at 08:09:40PM +0300, Abel Gordon wrote:
> Synchronize between the VMCS12 software-controlled structure and the
> processor-specific shadow vmcs.
>
> Signed-off-by: Abel Gordon <abelg@xxxxxxxxxx>
> ---
>  arch/x86/kvm/vmx.c |   32 ++++++++++++++++++++++++++++++++
>  1 file changed, 32 insertions(+)
>
> --- .before/arch/x86/kvm/vmx.c	2013-04-17 19:58:33.000000000 +0300
> +++ .after/arch/x86/kvm/vmx.c	2013-04-17 19:58:33.000000000 +0300
> @@ -356,6 +356,11 @@ struct nested_vmx {
>  	struct page *current_vmcs12_page;
>  	struct vmcs12 *current_vmcs12;
>  	struct vmcs *current_shadow_vmcs;
> +	/*
> +	 * Indicates if the shadow vmcs must be updated with the
> +	 * data held by vmcs12
> +	 */
> +	bool sync_shadow_vmcs;
>
>  	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
>  	struct list_head vmcs02_pool;
> @@ -5596,6 +5601,14 @@ static int nested_vmx_check_permission(s
>
>  static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
>  {
> +	if (enable_shadow_vmcs) {
> +		if (vmx->nested.current_vmcs12 != NULL) {
> +			/* copy to memory all shadowed fields in case
> +			   they were modified */
> +			copy_shadow_to_vmcs12(vmx);
> +			vmx->nested.sync_shadow_vmcs = false;
> +		}
> +	}
>  	kunmap(vmx->nested.current_vmcs12_page);
>  	nested_release_page(vmx->nested.current_vmcs12_page);
>  }
> @@ -5724,6 +5737,10 @@ static void nested_vmx_failValid(struct
>  			X86_EFLAGS_SF | X86_EFLAGS_OF))
>  			| X86_EFLAGS_ZF);
>  	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
> +	/*
> +	 * We don't need to force a shadow sync because
> +	 * VM_INSTRUCTION_ERROR is not shadowed
> +	 */
>  }
>
>  /* Emulate the VMCLEAR instruction */
> @@ -6122,6 +6139,9 @@ static int handle_vmptrld(struct kvm_vcp
>  		vmx->nested.current_vmptr = vmptr;
>  		vmx->nested.current_vmcs12 = new_vmcs12;
>  		vmx->nested.current_vmcs12_page = page;
> +		if (enable_shadow_vmcs) {
> +			vmx->nested.sync_shadow_vmcs = true;
> +		}
No need for braces around a single statement, per kernel coding style.
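I.e., simply:

		if (enable_shadow_vmcs)
			vmx->nested.sync_shadow_vmcs = true;
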
>  	}
>
>  	nested_vmx_succeed(vcpu);
> @@ -6880,6 +6900,11 @@ static void __noclone vmx_vcpu_run(struc
>  	if (vmx->emulation_required)
>  		return;
>
> +	if (vmx->nested.sync_shadow_vmcs) {
> +		copy_vmcs12_to_shadow(vmx);
> +		vmx->nested.sync_shadow_vmcs = false;
> +	}
> +
>  	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
>  		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
>  	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
> @@ -7490,6 +7515,9 @@ static int nested_vmx_run(struct kvm_vcp
>  	skip_emulated_instruction(vcpu);
>  	vmcs12 = get_vmcs12(vcpu);
>
> +	if (enable_shadow_vmcs)
> +		copy_shadow_to_vmcs12(vmx);
> +
>  	/*
>  	 * The nested entry process starts with enforcing various prerequisites
>  	 * on vmcs12 as required by the Intel SDM, and act appropriately when
> @@ -7932,6 +7960,8 @@ static void nested_vmx_vmexit(struct kvm
>  		nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
>  	} else
>  		nested_vmx_succeed(vcpu);
> +	if (enable_shadow_vmcs)
> +		vmx->nested.sync_shadow_vmcs = true;
>  }
>
>  /*
> @@ -7949,6 +7979,8 @@ static void nested_vmx_entry_failure(str
>  	vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
>  	vmcs12->exit_qualification = qualification;
>  	nested_vmx_succeed(vcpu);
> +	if (enable_shadow_vmcs)
> +		to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
>  }
>
>  static int vmx_check_intercept(struct kvm_vcpu *vcpu,
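
Just to check that I follow the protocol here: the shadow vmcs is synced
lazily, and the lifecycle of sync_shadow_vmcs comes down to roughly this
(my own condensed summary of the hunks above, not code from the patch):

	/* vmptrld, L2->L1 vmexit, or failed nested entry:
	 * the shadow vmcs no longer matches vmcs12 */
	vmx->nested.sync_shadow_vmcs = true;

	/* next vmx_vcpu_run(): refresh the shadow before entering the guest */
	if (vmx->nested.sync_shadow_vmcs) {
		copy_vmcs12_to_shadow(vmx);
		vmx->nested.sync_shadow_vmcs = false;
	}

	/* nested vmlaunch/vmresume and vmcs12 release: flush the fields
	 * L1 may have written through the shadow back to memory */
	copy_shadow_to_vmcs12(vmx);

--
	Gleb.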