Synchronize between the vmcs12 software-controlled structure and the
processor-specific shadow vmcs.

Signed-off-by: Abel Gordon <abelg@xxxxxxxxxx>
---
 arch/x86/kvm/vmx.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

--- .before/arch/x86/kvm/vmx.c	2013-03-10 18:00:55.000000000 +0200
+++ .after/arch/x86/kvm/vmx.c	2013-03-10 18:00:55.000000000 +0200
@@ -1843,6 +1843,8 @@ static int nested_pf_handled(struct kvm_
 		return 0;
 
 	nested_vmx_vmexit(vcpu);
+	if (enable_shadow_vmcs)
+		copy_vmcs12_to_shadow(to_vmx(vcpu));
 	return 1;
 }
 
@@ -4430,6 +4432,8 @@ static int vmx_interrupt_allowed(struct 
 		nested_vmx_vmexit(vcpu);
 		vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
 		vmcs12->vm_exit_intr_info = 0;
+		if (enable_shadow_vmcs)
+			copy_vmcs12_to_shadow(to_vmx(vcpu));
 		/* fall through to normal code, but now in L1, not L2 */
 	}
 
@@ -5490,6 +5494,9 @@ static inline void nested_release_vmcs12
 {
 	if (enable_shadow_vmcs) {
 		if (vmx->nested.current_vmcs12 != NULL) {
+			/* copy to memory all shadowed fields in case
+			   they were modified */
+			copy_shadow_to_vmcs12(vmx);
 			free_vmcs(vmx->nested.current_shadow_vmcs);
 		}
 	}
@@ -6037,6 +6044,7 @@ static int handle_vmptrld(struct kvm_vcp
 			/* init shadow vmcs */
 			vmcs_clear(shadow_vmcs);
 			vmx->nested.current_shadow_vmcs = shadow_vmcs;
+			copy_vmcs12_to_shadow(vmx);
 		}
 	}
 
@@ -6425,6 +6433,8 @@ static int vmx_handle_exit(struct kvm_vc
 
 	if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
 		nested_vmx_vmexit(vcpu);
+		if (enable_shadow_vmcs)
+			copy_vmcs12_to_shadow(vmx);
 		return 1;
 	}
 
@@ -7384,6 +7394,8 @@ static int nested_vmx_run(struct kvm_vcp
 	skip_emulated_instruction(vcpu);
 	vmcs12 = get_vmcs12(vcpu);
 
+	if (enable_shadow_vmcs)
+		copy_shadow_to_vmcs12(vmx);
 	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and act appropriately when
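
Note for readers following the series: copy_vmcs12_to_shadow() and
copy_shadow_to_vmcs12() are introduced in an earlier patch and are not
shown here. A minimal sketch of the vmcs12 -> shadow direction, assuming
the shadow_read_write_fields table and the vmcs_load()/vmcs_clear()
accessors already in vmx.c (field-width dispatch elided for brevity),
could look like this:

	/* Hypothetical sketch, not the exact code from the series */
	static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
	{
		int i;
		unsigned long field;
		u64 field_value;
		struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;

		/* Make the shadow vmcs current so vmcs_write* targets it */
		vmcs_load(shadow_vmcs);

		/* Push each shadowed field from the in-memory vmcs12
		   into the hardware shadow vmcs */
		for (i = 0; i < max_shadow_read_write_fields; i++) {
			field = shadow_read_write_fields[i];
			vmcs12_read_any(&vmx->vcpu, field, &field_value);
			/* real code dispatches on vmcs field width */
			vmcs_write64(field, field_value);
		}

		vmcs_clear(shadow_vmcs);
		/* Return to the vmcs we were executing on */
		vmcs_load(vmx->loaded_vmcs->vmcs);
	}

This direction runs after an emulated exit to L1 and after VMPTRLD, so
that L1's VMREADs against the shadow see up-to-date vmcs12 values.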
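
The reverse direction, invoked in nested_release_vmcs12() and
nested_vmx_run() above before software reads vmcs12, would pull back
any fields L1 modified through VMWRITE; again a sketch under the same
assumptions:

	static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
	{
		int i;
		unsigned long field;
		u64 field_value;
		struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;

		vmcs_load(shadow_vmcs);

		/* Pull each shadowed field out of the hardware shadow
		   vmcs into the in-memory vmcs12 */
		for (i = 0; i < max_shadow_read_write_fields; i++) {
			field = shadow_read_write_fields[i];
			/* real code dispatches on vmcs field width */
			field_value = vmcs_read64(field);
			vmcs12_write_any(&vmx->vcpu, field, field_value);
		}

		vmcs_clear(shadow_vmcs);
		vmcs_load(vmx->loaded_vmcs->vmcs);
	}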