[PATCH v2 3/4] nested vmx: use vmcs12_read/write() to operate VMCS fields

When referencing vmcs12 fields, the current approach is to use the
"struct.field" style. This commit replaces all such direct accesses
with calls to the vmcs12_read() and vmcs12_write() functions.

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
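
Note to reviewers (not part of the commit message): the vmcs12_read()/
vmcs12_write() accessors used below are presumably introduced earlier in
this series; this patch only adds forward declarations for them. Purely as
an illustration of the intended interface -- and assuming a hypothetical
vmcs12_field_to_offset() helper that maps a VMCS field encoding to its byte
offset within struct vmcs12 -- a fallback implementation backed by the
in-memory vmcs12 could look roughly like this:

	/*
	 * Illustrative sketch only, not the implementation from this series.
	 * Bits 14:13 of a VMCS field encoding give the access width.
	 */
	static inline u64 vmcs12_read(struct kvm_vcpu *vcpu, unsigned long field)
	{
		char *p = (char *)get_vmcs12(vcpu) +
			  vmcs12_field_to_offset(field);	/* hypothetical helper */

		switch ((field >> 13) & 0x3) {
		case 0:				/* 16-bit fields */
			return *(u16 *)p;
		case 2:				/* 32-bit fields */
			return *(u32 *)p;
		default:			/* 64-bit and natural-width fields */
			return *(u64 *)p;
		}
	}

	static inline void vmcs12_write(struct kvm_vcpu *vcpu,
					unsigned long field, u64 value)
	{
		char *p = (char *)get_vmcs12(vcpu) +
			  vmcs12_field_to_offset(field);	/* hypothetical helper */

		switch ((field >> 13) & 0x3) {
		case 0:				/* 16-bit fields */
			*(u16 *)p = value;
			break;
		case 2:				/* 32-bit fields */
			*(u32 *)p = value;
			break;
		default:			/* 64-bit and natural-width fields */
			*(u64 *)p = value;
			break;
		}
	}

With these accessors, e.g. the TSC offset recalculation below reads
vmcs12_read(vcpu, TSC_OFFSET) instead of get_vmcs12(vcpu)->tsc_offset.
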
 arch/x86/kvm/vmx.c |  591 ++++++++++++++++++++++++++++------------------------
 1 files changed, 317 insertions(+), 274 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 639cad0..20de88b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -629,6 +629,11 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 static void vmx_get_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 
+static inline u64 vmcs12_read(struct kvm_vcpu *vcpu, unsigned long field);
+static inline void vmcs12_write(struct kvm_vcpu *vcpu,
+				unsigned long field,
+				u64 value);
+
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 /*
@@ -891,19 +896,19 @@ static inline bool report_flexpriority(void)
 
 static inline bool nested_cpu_has(struct kvm_vcpu *vcpu, u32 bit)
 {
-	return get_vmcs12(vcpu)->cpu_based_vm_exec_control & bit;
+	return vmcs12_read(vcpu, CPU_BASED_VM_EXEC_CONTROL) & bit;
 }
 
 static inline bool nested_cpu_has2(struct kvm_vcpu *vcpu, u32 bit)
 {
-	return (get_vmcs12(vcpu)->cpu_based_vm_exec_control &
+	return (vmcs12_read(vcpu, CPU_BASED_VM_EXEC_CONTROL) &
 			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
-		(get_vmcs12(vcpu)->secondary_vm_exec_control & bit);
+		(vmcs12_read(vcpu, SECONDARY_VM_EXEC_CONTROL) & bit);
 }
 
 static inline bool nested_cpu_has_virtual_nmis(struct kvm_vcpu *vcpu)
 {
-	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
+	return vmcs12_read(vcpu, PIN_BASED_VM_EXEC_CONTROL) &
 		PIN_BASED_VIRTUAL_NMIS;
 }
 
@@ -915,7 +920,6 @@ static inline bool is_exception(u32 intr_info)
 
 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
-			struct vmcs12 *vmcs12,
 			u32 reason, unsigned long qualification);
 
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
@@ -1220,7 +1224,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	 * specified above if L1 did not want them.
 	 */
 	if (is_guest_mode(vcpu))
-		eb |= get_vmcs12(vcpu)->exception_bitmap;
+		eb |= vmcs12_read(vcpu, EXCEPTION_BITMAP);
 
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -1582,7 +1586,7 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
 	if (is_guest_mode(vcpu))
 		vcpu->arch.cr0_guest_owned_bits &=
-			~get_vmcs12(vcpu)->cr0_guest_host_mask;
+			~vmcs12_read(vcpu, CR0_GUEST_HOST_MASK);
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
 }
 
@@ -1593,15 +1597,19 @@ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
  * its hypervisor (cr0_read_shadow).
  */
-static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
+static inline unsigned long nested_read_cr0(struct kvm_vcpu *vcpu)
 {
-	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
-		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
+	return (vmcs12_read(vcpu, GUEST_CR0) &
+			~vmcs12_read(vcpu, CR0_GUEST_HOST_MASK)) |
+		(vmcs12_read(vcpu, CR0_READ_SHADOW) &
+			vmcs12_read(vcpu, CR0_GUEST_HOST_MASK));
 }
-static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
+static inline unsigned long nested_read_cr4(struct kvm_vcpu *vcpu)
 {
-	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
-		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
+	return (vmcs12_read(vcpu, GUEST_CR4) &
+			~vmcs12_read(vcpu, CR4_GUEST_HOST_MASK)) |
+		(vmcs12_read(vcpu, CR4_READ_SHADOW) &
+			vmcs12_read(vcpu, CR4_GUEST_HOST_MASK));
 }
 
 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
@@ -1623,10 +1631,10 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 		 * up-to-date here because we just decached cr0.TS (and we'll
 		 * only update vmcs12->guest_cr0 on nested exit).
 		 */
-		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-		vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
+		u64 guest_cr0 = (vmcs12_read(vcpu, GUEST_CR0) & ~X86_CR0_TS) |
 			(vcpu->arch.cr0 & X86_CR0_TS);
-		vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
+		vmcs12_write(vcpu, GUEST_CR0, guest_cr0);
+		vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vcpu));
 	} else
 		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
 }
@@ -1710,10 +1718,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
  */
 static int nested_pf_handled(struct kvm_vcpu *vcpu)
 {
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
 	/* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
-	if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
+	if (!(vmcs12_read(vcpu, EXCEPTION_BITMAP) & (1u << PF_VECTOR)))
 		return 0;
 
 	nested_vmx_vmexit(vcpu);
@@ -1883,13 +1889,11 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 		 * set for L2 remains unchanged, and still needs to be added
 		 * to the newly set TSC to get L2's TSC.
 		 */
-		struct vmcs12 *vmcs12;
 		to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
 		/* recalculate vmcs02.TSC_OFFSET: */
-		vmcs12 = get_vmcs12(vcpu);
 		vmcs_write64(TSC_OFFSET, offset +
 			(nested_cpu_has(vcpu, CPU_BASED_USE_TSC_OFFSETING) ?
-			 vmcs12->tsc_offset : 0));
+			 vmcs12_read(vcpu, TSC_OFFSET) : 0));
 	} else {
 		vmcs_write64(TSC_OFFSET, offset);
 	}
@@ -3758,7 +3762,7 @@ static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 		vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
 	if (is_guest_mode(&vmx->vcpu))
 		vmx->vcpu.arch.cr4_guest_owned_bits &=
-			~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
+			~vmcs12_read(&vmx->vcpu, CR4_GUEST_HOST_MASK);
 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
 }
 
@@ -4030,7 +4034,7 @@ out:
  */
 static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
 {
-	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
+	return vmcs12_read(vcpu, PIN_BASED_VM_EXEC_CONTROL) &
 		PIN_BASED_EXT_INTR_MASK;
 }
 
@@ -4170,14 +4174,14 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
-		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 		if (to_vmx(vcpu)->nested.nested_run_pending ||
-		    (vmcs12->idt_vectoring_info_field &
+		    (vmcs12_read(vcpu, IDT_VECTORING_INFO_FIELD) &
 		     VECTORING_INFO_VALID_MASK))
 			return 0;
 		nested_vmx_vmexit(vcpu);
-		vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
-		vmcs12->vm_exit_intr_info = 0;
+		vmcs12_write(vcpu, VM_EXIT_REASON,
+			EXIT_REASON_EXTERNAL_INTERRUPT);
+		vmcs12_write(vcpu, VM_EXIT_INTR_INFO, 0);
 		/* fall through to normal code, but now in L1, not L2 */
 	}
 
@@ -5308,7 +5312,7 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
 			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
 			    X86_EFLAGS_SF | X86_EFLAGS_OF))
 			| X86_EFLAGS_ZF);
-	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
+	vmcs12_write(vcpu, VM_INSTRUCTION_ERROR, vm_instruction_error);
 }
 
 /* Emulate the VMCLEAR instruction */
@@ -5707,7 +5711,7 @@ static const int kvm_vmx_max_exit_handlers =
  * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
  */
 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
-	struct vmcs12 *vmcs12, u32 exit_reason)
+	u32 exit_reason)
 {
 	u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
 	gpa_t bitmap;
@@ -5720,7 +5724,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
 	 * for the four combinations of read/write and low/high MSR numbers.
 	 * First we need to figure out which of the four to use:
 	 */
-	bitmap = vmcs12->msr_bitmap;
+	bitmap = vmcs12_read(vcpu, MSR_BITMAP);
 	if (exit_reason == EXIT_REASON_MSR_WRITE)
 		bitmap += 2048;
 	if (msr_index >= 0xc0000000) {
@@ -5742,8 +5746,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
  * rather than handle it ourselves in L0. I.e., check if L1 wanted to
  * intercept (via guest_host_mask etc.) the current event.
  */
-static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
-	struct vmcs12 *vmcs12)
+static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	int cr = exit_qualification & 15;
@@ -5754,26 +5757,26 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 	case 0: /* mov to cr */
 		switch (cr) {
 		case 0:
-			if (vmcs12->cr0_guest_host_mask &
-			    (val ^ vmcs12->cr0_read_shadow))
+			if (vmcs12_read(vcpu, CR0_GUEST_HOST_MASK) &
+			    (val ^ vmcs12_read(vcpu, CR0_READ_SHADOW)))
 				return 1;
 			break;
 		case 3:
-			if ((vmcs12->cr3_target_count >= 1 &&
-					vmcs12->cr3_target_value0 == val) ||
-				(vmcs12->cr3_target_count >= 2 &&
-					vmcs12->cr3_target_value1 == val) ||
-				(vmcs12->cr3_target_count >= 3 &&
-					vmcs12->cr3_target_value2 == val) ||
-				(vmcs12->cr3_target_count >= 4 &&
-					vmcs12->cr3_target_value3 == val))
+			if ((vmcs12_read(vcpu, CR3_TARGET_COUNT) >= 1 &&
+				vmcs12_read(vcpu, CR3_TARGET_VALUE0) == val) ||
+				(vmcs12_read(vcpu, CR3_TARGET_COUNT) >= 2 &&
+				vmcs12_read(vcpu, CR3_TARGET_VALUE1) == val) ||
+				(vmcs12_read(vcpu, CR3_TARGET_COUNT) >= 3 &&
+				vmcs12_read(vcpu, CR3_TARGET_VALUE2) == val) ||
+				(vmcs12_read(vcpu, CR3_TARGET_COUNT) >= 4 &&
+				vmcs12_read(vcpu, CR3_TARGET_VALUE3) == val))
 				return 0;
 			if (nested_cpu_has(vcpu, CPU_BASED_CR3_LOAD_EXITING))
 				return 1;
 			break;
 		case 4:
-			if (vmcs12->cr4_guest_host_mask &
-			    (vmcs12->cr4_read_shadow ^ val))
+			if (vmcs12_read(vcpu, CR4_GUEST_HOST_MASK) &
+			    (vmcs12_read(vcpu, CR4_READ_SHADOW) ^ val))
 				return 1;
 			break;
 		case 8:
@@ -5783,19 +5786,19 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 		}
 		break;
 	case 2: /* clts */
-		if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
-		    (vmcs12->cr0_read_shadow & X86_CR0_TS))
+		if ((vmcs12_read(vcpu, CR0_GUEST_HOST_MASK) & X86_CR0_TS) &&
+		    (vmcs12_read(vcpu, CR0_READ_SHADOW) & X86_CR0_TS))
 			return 1;
 		break;
 	case 1: /* mov from cr */
 		switch (cr) {
 		case 3:
-			if (vmcs12->cpu_based_vm_exec_control &
+			if (vmcs12_read(vcpu, CPU_BASED_VM_EXEC_CONTROL) &
 			    CPU_BASED_CR3_STORE_EXITING)
 				return 1;
 			break;
 		case 8:
-			if (vmcs12->cpu_based_vm_exec_control &
+			if (vmcs12_read(vcpu, CPU_BASED_VM_EXEC_CONTROL) &
 			    CPU_BASED_CR8_STORE_EXITING)
 				return 1;
 			break;
@@ -5806,11 +5809,11 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
 		 * cr0. Other attempted changes are ignored, with no exit.
 		 */
-		if (vmcs12->cr0_guest_host_mask & 0xe &
-		    (val ^ vmcs12->cr0_read_shadow))
+		if (vmcs12_read(vcpu, CR0_GUEST_HOST_MASK) & 0xe &
+		    (val ^ vmcs12_read(vcpu, CR0_READ_SHADOW)))
 			return 1;
-		if ((vmcs12->cr0_guest_host_mask & 0x1) &&
-		    !(vmcs12->cr0_read_shadow & 0x1) &&
+		if ((vmcs12_read(vcpu, CR0_GUEST_HOST_MASK) & 0x1) &&
+		    !(vmcs12_read(vcpu, CR0_READ_SHADOW) & 0x1) &&
 		    (val & 0x1))
 			return 1;
 		break;
@@ -5828,7 +5831,6 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
 	u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
 	if (vmx->nested.nested_run_pending)
 		return 0;
@@ -5845,7 +5847,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 			return 0;
 		else if (is_page_fault(intr_info))
 			return enable_ept;
-		return vmcs12->exception_bitmap &
+		return vmcs12_read(vcpu, EXCEPTION_BITMAP) &
 				(1u << (intr_info & INTR_INFO_VECTOR_MASK));
 	case EXIT_REASON_EXTERNAL_INTERRUPT:
 		return 0;
@@ -5885,7 +5887,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 		 */
 		return 1;
 	case EXIT_REASON_CR_ACCESS:
-		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
+		return nested_vmx_exit_handled_cr(vcpu);
 	case EXIT_REASON_DR_ACCESS:
 		return nested_cpu_has(vcpu, CPU_BASED_MOV_DR_EXITING);
 	case EXIT_REASON_IO_INSTRUCTION:
@@ -5893,7 +5895,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 		return 1;
 	case EXIT_REASON_MSR_READ:
 	case EXIT_REASON_MSR_WRITE:
-		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
+		return nested_vmx_exit_handled_msr(vcpu, exit_reason);
 	case EXIT_REASON_INVALID_STATE:
 		return 1;
 	case EXIT_REASON_MWAIT_INSTRUCTION:
@@ -6192,17 +6194,21 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	unsigned long debugctlmsr;
 
 	if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
-		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-		if (vmcs12->idt_vectoring_info_field &
-				VECTORING_INFO_VALID_MASK) {
+		u64 idt_vectoring_info_field = vmcs12_read(vcpu,
+						IDT_VECTORING_INFO_FIELD);
+		u64 vm_exit_instruction_len = vmcs12_read(vcpu,
+						VM_EXIT_INSTRUCTION_LEN);
+		u64 idt_vectoring_error_code = vmcs12_read(vcpu,
+						IDT_VECTORING_ERROR_CODE);
+		if (idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) {
 			vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-				vmcs12->idt_vectoring_info_field);
+				     idt_vectoring_info_field);
 			vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
-				vmcs12->vm_exit_instruction_len);
-			if (vmcs12->idt_vectoring_info_field &
+				     vm_exit_instruction_len);
+			if (idt_vectoring_info_field &
 					VECTORING_INFO_DELIVER_CODE_MASK)
 				vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
-					vmcs12->idt_vectoring_error_code);
+					idt_vectoring_error_code);
 		}
 	}
 
@@ -6365,13 +6371,13 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
 	if (is_guest_mode(vcpu)) {
-		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-		vmcs12->idt_vectoring_info_field = vmx->idt_vectoring_info;
+		vmcs12_write(vcpu, IDT_VECTORING_INFO_FIELD,
+				vmx->idt_vectoring_info);
 		if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
-			vmcs12->idt_vectoring_error_code =
-				vmcs_read32(IDT_VECTORING_ERROR_CODE);
-			vmcs12->vm_exit_instruction_len =
-				vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+			vmcs12_write(vcpu, IDT_VECTORING_ERROR_CODE,
+				vmcs_read32(IDT_VECTORING_ERROR_CODE));
+			vmcs12_write(vcpu, VM_EXIT_INSTRUCTION_LEN,
+				vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
 		}
 	}
 
@@ -6582,71 +6588,75 @@ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
  * function also has additional necessary side-effects, like setting various
  * vcpu->arch fields.
  */
-static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+static void prepare_vmcs02(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exec_control;
 
-	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
-	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
-	vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
-	vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
-	vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
-	vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
-	vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
-	vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
-	vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
-	vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
-	vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
-	vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
-	vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
-	vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
-	vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
-	vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
-	vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
-	vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
-	vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
-	vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
-	vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
-	vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
-	vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
-	vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
-	vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
-	vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
-	vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
-	vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
-	vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
-	vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
-	vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
-	vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
-	vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
-	vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
-	vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
-	vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
-
-	vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+	vmcs_write16(GUEST_ES_SELECTOR, vmcs12_read(vcpu, GUEST_ES_SELECTOR));
+	vmcs_write16(GUEST_CS_SELECTOR, vmcs12_read(vcpu, GUEST_CS_SELECTOR));
+	vmcs_write16(GUEST_SS_SELECTOR, vmcs12_read(vcpu, GUEST_SS_SELECTOR));
+	vmcs_write16(GUEST_DS_SELECTOR, vmcs12_read(vcpu, GUEST_DS_SELECTOR));
+	vmcs_write16(GUEST_FS_SELECTOR, vmcs12_read(vcpu, GUEST_FS_SELECTOR));
+	vmcs_write16(GUEST_GS_SELECTOR, vmcs12_read(vcpu, GUEST_GS_SELECTOR));
+	vmcs_write16(GUEST_LDTR_SELECTOR,
+		vmcs12_read(vcpu, GUEST_LDTR_SELECTOR));
+	vmcs_write16(GUEST_TR_SELECTOR, vmcs12_read(vcpu, GUEST_TR_SELECTOR));
+	vmcs_write32(GUEST_ES_LIMIT, vmcs12_read(vcpu, GUEST_ES_LIMIT));
+	vmcs_write32(GUEST_CS_LIMIT, vmcs12_read(vcpu, GUEST_CS_LIMIT));
+	vmcs_write32(GUEST_SS_LIMIT, vmcs12_read(vcpu, GUEST_SS_LIMIT));
+	vmcs_write32(GUEST_DS_LIMIT, vmcs12_read(vcpu, GUEST_DS_LIMIT));
+	vmcs_write32(GUEST_FS_LIMIT, vmcs12_read(vcpu, GUEST_FS_LIMIT));
+	vmcs_write32(GUEST_GS_LIMIT, vmcs12_read(vcpu, GUEST_GS_LIMIT));
+	vmcs_write32(GUEST_LDTR_LIMIT, vmcs12_read(vcpu, GUEST_LDTR_LIMIT));
+	vmcs_write32(GUEST_TR_LIMIT, vmcs12_read(vcpu, GUEST_TR_LIMIT));
+	vmcs_write32(GUEST_GDTR_LIMIT, vmcs12_read(vcpu, GUEST_GDTR_LIMIT));
+	vmcs_write32(GUEST_IDTR_LIMIT, vmcs12_read(vcpu, GUEST_IDTR_LIMIT));
+	vmcs_write32(GUEST_ES_AR_BYTES, vmcs12_read(vcpu, GUEST_ES_AR_BYTES));
+	vmcs_write32(GUEST_CS_AR_BYTES, vmcs12_read(vcpu, GUEST_CS_AR_BYTES));
+	vmcs_write32(GUEST_SS_AR_BYTES, vmcs12_read(vcpu, GUEST_SS_AR_BYTES));
+	vmcs_write32(GUEST_DS_AR_BYTES, vmcs12_read(vcpu, GUEST_DS_AR_BYTES));
+	vmcs_write32(GUEST_FS_AR_BYTES, vmcs12_read(vcpu, GUEST_FS_AR_BYTES));
+	vmcs_write32(GUEST_GS_AR_BYTES, vmcs12_read(vcpu, GUEST_GS_AR_BYTES));
+	vmcs_write32(GUEST_LDTR_AR_BYTES,
+		vmcs12_read(vcpu, GUEST_LDTR_AR_BYTES));
+	vmcs_write32(GUEST_TR_AR_BYTES, vmcs12_read(vcpu, GUEST_TR_AR_BYTES));
+	vmcs_writel(GUEST_ES_BASE, vmcs12_read(vcpu, GUEST_ES_BASE));
+	vmcs_writel(GUEST_CS_BASE, vmcs12_read(vcpu, GUEST_CS_BASE));
+	vmcs_writel(GUEST_SS_BASE, vmcs12_read(vcpu, GUEST_SS_BASE));
+	vmcs_writel(GUEST_DS_BASE, vmcs12_read(vcpu, GUEST_DS_BASE));
+	vmcs_writel(GUEST_FS_BASE, vmcs12_read(vcpu, GUEST_FS_BASE));
+	vmcs_writel(GUEST_GS_BASE, vmcs12_read(vcpu, GUEST_GS_BASE));
+	vmcs_writel(GUEST_LDTR_BASE, vmcs12_read(vcpu, GUEST_LDTR_BASE));
+	vmcs_writel(GUEST_TR_BASE, vmcs12_read(vcpu, GUEST_TR_BASE));
+	vmcs_writel(GUEST_GDTR_BASE, vmcs12_read(vcpu, GUEST_GDTR_BASE));
+	vmcs_writel(GUEST_IDTR_BASE, vmcs12_read(vcpu, GUEST_IDTR_BASE));
+
+	vmcs_write64(GUEST_IA32_DEBUGCTL,
+		vmcs12_read(vcpu, GUEST_IA32_DEBUGCTL));
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-		vmcs12->vm_entry_intr_info_field);
+		vmcs12_read(vcpu, VM_ENTRY_INTR_INFO_FIELD));
 	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
-		vmcs12->vm_entry_exception_error_code);
+		vmcs12_read(vcpu, VM_ENTRY_EXCEPTION_ERROR_CODE));
 	vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
-		vmcs12->vm_entry_instruction_len);
+		vmcs12_read(vcpu, VM_ENTRY_INSTRUCTION_LEN));
 	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
-		vmcs12->guest_interruptibility_info);
-	vmcs_write32(GUEST_ACTIVITY_STATE, vmcs12->guest_activity_state);
-	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
-	vmcs_writel(GUEST_DR7, vmcs12->guest_dr7);
-	vmcs_writel(GUEST_RFLAGS, vmcs12->guest_rflags);
+		vmcs12_read(vcpu, GUEST_INTERRUPTIBILITY_INFO));
+	vmcs_write32(GUEST_ACTIVITY_STATE,
+		vmcs12_read(vcpu, GUEST_ACTIVITY_STATE));
+	vmcs_write32(GUEST_SYSENTER_CS, vmcs12_read(vcpu, GUEST_SYSENTER_CS));
+	vmcs_writel(GUEST_DR7, vmcs12_read(vcpu, GUEST_DR7));
+	vmcs_writel(GUEST_RFLAGS, vmcs12_read(vcpu, GUEST_RFLAGS));
 	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
-		vmcs12->guest_pending_dbg_exceptions);
-	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
-	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+		vmcs12_read(vcpu, GUEST_PENDING_DBG_EXCEPTIONS));
+	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12_read(vcpu, GUEST_SYSENTER_ESP));
+	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12_read(vcpu, GUEST_SYSENTER_EIP));
 
 	vmcs_write64(VMCS_LINK_POINTER, -1ull);
 
 	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
 		(vmcs_config.pin_based_exec_ctrl |
-		 vmcs12->pin_based_vm_exec_control));
+		 vmcs12_read(vcpu, PIN_BASED_VM_EXEC_CONTROL)));
 
 	/*
 	 * Whether page-faults are trapped is determined by a combination of
@@ -6669,9 +6679,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * page tables), using walk_addr(), when injecting PFs to L1.
 	 */
 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
-		enable_ept ? vmcs12->page_fault_error_code_mask : 0);
+		enable_ept ? vmcs12_read(vcpu, PAGE_FAULT_ERROR_CODE_MASK) : 0);
 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
-		enable_ept ? vmcs12->page_fault_error_code_match : 0);
+		enable_ept ?
+		vmcs12_read(vcpu, PAGE_FAULT_ERROR_CODE_MATCH) :
+		0);
 
 	if (cpu_has_secondary_exec_ctrls()) {
 		u32 exec_control = vmx_secondary_exec_control(vmx);
@@ -6680,7 +6692,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		/* Take the following fields only from vmcs12 */
 		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
 		if (nested_cpu_has(vcpu, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
-			exec_control |= vmcs12->secondary_vm_exec_control;
+			exec_control |=
+				vmcs12_read(vcpu, SECONDARY_VM_EXEC_CONTROL);
 
 		if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
 			/*
@@ -6692,7 +6705,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 			if (vmx->nested.apic_access_page) /* shouldn't happen */
 				nested_release_page(vmx->nested.apic_access_page);
 			vmx->nested.apic_access_page =
-				nested_get_page(vcpu, vmcs12->apic_access_addr);
+				nested_get_page(vcpu,
+					vmcs12_read(vcpu, APIC_ACCESS_ADDR));
 			/*
 			 * If translation failed, no matter: This feature asks
 			 * to exit when accessing the given address, and if it
@@ -6732,7 +6746,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
 	exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
 	exec_control &= ~CPU_BASED_TPR_SHADOW;
-	exec_control |= vmcs12->cpu_based_vm_exec_control;
+	exec_control |= vmcs12_read(vcpu, CPU_BASED_VM_EXEC_CONTROL);
 	/*
 	 * Merging of IO and MSR bitmaps not currently supported.
 	 * Rather, exit every time.
@@ -6748,26 +6762,29 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * trap. Note that CR0.TS also needs updating - we do this later.
 	 */
 	update_exception_bitmap(vcpu);
-	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
+	vcpu->arch.cr0_guest_owned_bits &=
+		~vmcs12_read(vcpu, CR0_GUEST_HOST_MASK);
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
 
 	/* Note: IA32_MODE, LOAD_IA32_EFER are modified by vmx_set_efer below */
 	vmcs_write32(VM_EXIT_CONTROLS,
-		vmcs12->vm_exit_controls | vmcs_config.vmexit_ctrl);
-	vmcs_write32(VM_ENTRY_CONTROLS, vmcs12->vm_entry_controls |
+		vmcs12_read(vcpu, VM_EXIT_CONTROLS) | vmcs_config.vmexit_ctrl);
+	vmcs_write32(VM_ENTRY_CONTROLS, vmcs12_read(vcpu, VM_ENTRY_CONTROLS) |
 		(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
 
-	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)
-		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
+	if (vmcs12_read(vcpu, VM_ENTRY_CONTROLS) & VM_ENTRY_LOAD_IA32_PAT)
+		vmcs_write64(GUEST_IA32_PAT, vmcs12_read(vcpu, GUEST_IA32_PAT));
 	else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
 
 
 	set_cr4_guest_host_mask(vmx);
 
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+	if (vmcs12_read(vcpu, CPU_BASED_VM_EXEC_CONTROL) &
+		CPU_BASED_USE_TSC_OFFSETING)
 		vmcs_write64(TSC_OFFSET,
-			vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+			vmx->nested.vmcs01_tsc_offset +
+				vmcs12_read(vcpu, TSC_OFFSET));
 	else
 		vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
 
@@ -6781,9 +6798,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		vmx_flush_tlb(vcpu);
 	}
 
-	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
-		vcpu->arch.efer = vmcs12->guest_ia32_efer;
-	if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
+	if (vmcs12_read(vcpu, VM_ENTRY_CONTROLS) & VM_ENTRY_LOAD_IA32_EFER)
+		vcpu->arch.efer = vmcs12_read(vcpu, GUEST_IA32_EFER);
+	if (vmcs12_read(vcpu, VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE)
 		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
 	else
 		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
@@ -6798,18 +6815,18 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we
 	 * have more bits than L1 expected.
 	 */
-	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
-	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
+	vmx_set_cr0(vcpu, vmcs12_read(vcpu, GUEST_CR0));
+	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vcpu));
 
-	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
-	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
+	vmx_set_cr4(vcpu, vmcs12_read(vcpu, GUEST_CR4));
+	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vcpu));
 
 	/* shadow page tables on either EPT or shadow page tables */
-	kvm_set_cr3(vcpu, vmcs12->guest_cr3);
+	kvm_set_cr3(vcpu, vmcs12_read(vcpu, GUEST_CR3));
 	kvm_mmu_reset_context(vcpu);
 
-	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
-	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12_read(vcpu, GUEST_RSP));
+	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12_read(vcpu, GUEST_RIP));
 }
 
 /*
@@ -6847,59 +6864,64 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
-	if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
-			!IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) {
+	if ((vmcs12_read(vcpu, CPU_BASED_VM_EXEC_CONTROL) &
+		CPU_BASED_USE_MSR_BITMAPS) &&
+			!IS_ALIGNED(vmcs12_read(vcpu, MSR_BITMAP), PAGE_SIZE)) {
 		/*TODO: Also verify bits beyond physical address width are 0*/
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;
 	}
 
 	if (nested_cpu_has2(vcpu, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
-			!IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
+		!IS_ALIGNED(vmcs12_read(vcpu, APIC_ACCESS_ADDR), PAGE_SIZE)) {
 		/*TODO: Also verify bits beyond physical address width are 0*/
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;
 	}
 
-	if (vmcs12->vm_entry_msr_load_count > 0 ||
-	    vmcs12->vm_exit_msr_load_count > 0 ||
-	    vmcs12->vm_exit_msr_store_count > 0) {
+	if (vmcs12_read(vcpu, VM_ENTRY_MSR_LOAD_COUNT) > 0 ||
+	    vmcs12_read(vcpu, VM_EXIT_MSR_LOAD_COUNT) > 0 ||
+	    vmcs12_read(vcpu, VM_EXIT_MSR_STORE_COUNT) > 0) {
 		pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
 				    __func__);
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;
 	}
 
-	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
+	if (!vmx_control_verify(vmcs12_read(vcpu, CPU_BASED_VM_EXEC_CONTROL),
 	      nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) ||
-	    !vmx_control_verify(vmcs12->secondary_vm_exec_control,
+	    !vmx_control_verify(vmcs12_read(vcpu, SECONDARY_VM_EXEC_CONTROL),
 	      nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
-	    !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
+	    !vmx_control_verify(vmcs12_read(vcpu, PIN_BASED_VM_EXEC_CONTROL),
 	      nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
-	    !vmx_control_verify(vmcs12->vm_exit_controls,
+	    !vmx_control_verify(vmcs12_read(vcpu, VM_EXIT_CONTROLS),
 	      nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) ||
-	    !vmx_control_verify(vmcs12->vm_entry_controls,
+	    !vmx_control_verify(vmcs12_read(vcpu, VM_ENTRY_CONTROLS),
 	      nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high))
 	{
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;
 	}
 
-	if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
-	    ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+	if (((vmcs12_read(vcpu, HOST_CR0) & VMXON_CR0_ALWAYSON) !=
+		VMXON_CR0_ALWAYSON) ||
+	    ((vmcs12_read(vcpu, HOST_CR4) & VMXON_CR4_ALWAYSON) !=
+		VMXON_CR4_ALWAYSON)) {
 		nested_vmx_failValid(vcpu,
 			VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
 		return 1;
 	}
 
-	if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
-	    ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
-		nested_vmx_entry_failure(vcpu, vmcs12,
+	if (((vmcs12_read(vcpu, GUEST_CR0) & VMXON_CR0_ALWAYSON) !=
+		VMXON_CR0_ALWAYSON) ||
+	    ((vmcs12_read(vcpu, GUEST_CR4) & VMXON_CR4_ALWAYSON) !=
+		VMXON_CR4_ALWAYSON)) {
+		nested_vmx_entry_failure(vcpu,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
 		return 1;
 	}
-	if (vmcs12->vmcs_link_pointer != -1ull) {
-		nested_vmx_entry_failure(vcpu, vmcs12,
+	if (vmcs12_read(vcpu, VMCS_LINK_POINTER) != -1ull) {
+		nested_vmx_entry_failure(vcpu,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
 		return 1;
 	}
@@ -6926,7 +6948,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 
 	vmcs12->launch_state = 1;
 
-	prepare_vmcs02(vcpu, vmcs12);
+	prepare_vmcs02(vcpu);
 
 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -6955,22 +6977,26 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
  *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
  */
 static inline unsigned long
-vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+vmcs12_guest_cr0(struct kvm_vcpu *vcpu)
 {
 	return
 	/*1*/	(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
-	/*2*/	(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
-	/*3*/	(vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
+	/*2*/	(vmcs12_read(vcpu, GUEST_CR0) &
+			vmcs12_read(vcpu, CR0_GUEST_HOST_MASK)) |
+	/*3*/	(vmcs_readl(CR0_READ_SHADOW) &
+			~(vmcs12_read(vcpu, CR0_GUEST_HOST_MASK) |
 			vcpu->arch.cr0_guest_owned_bits));
 }
 
 static inline unsigned long
-vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+vmcs12_guest_cr4(struct kvm_vcpu *vcpu)
 {
 	return
 	/*1*/	(vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
-	/*2*/	(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
-	/*3*/	(vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
+	/*2*/	(vmcs12_read(vcpu, GUEST_CR4) &
+			vmcs12_read(vcpu, CR4_GUEST_HOST_MASK)) |
+	/*3*/	(vmcs_readl(CR4_READ_SHADOW) &
+			~(vmcs12_read(vcpu, CR4_GUEST_HOST_MASK) |
 			vcpu->arch.cr4_guest_owned_bits));
 }
 
@@ -6985,86 +7011,100 @@ vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
  * exit-information fields only. Other fields are modified by L1 with VMWRITE,
  * which already writes to vmcs12 directly.
  */
-void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+void prepare_vmcs12(struct kvm_vcpu *vcpu)
 {
+	u64 guest_dr7;
 	/* update guest state fields: */
-	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
-	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
-
-	kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
-	vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
-	vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
-	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
-
-	vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
-	vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
-	vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
-	vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
-	vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
-	vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
-	vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
-	vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
-	vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
-	vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
-	vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
-	vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
-	vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
-	vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
-	vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
-	vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
-	vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
-	vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
-	vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
-	vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
-	vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
-	vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
-	vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
-	vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
-	vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
-	vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
-	vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
-	vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
-	vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
-	vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
-	vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
-	vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
-	vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
-	vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
-	vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
-	vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
-
-	vmcs12->guest_activity_state = vmcs_read32(GUEST_ACTIVITY_STATE);
-	vmcs12->guest_interruptibility_info =
-		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
-	vmcs12->guest_pending_dbg_exceptions =
-		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
+	vmcs12_write(vcpu, GUEST_CR0, vmcs12_guest_cr0(vcpu));
+	vmcs12_write(vcpu, GUEST_CR4, vmcs12_guest_cr4(vcpu));
+
+	kvm_get_dr(vcpu, 7, (unsigned long *)&guest_dr7);
+	vmcs12_write(vcpu, GUEST_DR7, guest_dr7);
+	vmcs12_write(vcpu, GUEST_RSP, kvm_register_read(vcpu, VCPU_REGS_RSP));
+	vmcs12_write(vcpu, GUEST_RIP, kvm_register_read(vcpu, VCPU_REGS_RIP));
+	vmcs12_write(vcpu, GUEST_RFLAGS, vmcs_readl(GUEST_RFLAGS));
+
+	vmcs12_write(vcpu, GUEST_ES_SELECTOR, vmcs_read16(GUEST_ES_SELECTOR));
+	vmcs12_write(vcpu, GUEST_CS_SELECTOR, vmcs_read16(GUEST_CS_SELECTOR));
+	vmcs12_write(vcpu, GUEST_SS_SELECTOR, vmcs_read16(GUEST_SS_SELECTOR));
+	vmcs12_write(vcpu, GUEST_DS_SELECTOR, vmcs_read16(GUEST_DS_SELECTOR));
+	vmcs12_write(vcpu, GUEST_FS_SELECTOR, vmcs_read16(GUEST_FS_SELECTOR));
+	vmcs12_write(vcpu, GUEST_GS_SELECTOR, vmcs_read16(GUEST_GS_SELECTOR));
+	vmcs12_write(vcpu, GUEST_LDTR_SELECTOR,
+		vmcs_read16(GUEST_LDTR_SELECTOR));
+	vmcs12_write(vcpu, GUEST_TR_SELECTOR, vmcs_read16(GUEST_TR_SELECTOR));
+	vmcs12_write(vcpu, GUEST_ES_LIMIT, vmcs_read32(GUEST_ES_LIMIT));
+	vmcs12_write(vcpu, GUEST_CS_LIMIT, vmcs_read32(GUEST_CS_LIMIT));
+	vmcs12_write(vcpu, GUEST_SS_LIMIT, vmcs_read32(GUEST_SS_LIMIT));
+	vmcs12_write(vcpu, GUEST_DS_LIMIT, vmcs_read32(GUEST_DS_LIMIT));
+	vmcs12_write(vcpu, GUEST_FS_LIMIT, vmcs_read32(GUEST_FS_LIMIT));
+	vmcs12_write(vcpu, GUEST_GS_LIMIT, vmcs_read32(GUEST_GS_LIMIT));
+	vmcs12_write(vcpu, GUEST_LDTR_LIMIT, vmcs_read32(GUEST_LDTR_LIMIT));
+	vmcs12_write(vcpu, GUEST_TR_LIMIT, vmcs_read32(GUEST_TR_LIMIT));
+	vmcs12_write(vcpu, GUEST_GDTR_LIMIT, vmcs_read32(GUEST_GDTR_LIMIT));
+	vmcs12_write(vcpu, GUEST_IDTR_LIMIT, vmcs_read32(GUEST_IDTR_LIMIT));
+	vmcs12_write(vcpu, GUEST_ES_AR_BYTES, vmcs_read32(GUEST_ES_AR_BYTES));
+	vmcs12_write(vcpu, GUEST_CS_AR_BYTES, vmcs_read32(GUEST_CS_AR_BYTES));
+	vmcs12_write(vcpu, GUEST_SS_AR_BYTES, vmcs_read32(GUEST_SS_AR_BYTES));
+	vmcs12_write(vcpu, GUEST_DS_AR_BYTES, vmcs_read32(GUEST_DS_AR_BYTES));
+	vmcs12_write(vcpu, GUEST_FS_AR_BYTES, vmcs_read32(GUEST_FS_AR_BYTES));
+	vmcs12_write(vcpu, GUEST_GS_AR_BYTES, vmcs_read32(GUEST_GS_AR_BYTES));
+	vmcs12_write(vcpu, GUEST_LDTR_AR_BYTES,
+		vmcs_read32(GUEST_LDTR_AR_BYTES));
+	vmcs12_write(vcpu, GUEST_TR_AR_BYTES, vmcs_read32(GUEST_TR_AR_BYTES));
+	vmcs12_write(vcpu, GUEST_ES_BASE, vmcs_readl(GUEST_ES_BASE));
+	vmcs12_write(vcpu, GUEST_CS_BASE, vmcs_readl(GUEST_CS_BASE));
+	vmcs12_write(vcpu, GUEST_SS_BASE, vmcs_readl(GUEST_SS_BASE));
+	vmcs12_write(vcpu, GUEST_DS_BASE, vmcs_readl(GUEST_DS_BASE));
+	vmcs12_write(vcpu, GUEST_FS_BASE, vmcs_readl(GUEST_FS_BASE));
+	vmcs12_write(vcpu, GUEST_GS_BASE, vmcs_readl(GUEST_GS_BASE));
+	vmcs12_write(vcpu, GUEST_LDTR_BASE, vmcs_readl(GUEST_LDTR_BASE));
+	vmcs12_write(vcpu, GUEST_TR_BASE, vmcs_readl(GUEST_TR_BASE));
+	vmcs12_write(vcpu, GUEST_GDTR_BASE, vmcs_readl(GUEST_GDTR_BASE));
+	vmcs12_write(vcpu, GUEST_IDTR_BASE, vmcs_readl(GUEST_IDTR_BASE));
+
+	vmcs12_write(vcpu, GUEST_ACTIVITY_STATE,
+		vmcs_read32(GUEST_ACTIVITY_STATE));
+	vmcs12_write(vcpu, GUEST_INTERRUPTIBILITY_INFO,
+		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO));
+	vmcs12_write(vcpu, GUEST_PENDING_DBG_EXCEPTIONS,
+		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
 
 	/* TODO: These cannot have changed unless we have MSR bitmaps and
 	 * the relevant bit asks not to trap the change */
-	vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
-	if (vmcs12->vm_entry_controls & VM_EXIT_SAVE_IA32_PAT)
-		vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
-	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
-	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
-	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
+	vmcs12_write(vcpu, GUEST_IA32_DEBUGCTL,
+		vmcs_read64(GUEST_IA32_DEBUGCTL));
+	if (vmcs12_read(vcpu, VM_ENTRY_CONTROLS) & VM_EXIT_SAVE_IA32_PAT)
+		vmcs12_write(vcpu, GUEST_IA32_PAT,
+			vmcs_read64(GUEST_IA32_PAT));
+	vmcs12_write(vcpu, GUEST_SYSENTER_CS, vmcs_read32(GUEST_SYSENTER_CS));
+	vmcs12_write(vcpu, GUEST_SYSENTER_ESP, vmcs_readl(GUEST_SYSENTER_ESP));
+	vmcs12_write(vcpu, GUEST_SYSENTER_EIP, vmcs_readl(GUEST_SYSENTER_EIP));
 
 	/* update exit information fields: */
 
-	vmcs12->vm_exit_reason  = vmcs_read32(VM_EXIT_REASON);
-	vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
-	vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-	vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
-	vmcs12->idt_vectoring_info_field =
-		vmcs_read32(IDT_VECTORING_INFO_FIELD);
-	vmcs12->idt_vectoring_error_code =
-		vmcs_read32(IDT_VECTORING_ERROR_CODE);
-	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
-	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	vmcs12_write(vcpu, VM_EXIT_REASON, vmcs_read32(VM_EXIT_REASON));
+	vmcs12_write(vcpu, EXIT_QUALIFICATION, vmcs_readl(EXIT_QUALIFICATION));
+
+	vmcs12_write(vcpu, VM_EXIT_INTR_INFO, vmcs_read32(VM_EXIT_INTR_INFO));
+	vmcs12_write(vcpu, VM_EXIT_INTR_ERROR_CODE,
+		vmcs_read32(VM_EXIT_INTR_ERROR_CODE));
+	vmcs12_write(vcpu, IDT_VECTORING_INFO_FIELD,
+		vmcs_read32(IDT_VECTORING_INFO_FIELD));
+	vmcs12_write(vcpu, IDT_VECTORING_ERROR_CODE,
+		vmcs_read32(IDT_VECTORING_ERROR_CODE));
+	vmcs12_write(vcpu, VM_EXIT_INSTRUCTION_LEN,
+		vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
+	vmcs12_write(vcpu, VMX_INSTRUCTION_INFO,
+		vmcs_read32(VMX_INSTRUCTION_INFO));
 
 	/* clear vm-entry fields which are to be cleared on exit */
-	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
-		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
+	if (!(vmcs12_read(vcpu, VM_EXIT_REASON) &
+		VMX_EXIT_REASONS_FAILED_VMENTRY)) {
+		u64 intr_info = vmcs12_read(vcpu, VM_ENTRY_INTR_INFO_FIELD);
+		vmcs12_write(vcpu, VM_ENTRY_INTR_INFO_FIELD,
+			intr_info & ~INTR_INFO_VALID_MASK);
+	}
 }
 
 /*
@@ -7076,25 +7116,25 @@ void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
  * Failures During or After Loading Guest State").
  * This function should be called when the active VMCS is L1's (vmcs01).
  */
-void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+void load_vmcs12_host_state(struct kvm_vcpu *vcpu)
 {
-	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
-		vcpu->arch.efer = vmcs12->host_ia32_efer;
-	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
+	if (vmcs12_read(vcpu, VM_EXIT_CONTROLS) & VM_EXIT_LOAD_IA32_EFER)
+		vcpu->arch.efer = vmcs12_read(vcpu, HOST_IA32_EFER);
+	if (vmcs12_read(vcpu, VM_EXIT_CONTROLS) & VM_EXIT_HOST_ADDR_SPACE_SIZE)
 		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
 	else
 		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
 	vmx_set_efer(vcpu, vcpu->arch.efer);
 
-	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
-	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
+	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12_read(vcpu, HOST_RSP));
+	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12_read(vcpu, HOST_RIP));
 	/*
 	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
 	 * actually changed, because it depends on the current state of
 	 * fpu_active (which may have changed).
 	 * Note that vmx_set_cr0 refers to efer set above.
 	 */
-	kvm_set_cr0(vcpu, vmcs12->host_cr0);
+	kvm_set_cr0(vcpu, vmcs12_read(vcpu, HOST_CR0));
 	/*
 	 * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
 	 * to apply the same changes to L1's vmcs. We just set cr0 correctly,
@@ -7109,10 +7149,10 @@ void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask();
 	 */
 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
-	kvm_set_cr4(vcpu, vmcs12->host_cr4);
+	kvm_set_cr4(vcpu, vmcs12_read(vcpu, HOST_CR4));
 
 	/* shadow page tables on either EPT or shadow page tables */
-	kvm_set_cr3(vcpu, vmcs12->host_cr3);
+	kvm_set_cr3(vcpu, vmcs12_read(vcpu, HOST_CR3));
 	kvm_mmu_reset_context(vcpu);
 
 	if (enable_vpid) {
@@ -7125,27 +7165,31 @@ void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	}
 
 
-	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
-	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
-	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
-	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
-	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
-	vmcs_writel(GUEST_TR_BASE, vmcs12->host_tr_base);
-	vmcs_writel(GUEST_GS_BASE, vmcs12->host_gs_base);
-	vmcs_writel(GUEST_FS_BASE, vmcs12->host_fs_base);
-	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->host_es_selector);
-	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->host_cs_selector);
-	vmcs_write16(GUEST_SS_SELECTOR, vmcs12->host_ss_selector);
-	vmcs_write16(GUEST_DS_SELECTOR, vmcs12->host_ds_selector);
-	vmcs_write16(GUEST_FS_SELECTOR, vmcs12->host_fs_selector);
-	vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector);
-	vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector);
-
-	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT)
-		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
-	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
+	vmcs_write32(GUEST_SYSENTER_CS,
+		vmcs12_read(vcpu, HOST_IA32_SYSENTER_CS));
+	vmcs_writel(GUEST_SYSENTER_ESP,
+		vmcs12_read(vcpu, HOST_IA32_SYSENTER_ESP));
+	vmcs_writel(GUEST_SYSENTER_EIP,
+		vmcs12_read(vcpu, HOST_IA32_SYSENTER_EIP));
+	vmcs_writel(GUEST_IDTR_BASE, vmcs12_read(vcpu, HOST_IDTR_BASE));
+	vmcs_writel(GUEST_GDTR_BASE, vmcs12_read(vcpu, HOST_GDTR_BASE));
+	vmcs_writel(GUEST_TR_BASE, vmcs12_read(vcpu, HOST_TR_BASE));
+	vmcs_writel(GUEST_GS_BASE, vmcs12_read(vcpu, HOST_GS_BASE));
+	vmcs_writel(GUEST_FS_BASE, vmcs12_read(vcpu, HOST_FS_BASE));
+	vmcs_write16(GUEST_ES_SELECTOR, vmcs12_read(vcpu, HOST_ES_SELECTOR));
+	vmcs_write16(GUEST_CS_SELECTOR, vmcs12_read(vcpu, HOST_CS_SELECTOR));
+	vmcs_write16(GUEST_SS_SELECTOR, vmcs12_read(vcpu, HOST_SS_SELECTOR));
+	vmcs_write16(GUEST_DS_SELECTOR, vmcs12_read(vcpu, HOST_DS_SELECTOR));
+	vmcs_write16(GUEST_FS_SELECTOR, vmcs12_read(vcpu, HOST_FS_SELECTOR));
+	vmcs_write16(GUEST_GS_SELECTOR, vmcs12_read(vcpu, HOST_GS_SELECTOR));
+	vmcs_write16(GUEST_TR_SELECTOR, vmcs12_read(vcpu, HOST_TR_SELECTOR));
+
+	if (vmcs12_read(vcpu, VM_EXIT_CONTROLS) & VM_EXIT_LOAD_IA32_PAT)
+		vmcs_write64(GUEST_IA32_PAT, vmcs12_read(vcpu, HOST_IA32_PAT));
+	if (vmcs12_read(vcpu, VM_EXIT_CONTROLS) &
+		VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
 		vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
-			vmcs12->host_ia32_perf_global_ctrl);
+			vmcs12_read(vcpu, HOST_IA32_PERF_GLOBAL_CTRL));
 }
 
 /*
@@ -7157,10 +7201,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int cpu;
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
 	leave_guest_mode(vcpu);
-	prepare_vmcs12(vcpu, vmcs12);
+	prepare_vmcs12(vcpu);
 
 	cpu = get_cpu();
 	vmx->loaded_vmcs = &vmx->vmcs01;
@@ -7173,7 +7216,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
 	if (VMCS02_POOL_SIZE == 0)
 		nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
 
-	load_vmcs12_host_state(vcpu, vmcs12);
+	load_vmcs12_host_state(vcpu);
 
 	/* Update TSC_OFFSET if TSC was changed while L2 ran */
 	vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
@@ -7207,12 +7250,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
  * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
  */
 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
-			struct vmcs12 *vmcs12,
 			u32 reason, unsigned long qualification)
 {
-	load_vmcs12_host_state(vcpu, vmcs12);
-	vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
-	vmcs12->exit_qualification = qualification;
+	load_vmcs12_host_state(vcpu);
+	vmcs12_write(vcpu, VM_EXIT_REASON,
+		reason | VMX_EXIT_REASONS_FAILED_VMENTRY);
+	vmcs12_write(vcpu, EXIT_QUALIFICATION, qualification);
 	nested_vmx_succeed(vcpu);
 }
 
-- 
1.7.1
