Re: [PATCH 3/5] Nested VMX patch 3 implements vmptrld and vmptrst

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Thu, Oct 15, 2009 at 04:41:44PM +0200, oritw@xxxxxxxxxx wrote:
> From: Orit Wasserman <oritw@xxxxxxxxxx>
> 
> ---
>  arch/x86/kvm/vmx.c |  468 ++++++++++++++++++++++++++++++++++++++++++++++++++--
>  arch/x86/kvm/x86.c |    3 +-
>  2 files changed, 459 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 411cbdb..8c186e0 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -61,20 +61,168 @@ module_param_named(unrestricted_guest,
>  static int __read_mostly emulate_invalid_guest_state = 0;
>  module_param(emulate_invalid_guest_state, bool, S_IRUGO);
>  
> +
> +struct __attribute__ ((__packed__)) shadow_vmcs {
> +	u32 revision_id;
> +	u32 abort;
> +	u16 virtual_processor_id;
> +	u16 guest_es_selector;
> +	u16 guest_cs_selector;
> +	u16 guest_ss_selector;
> +	u16 guest_ds_selector;
> +	u16 guest_fs_selector;
> +	u16 guest_gs_selector;
> +	u16 guest_ldtr_selector;
> +	u16 guest_tr_selector;
> +	u16 host_es_selector;
> +	u16 host_cs_selector;
> +	u16 host_ss_selector;
> +	u16 host_ds_selector;
> +	u16 host_fs_selector;
> +	u16 host_gs_selector;
> +	u16 host_tr_selector;
> +	u64 io_bitmap_a;
> +	u64 io_bitmap_b;
> +	u64 msr_bitmap;
> +	u64 vm_exit_msr_store_addr;
> +	u64 vm_exit_msr_load_addr;
> +	u64 vm_entry_msr_load_addr;
> +	u64 tsc_offset;
> +	u64 virtual_apic_page_addr;
> +	u64 apic_access_addr;
> +	u64 ept_pointer;
> +	u64 guest_physical_address;
> +	u64 vmcs_link_pointer;
> +	u64 guest_ia32_debugctl;
> +	u64 guest_ia32_pat;
> +	u64 guest_pdptr0;
> +	u64 guest_pdptr1;
> +	u64 guest_pdptr2;
> +	u64 guest_pdptr3;
> +	u64 host_ia32_pat;
> +	u32 pin_based_vm_exec_control;
> +	u32 cpu_based_vm_exec_control;
> +	u32 exception_bitmap;
> +	u32 page_fault_error_code_mask;
> +	u32 page_fault_error_code_match;
> +	u32 cr3_target_count;
> +	u32 vm_exit_controls;
> +	u32 vm_exit_msr_store_count;
> +	u32 vm_exit_msr_load_count;
> +	u32 vm_entry_controls;
> +	u32 vm_entry_msr_load_count;
> +	u32 vm_entry_intr_info_field;
> +	u32 vm_entry_exception_error_code;
> +	u32 vm_entry_instruction_len;
> +	u32 tpr_threshold;
> +	u32 secondary_vm_exec_control;
> +	u32 vm_instruction_error;
> +	u32 vm_exit_reason;
> +	u32 vm_exit_intr_info;
> +	u32 vm_exit_intr_error_code;
> +	u32 idt_vectoring_info_field;
> +	u32 idt_vectoring_error_code;
> +	u32 vm_exit_instruction_len;
> +	u32 vmx_instruction_info;
> +	u32 guest_es_limit;
> +	u32 guest_cs_limit;
> +	u32 guest_ss_limit;
> +	u32 guest_ds_limit;
> +	u32 guest_fs_limit;
> +	u32 guest_gs_limit;
> +	u32 guest_ldtr_limit;
> +	u32 guest_tr_limit;
> +	u32 guest_gdtr_limit;
> +	u32 guest_idtr_limit;
> +	u32 guest_es_ar_bytes;
> +	u32 guest_cs_ar_bytes;
> +	u32 guest_ss_ar_bytes;
> +	u32 guest_ds_ar_bytes;
> +	u32 guest_fs_ar_bytes;
> +	u32 guest_gs_ar_bytes;
> +	u32 guest_ldtr_ar_bytes;
> +	u32 guest_tr_ar_bytes;
> +	u32 guest_interruptibility_info;
> +	u32 guest_activity_state;
> +	u32 guest_sysenter_cs;
> +	u32 host_ia32_sysenter_cs;
> +	unsigned long cr0_guest_host_mask;
> +	unsigned long cr4_guest_host_mask;
> +	unsigned long cr0_read_shadow;
> +	unsigned long cr4_read_shadow;
> +	unsigned long cr3_target_value0;
> +	unsigned long cr3_target_value1;
> +	unsigned long cr3_target_value2;
> +	unsigned long cr3_target_value3;
> +	unsigned long exit_qualification;
> +	unsigned long guest_linear_address;
> +	unsigned long guest_cr0;
> +	unsigned long guest_cr3;
> +	unsigned long guest_cr4;
> +	unsigned long guest_es_base;
> +	unsigned long guest_cs_base;
> +	unsigned long guest_ss_base;
> +	unsigned long guest_ds_base;
> +	unsigned long guest_fs_base;
> +	unsigned long guest_gs_base;
> +	unsigned long guest_ldtr_base;
> +	unsigned long guest_tr_base;
> +	unsigned long guest_gdtr_base;
> +	unsigned long guest_idtr_base;
> +	unsigned long guest_dr7;
> +	unsigned long guest_rsp;
> +	unsigned long guest_rip;
> +	unsigned long guest_rflags;
> +	unsigned long guest_pending_dbg_exceptions;
> +	unsigned long guest_sysenter_esp;
> +	unsigned long guest_sysenter_eip;
> +	unsigned long host_cr0;
> +	unsigned long host_cr3;
> +	unsigned long host_cr4;
> +	unsigned long host_fs_base;
> +	unsigned long host_gs_base;
> +	unsigned long host_tr_base;
> +	unsigned long host_gdtr_base;
> +	unsigned long host_idtr_base;
> +	unsigned long host_ia32_sysenter_esp;
> +	unsigned long host_ia32_sysenter_eip;
> +	unsigned long host_rsp;
> +	unsigned long host_rip;
> +};
> +
>  struct __attribute__ ((__packed__)) level_state {
>  	/* Has the level1 guest done vmclear? */
>  	bool vmclear;
> +	u16 vpid;
> +	u64 shadow_efer;
> +	unsigned long cr2;
> +	unsigned long cr3;
> +	unsigned long cr4;
> +	unsigned long cr8;
> +
> +	u64 io_bitmap_a;
> +	u64 io_bitmap_b;
> +	u64 msr_bitmap;
> +
> +	struct shadow_vmcs *shadow_vmcs;
> +
> +	struct vmcs *vmcs;
> +	int cpu;
> +	int launched;
>  };
>  
>  struct nested_vmx {
>  	/* Has the level1 guest done vmxon? */
>  	bool vmxon;
> -
> +	/* What is the location of the  vmcs l1 keeps for l2? (in level1 gpa) */
> +	u64 vmptr;
>  	/*
>  	 * Level 2 state : includes vmcs,registers and
>  	 * a copy of vmcs12 for vmread/vmwrite
>  	 */
>  	struct level_state *l2_state;
> +	/* Level 1 state for switching to level 2 and back */
> +	struct level_state *l1_state;
>  };
>  
>  struct vmcs {
> @@ -140,6 +288,25 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
>  	return container_of(vcpu, struct vcpu_vmx, vcpu);
>  }
>  
> +static struct page *nested_get_page(struct kvm_vcpu *vcpu,
> +				    u64 vmcs_addr)
> +{
> +	struct page *vmcs_page = NULL;
> +
> +	down_read(&current->mm->mmap_sem);
> +	vmcs_page = gfn_to_page(vcpu->kvm, vmcs_addr >> PAGE_SHIFT);
> +	up_read(&current->mm->mmap_sem);
> +
> +	if (is_error_page(vmcs_page)) {
> +		printk(KERN_ERR "%s error allocating page \n", __func__);
> +		kvm_release_page_clean(vmcs_page);
> +		return NULL;
> +	}
> +
> +	return vmcs_page;
> +
> +}
> +
>  static int init_rmode(struct kvm *kvm);
>  static u64 construct_eptp(unsigned long root_hpa);
>  
> @@ -197,6 +364,8 @@ static struct kvm_vmx_segment_field {
>  
>  static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
>  
> +static int nested_vmx_check_permission(struct kvm_vcpu *vcpu);
> +static int create_l1_state(struct kvm_vcpu *vcpu);
>  static int create_l2_state(struct kvm_vcpu *vcpu);
>  
>  /*
> @@ -715,6 +884,24 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
>  	preempt_enable();
>  }
>  
> +
> +static int vmptrld(struct kvm_vcpu *vcpu,
> +		   u64 phys_addr)
> +{
> +	u8 error;
> +
> +	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
> +		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
> +		      : "cc");
> +	if (error) {
> +		printk(KERN_ERR "kvm: %s vmptrld %llx failed\n",
> +		       __func__, phys_addr);
> +		return 1;
> +	}
> +
> +	return 0;
> +}
> +
>  /*
>   * Switches to specified vcpu, until a matching vcpu_put(), but assumes
>   * vcpu mutex is already taken.
> @@ -736,15 +923,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  	}
>  
>  	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
> -		u8 error;
> -
>  		per_cpu(current_vmcs, cpu) = vmx->vmcs;
> -		asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
> -			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
> -			      : "cc");
> -		if (error)
> -			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
> -			       vmx->vmcs, phys_addr);
> +		vmptrld(vcpu, phys_addr);
>  	}
>  
>  	if (vcpu->cpu != cpu) {
> @@ -1318,6 +1498,28 @@ struct level_state *create_state(void)
>  	return state;
>  }
>  
> +int create_l1_state(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +	if (!vmx->nested.l1_state) {
> +		vmx->nested.l1_state = create_state();
> +		if (!vmx->nested.l1_state)
> +			return -ENOMEM;
> +	} else
> +		return 0;
> +
> +	vmx->nested.l1_state->shadow_vmcs = kzalloc(PAGE_SIZE, GFP_KERNEL);
> +	if (!vmx->nested.l1_state->shadow_vmcs) {
> +		printk(KERN_INFO "%s error creating shadow vmcs\n",
> +		       __func__);
> +		kfree(vmx->nested.l1_state);
> +		return -ENOMEM;
> +	}
> +	return 0;
> +}
> +
> +static struct vmcs *alloc_vmcs(void);
>  int create_l2_state(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> @@ -1326,11 +1528,53 @@ int create_l2_state(struct kvm_vcpu *vcpu)
>  		vmx->nested.l2_state = create_state();
>  		if (!vmx->nested.l2_state)
>  			return -ENOMEM;
> +	} else
> +		return 0;
> +
> +	vmx->nested.l2_state->vmcs = alloc_vmcs();
> +	if (!vmx->nested.l2_state->vmcs) {
> +		printk(KERN_ERR "%s error in creating level 2 vmcs", __func__);
> +		kfree(vmx->nested.l2_state);
> +		return -ENOMEM;
>  	}
>  
> +	if (cpu_has_vmx_msr_bitmap())
> +		vmx->nested.l2_state->msr_bitmap = vmcs_read64(MSR_BITMAP);
> +	else
> +		vmx->nested.l2_state->msr_bitmap = 0;
> +
> +	vmx->nested.l2_state->io_bitmap_a = vmcs_read64(IO_BITMAP_A);
> +	vmx->nested.l2_state->io_bitmap_b = vmcs_read64(IO_BITMAP_B);
> +
>  	return 0;
>  }
>  
> +int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
> +			struct kvm_vcpu *vcpu);
> +
Move to header.

> +int read_guest_vmcs_gpa(struct kvm_vcpu *vcpu, u64 *gentry)
> +{
Please make all local functions static. Here and in all other patches.

> +
> +	int r = 0;
> +
> +	r = kvm_read_guest_virt(vcpu->arch.regs[VCPU_REGS_RAX], gentry,
> +				sizeof(u64), vcpu);
The vmptrld operand is not necessarily in RAX; it can be in other registers or in memory. Decode the instruction operand instead of assuming RAX.

> +	if (r) {
> +		printk(KERN_ERR "%s cannot read guest vmcs addr %lx : %d\n",
> +		       __func__, vcpu->arch.regs[VCPU_REGS_RAX], r);
> +		return r;
> +	}
> +
> +	if (!IS_ALIGNED(*gentry, PAGE_SIZE)) {
> +		printk(KERN_DEBUG "%s addr %llx not aligned\n",
> +		       __func__, *gentry);
> +		return 1;
> +	}
> +
> +	return 0;
> +}
> +
> +
>  /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
>   * tricks.
>   */
> @@ -3374,6 +3618,66 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  	return 1;
>  }
>  
> +static int handle_vmptrld(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +	struct page *vmcs_page;
> +	u64 guest_vmcs_addr;
> +
> +	if (!nested_vmx_check_permission(vcpu))
> +		return 1;
> +
> +	if (read_guest_vmcs_gpa(vcpu, &guest_vmcs_addr))
> +		return 1;
> +
> +	if (create_l1_state(vcpu)) {
> +		printk(KERN_ERR "%s create_l1_state failed\n", __func__);
> +		return 1;
> +	}
> +
> +	if (create_l2_state(vcpu)) {
> +		printk(KERN_ERR "%s create_l2_state failed\n", __func__);
> +		return 1;
> +	}
create_l2_state() is already called on vmxon. As far as I can see, calling it
again here is a no-op.

> +
> +	if (vmx->nested.vmptr != guest_vmcs_addr) {
> +		/* checking vmptr address */
> +		vmcs_page = nested_get_page(vcpu, guest_vmcs_addr);
> +		if (vmcs_page == NULL)
> +			return 1;
> +
> +		vmx->nested.vmptr = guest_vmcs_addr;
> +
> +		kvm_release_page_clean(vmcs_page);
> +	}
> +
> +	clear_rflags_cf_zf(vcpu);
> +	skip_emulated_instruction(vcpu);
> +	return 1;
> +}
> +
> +int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
> +			 struct kvm_vcpu *vcpu);
Move to header.

> +
> +static int handle_vmptrst(struct kvm_vcpu *vcpu)
> +{
> +	int r = 0;
> +
> +	if (!nested_vmx_check_permission(vcpu))
> +		return 1;
> +
> +	r = kvm_write_guest_virt(vcpu->arch.regs[VCPU_REGS_RAX],
> +				 (void *)&to_vmx(vcpu)->nested.vmptr,
> +				 sizeof(u64), vcpu);
Same as vmptrld. Why are you assuming RAX?

> +	if (r) {
> +		printk(KERN_INFO "%s failed to write vmptr\n", __func__);
> +		return 1;
> +	}
> +	clear_rflags_cf_zf(vcpu);
> +	skip_emulated_instruction(vcpu);
> +	return 1;
> +}
> +
>  static int handle_invlpg(struct kvm_vcpu *vcpu)
>  {
>  	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
> @@ -3644,8 +3948,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
>  	[EXIT_REASON_VMCALL]                  = handle_vmcall,
>  	[EXIT_REASON_VMCLEAR]	              = handle_vmclear,
>  	[EXIT_REASON_VMLAUNCH]                = handle_vmx_insn,
> -	[EXIT_REASON_VMPTRLD]                 = handle_vmx_insn,
> -	[EXIT_REASON_VMPTRST]                 = handle_vmx_insn,
> +	[EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
> +	[EXIT_REASON_VMPTRST]                 = handle_vmptrst,
>  	[EXIT_REASON_VMREAD]                  = handle_vmx_insn,
>  	[EXIT_REASON_VMRESUME]                = handle_vmx_insn,
>  	[EXIT_REASON_VMWRITE]                 = handle_vmx_insn,
> @@ -4183,6 +4487,148 @@ static bool vmx_gb_page_enable(void)
>  	return false;
>  }
>  
> +void save_vmcs(struct shadow_vmcs *dst)
> +{
Not used by this patch. Maybe introduce it in the patch that actually uses it.

> +	dst->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
> +	dst->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
> +	dst->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
> +	dst->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
> +	dst->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
> +	dst->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
> +	dst->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
> +	dst->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
> +	dst->host_es_selector = vmcs_read16(HOST_ES_SELECTOR);
> +	dst->host_cs_selector = vmcs_read16(HOST_CS_SELECTOR);
> +	dst->host_ss_selector = vmcs_read16(HOST_SS_SELECTOR);
> +	dst->host_ds_selector = vmcs_read16(HOST_DS_SELECTOR);
> +	dst->host_fs_selector = vmcs_read16(HOST_FS_SELECTOR);
> +	dst->host_gs_selector = vmcs_read16(HOST_GS_SELECTOR);
> +	dst->host_tr_selector = vmcs_read16(HOST_TR_SELECTOR);
> +	dst->io_bitmap_a = vmcs_read64(IO_BITMAP_A);
> +	dst->io_bitmap_b = vmcs_read64(IO_BITMAP_B);
> +	if (cpu_has_vmx_msr_bitmap())
> +		dst->msr_bitmap = vmcs_read64(MSR_BITMAP);
> +
> +	dst->vm_exit_msr_store_addr = vmcs_read64(VM_EXIT_MSR_STORE_ADDR);
> +	dst->vm_exit_msr_load_addr = vmcs_read64(VM_EXIT_MSR_LOAD_ADDR);
> +	dst->vm_entry_msr_load_addr = vmcs_read64(VM_ENTRY_MSR_LOAD_ADDR);
> +	dst->tsc_offset = vmcs_read64(TSC_OFFSET);
> +	dst->virtual_apic_page_addr = vmcs_read64(VIRTUAL_APIC_PAGE_ADDR);
> +	dst->apic_access_addr = vmcs_read64(APIC_ACCESS_ADDR);
> +	if (enable_ept)
> +		dst->ept_pointer = vmcs_read64(EPT_POINTER);
> +
> +	dst->guest_physical_address = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
> +	dst->vmcs_link_pointer = vmcs_read64(VMCS_LINK_POINTER);
> +	dst->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
> +	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
> +		dst->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
> +	if (enable_ept) {
> +		dst->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
> +		dst->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
> +		dst->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
> +		dst->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
> +	}
> +	dst->pin_based_vm_exec_control = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
> +	dst->cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
> +	dst->exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
> +	dst->page_fault_error_code_mask =
> +		vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK);
> +	dst->page_fault_error_code_match =
> +		vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH);
> +	dst->cr3_target_count = vmcs_read32(CR3_TARGET_COUNT);
> +	dst->vm_exit_controls = vmcs_read32(VM_EXIT_CONTROLS);
> +	dst->vm_exit_msr_store_count = vmcs_read32(VM_EXIT_MSR_STORE_COUNT);
> +	dst->vm_exit_msr_load_count = vmcs_read32(VM_EXIT_MSR_LOAD_COUNT);
> +	dst->vm_entry_controls = vmcs_read32(VM_ENTRY_CONTROLS);
> +	dst->vm_entry_msr_load_count = vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT);
> +	dst->vm_entry_intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
> +	dst->vm_entry_exception_error_code =
> +		vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE);
> +	dst->vm_entry_instruction_len = vmcs_read32(VM_ENTRY_INSTRUCTION_LEN);
> +	dst->tpr_threshold = vmcs_read32(TPR_THRESHOLD);
> +	dst->secondary_vm_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
> +	if (enable_vpid && dst->secondary_vm_exec_control &
> +	    SECONDARY_EXEC_ENABLE_VPID)
> +		dst->virtual_processor_id = vmcs_read16(VIRTUAL_PROCESSOR_ID);
> +	dst->vm_instruction_error = vmcs_read32(VM_INSTRUCTION_ERROR);
> +	dst->vm_exit_reason  = vmcs_read32(VM_EXIT_REASON);
> +	dst->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
> +	dst->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
> +	dst->idt_vectoring_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
> +	dst->idt_vectoring_error_code = vmcs_read32(IDT_VECTORING_ERROR_CODE);
> +	dst->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
> +	dst->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> +	dst->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
> +	dst->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
> +	dst->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
> +	dst->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
> +	dst->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
> +	dst->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
> +	dst->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
> +	dst->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
> +	dst->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
> +	dst->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
> +	dst->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
> +	dst->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
> +	dst->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
> +	dst->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
> +	dst->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
> +	dst->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
> +	dst->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
> +	dst->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
> +	dst->guest_interruptibility_info =
> +		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
> +	dst->guest_activity_state = vmcs_read32(GUEST_ACTIVITY_STATE);
> +	dst->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
> +	dst->host_ia32_sysenter_cs = vmcs_read32(HOST_IA32_SYSENTER_CS);
> +	dst->cr0_guest_host_mask = vmcs_readl(CR0_GUEST_HOST_MASK);
> +	dst->cr4_guest_host_mask = vmcs_readl(CR4_GUEST_HOST_MASK);
> +	dst->cr0_read_shadow = vmcs_readl(CR0_READ_SHADOW);
> +	dst->cr4_read_shadow = vmcs_readl(CR4_READ_SHADOW);
> +	dst->cr3_target_value0 = vmcs_readl(CR3_TARGET_VALUE0);
> +	dst->cr3_target_value1 = vmcs_readl(CR3_TARGET_VALUE1);
> +	dst->cr3_target_value2 = vmcs_readl(CR3_TARGET_VALUE2);
> +	dst->cr3_target_value3 = vmcs_readl(CR3_TARGET_VALUE3);
> +	dst->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
> +	dst->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
> +	dst->guest_cr0 = vmcs_readl(GUEST_CR0);
> +	dst->guest_cr3 = vmcs_readl(GUEST_CR3);
> +	dst->guest_cr4 = vmcs_readl(GUEST_CR4);
> +	dst->guest_es_base = vmcs_readl(GUEST_ES_BASE);
> +	dst->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
> +	dst->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
> +	dst->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
> +	dst->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
> +	dst->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
> +	dst->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
> +	dst->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
> +	dst->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
> +	dst->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
> +	dst->guest_dr7 = vmcs_readl(GUEST_DR7);
> +	dst->guest_rsp = vmcs_readl(GUEST_RSP);
> +	dst->guest_rip = vmcs_readl(GUEST_RIP);
> +	dst->guest_rflags = vmcs_readl(GUEST_RFLAGS);
> +	dst->guest_pending_dbg_exceptions =
> +		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
> +	dst->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
> +	dst->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
> +	dst->host_cr0 = vmcs_readl(HOST_CR0);
> +	dst->host_cr3 = vmcs_readl(HOST_CR3);
> +	dst->host_cr4 = vmcs_readl(HOST_CR4);
> +	dst->host_fs_base = vmcs_readl(HOST_FS_BASE);
> +	dst->host_gs_base = vmcs_readl(HOST_GS_BASE);
> +	dst->host_tr_base = vmcs_readl(HOST_TR_BASE);
> +	dst->host_gdtr_base = vmcs_readl(HOST_GDTR_BASE);
> +	dst->host_idtr_base = vmcs_readl(HOST_IDTR_BASE);
> +	dst->host_ia32_sysenter_esp = vmcs_readl(HOST_IA32_SYSENTER_ESP);
> +	dst->host_ia32_sysenter_eip = vmcs_readl(HOST_IA32_SYSENTER_EIP);
> +	dst->host_rsp = vmcs_readl(HOST_RSP);
> +	dst->host_rip = vmcs_readl(HOST_RIP);
> +	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT)
> +		dst->host_ia32_pat = vmcs_read64(HOST_IA32_PAT);
> +}
> +
>  static struct kvm_x86_ops vmx_x86_ops = {
>  	.cpu_has_kvm_support = cpu_has_kvm_support,
>  	.disabled_by_bios = vmx_disabled_by_bios,
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 9c39092..74eb888 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2473,6 +2473,7 @@ static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
>  out:
>  	return r;
>  }
> +EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
>  
>  static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
>  				struct kvm_vcpu *vcpu)
> @@ -2503,7 +2504,7 @@ static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
>  out:
>  	return r;
>  }
> -
> +EXPORT_SYMBOL_GPL(kvm_write_guest_virt);
>  
>  static int emulator_read_emulated(unsigned long addr,
>  				  void *val,
> -- 
> 1.6.0.4
> 
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@xxxxxxxxxxxxxxx
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

--
			Gleb.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html

[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux