Re: [PATCH v1 1/2] KVM: VMX: cleanup EPTP definitions

David Hildenbrand <david@xxxxxxxxxx> writes:

> Don't use shifts, tag them correctly as EPTP and use better matching
> names (PWL vs. GAW).

Fair enough. This reminds me that I also don't particularly like the vmx->nested.nested...
accesses that are spread all over the place :)
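
Not something for this series, but just to sketch what I mean -- a small
helper that grabs the caps once would already read nicer than repeating
the long chain in every check (nested_eptp_mt_supported() is made up
here, it doesn't exist in the tree):

static bool nested_eptp_mt_supported(struct vcpu_vmx *vmx, u64 mt)
{
	/* Cache the caps instead of dereferencing the long chain twice. */
	u64 ept_caps = vmx->nested.nested_vmx_ept_caps;

	switch (mt) {
	case VMX_EPTP_MT_UC:
		return ept_caps & VMX_EPTP_UC_BIT;
	case VMX_EPTP_MT_WB:
		return ept_caps & VMX_EPTP_WB_BIT;
	default:
		return false;
	}
}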

> Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
> ---
>  arch/x86/include/asm/vmx.h | 11 ++++++-----
>  arch/x86/kvm/vmx.c         | 25 +++++++++++--------------
>  2 files changed, 17 insertions(+), 19 deletions(-)
>
> diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
> index 5f63a2e..929b3fc 100644
> --- a/arch/x86/include/asm/vmx.h
> +++ b/arch/x86/include/asm/vmx.h
> @@ -468,12 +468,13 @@ enum vmcs_field {
>  #define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT      (1ull << 10) /* (42 - 32) */
>  #define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT   (1ull << 11) /* (43 - 32) */
>  
> -#define VMX_EPT_DEFAULT_GAW			3
> -#define VMX_EPT_MAX_GAW				0x4
>  #define VMX_EPT_MT_EPTE_SHIFT			3
> -#define VMX_EPT_GAW_EPTP_SHIFT			3
> -#define VMX_EPT_AD_ENABLE_BIT			(1ull << 6)
> -#define VMX_EPT_DEFAULT_MT			0x6ull
> +#define VMX_EPTP_PWL_MASK			0x38ull
> +#define VMX_EPTP_PWL_4				0x18ull
> +#define VMX_EPTP_AD_ENABLE_BIT			(1ull << 6)
> +#define VMX_EPTP_MT_MASK			0x7ull
> +#define VMX_EPTP_MT_WB				0x6ull
> +#define VMX_EPTP_MT_UC				0x0ull
>  #define VMX_EPT_READABLE_MASK			0x1ull
>  #define VMX_EPT_WRITABLE_MASK			0x2ull
>  #define VMX_EPT_EXECUTABLE_MASK			0x4ull
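
For reference, the EPTP encoding is: bits 2:0 memory type, bits 5:3
page-walk length minus one, bit 6 A/D enable. So VMX_EPTP_PWL_4 =
(4 - 1) << 3 = 0x18ull, and VMX_EPTP_PWL_MASK = 0x38ull covers bits 5:3,
matching what VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT used to
produce.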
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index c483f99..f6638ed 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -4300,14 +4300,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
>  
>  static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
>  {
> -	u64 eptp;
> +	u64 eptp = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4;
>  
>  	/* TODO write the value reading from MSR */
> -	eptp = VMX_EPT_DEFAULT_MT |
> -		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
>  	if (enable_ept_ad_bits &&
>  	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
> -		eptp |= VMX_EPT_AD_ENABLE_BIT;
> +		eptp |= VMX_EPTP_AD_ENABLE_BIT;
>  	eptp |= (root_hpa & PAGE_MASK);
>  
>  	return eptp;
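
Sanity check: the new default is VMX_EPTP_MT_WB | VMX_EPTP_PWL_4 =
0x6 | 0x18 = 0x1e, i.e. the same value the old
VMX_EPT_DEFAULT_MT | (VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT)
expression gave, so no functional change here.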
> @@ -7879,16 +7877,15 @@ static int handle_preemption_timer(struct kvm_vcpu *vcpu)
>  static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> -	u64 mask = address & 0x7;
>  	int maxphyaddr = cpuid_maxphyaddr(vcpu);
>  
>  	/* Check for memory type validity */
> -	switch (mask) {
> -	case 0:
> +	switch (address & VMX_EPTP_MT_MASK) {
> +	case VMX_EPTP_MT_UC:
>  		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_UC_BIT))
>  			return false;
>  		break;
> -	case 6:
> +	case VMX_EPTP_MT_WB:
>  		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_WB_BIT))
>  			return false;
>  		break;
> @@ -7896,8 +7893,8 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
>  		return false;
>  	}
>  
> -	/* Bits 5:3 must be 3 */
> -	if (((address >> VMX_EPT_GAW_EPTP_SHIFT) & 0x7) != VMX_EPT_DEFAULT_GAW)
> +	/* Only a 4-level page-walk length is valid */
> +	if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
>  		return false;
>  
>  	/* Reserved bits should not be set */
> @@ -7905,7 +7902,7 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
>  		return false;
>  
>  	/* AD, if set, should be supported */
> -	if ((address & VMX_EPT_AD_ENABLE_BIT)) {
> +	if (address & VMX_EPTP_AD_ENABLE_BIT) {
>  		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPT_AD_BIT))
>  			return false;
>  	}
> @@ -7933,7 +7930,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
>  				     &address, index * 8, 8))
>  		return 1;
>  
> -	accessed_dirty = !!(address & VMX_EPT_AD_ENABLE_BIT);
> +	accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
>  
>  	/*
>  	 * If the (L2) guest does a vmfunc to the currently
> @@ -9499,7 +9496,7 @@ static void __init vmx_check_processor_compat(void *rtn)
>  
>  static int get_ept_level(void)
>  {
> -	return VMX_EPT_DEFAULT_GAW + 1;
> +	return 4;
>  }
>  
>  static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
> @@ -9702,7 +9699,7 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
>  
>  static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
>  {
> -	return nested_ept_get_cr3(vcpu) & VMX_EPT_AD_ENABLE_BIT;
> +	return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
>  }
>  
>  /* Callbacks for nested_ept_init_mmu_context: */


