Re: [PATCH] x86/sgx: Pass userspace source address directly to EADD

Applied.

/Jarkko

On Thu, 2019-08-22 at 19:10 -0700, Sean Christopherson wrote:
> Invoke EADD with the userspace source address instead of first copying
> the data to a kernel page to avoid the overhead of alloc_page() and
> copy_from_user().
> 
> Remove all pre-validation of TCS pages.  The source page is no longer
> readily available since it's not copied into the kernel, and validating
> the TCS essentially requires accessing the entire page since the vast
> majority of the TCS is reserved bytes.  Given that userspace can now
> cause EADD to fault simply by passing a bad pointer, validating the TCS
> to avoid faults on EADD provides little to no value.
> 
> Suggested-by: Andy Lutomirski <luto@xxxxxxxxxx>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
> ---
>  arch/x86/kernel/cpu/sgx/driver/ioctl.c | 148 ++++++-------------------
>  1 file changed, 33 insertions(+), 115 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/sgx/driver/ioctl.c b/arch/x86/kernel/cpu/sgx/driver/ioctl.c
> index 85e36e530baf..f02b31acd3ad 100644
> --- a/arch/x86/kernel/cpu/sgx/driver/ioctl.c
> +++ b/arch/x86/kernel/cpu/sgx/driver/ioctl.c
> @@ -305,71 +305,46 @@ static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
>  	return 0;
>  }
>  
> -static bool sgx_validate_offset(struct sgx_encl *encl, unsigned long offset)
> -{
> -	if (offset & (PAGE_SIZE - 1))
> -		return false;
> -
> -	if (offset >= encl->size)
> -		return false;
> -
> -	return true;
> -}
> -
> -static int sgx_validate_tcs(struct sgx_encl *encl, struct sgx_tcs *tcs)
> -{
> -	int i;
> -
> -	if (tcs->flags & SGX_TCS_RESERVED_MASK)
> -		return -EINVAL;
> -
> -	if (tcs->flags & SGX_TCS_DBGOPTIN)
> -		return -EINVAL;
> -
> -	if (!sgx_validate_offset(encl, tcs->ssa_offset))
> -		return -EINVAL;
> -
> -	if (!sgx_validate_offset(encl, tcs->fs_offset))
> -		return -EINVAL;
> -
> -	if (!sgx_validate_offset(encl, tcs->gs_offset))
> -		return -EINVAL;
> -
> -	if ((tcs->fs_limit & 0xFFF) != 0xFFF)
> -		return -EINVAL;
> -
> -	if ((tcs->gs_limit & 0xFFF) != 0xFFF)
> -		return -EINVAL;
> -
> -	for (i = 0; i < SGX_TCS_RESERVED_SIZE; i++)
> -		if (tcs->reserved[i])
> -			return -EINVAL;
> -
> -	return 0;
> -}
> -
>  static int __sgx_encl_add_page(struct sgx_encl *encl,
>  			       struct sgx_encl_page *encl_page,
>  			       struct sgx_epc_page *epc_page,
> -			       void *data,
> -			       struct sgx_secinfo *secinfo,
> -			       unsigned long mrmask)
> +			       struct sgx_secinfo *secinfo, unsigned long src,
> +			       unsigned long prot, unsigned long mrmask)
>  {
>  	struct sgx_pageinfo pginfo;
> +	struct vm_area_struct *vma;
>  	int ret;
>  	int i;
>  
>  	pginfo.secs = (unsigned long)sgx_epc_addr(encl->secs.epc_page);
>  	pginfo.addr = SGX_ENCL_PAGE_ADDR(encl_page);
>  	pginfo.metadata = (unsigned long)secinfo;
> -	pginfo.contents = (unsigned long)data;
> +	pginfo.contents = src;
>  
> +	down_read(&current->mm->mmap_sem);
> +
> +	/* Query vma's VM_MAYEXEC as an indirect path_noexec() check. */
> +	if (encl_page->vm_max_prot_bits & VM_EXEC) {
> +		vma = find_vma(current->mm, src);
> +		if (!vma) {
> +			up_read(&current->mm->mmap_sem);
> +			return -EFAULT;
> +		}
> +
> +		if (!(vma->vm_flags & VM_MAYEXEC)) {
> +			up_read(&current->mm->mmap_sem);
> +			return -EACCES;
> +		}
> +	}
> +
> +	__uaccess_begin();
>  	ret = __eadd(&pginfo, sgx_epc_addr(epc_page));
> -	if (ret) {
> -		if (encls_failed(ret))
> -			ENCLS_WARN(ret, "EADD");
> +	__uaccess_end();
> +
> +	up_read(&current->mm->mmap_sem);
> +
> +	if (ret)
>  		return -EFAULT;
> -	}
>  
>  	for_each_set_bit(i, &mrmask, 16) {
>  		ret = __eextend(sgx_epc_addr(encl->secs.epc_page),
> @@ -389,9 +364,9 @@ static int __sgx_encl_add_page(struct sgx_encl *encl,
>  	return 0;
>  }
>  
> -static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
> -			     void *data, struct sgx_secinfo *secinfo,
> -			     unsigned int mrmask, unsigned long prot)
> +static int sgx_encl_add_page(struct sgx_encl *encl,
> +			     struct sgx_enclave_add_page *addp,
> +			     struct sgx_secinfo *secinfo, unsigned long prot)
>  {
>  	u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
>  	struct sgx_encl_page *encl_page;
> @@ -399,13 +374,7 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
>  	struct sgx_va_page *va_page;
>  	int ret;
>  
> -	if (page_type == SGX_SECINFO_TCS) {
> -		ret = sgx_validate_tcs(encl, data);
> -		if (ret)
> -			return ret;
> -	}
> -
> -	encl_page = sgx_encl_page_alloc(encl, addr, prot, page_type);
> +	encl_page = sgx_encl_page_alloc(encl, addp->addr, prot, page_type);
>  	if (IS_ERR(encl_page))
>  		return PTR_ERR(encl_page);
>  
> @@ -428,8 +397,8 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
>  	if (ret)
>  		goto err_out_shrink;
>  
> -	ret = __sgx_encl_add_page(encl, encl_page, epc_page, data, secinfo,
> -				  mrmask);
> +	ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
> +				  addp->src, prot, addp->mrmask);
>  	if (ret)
>  		goto err_out;
>  
> @@ -451,36 +420,6 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
>  	return ret;
>  }
>  
> -static int sgx_encl_page_import_user(void *dst, unsigned long src,
> -				     unsigned long prot)
> -{
> -	struct vm_area_struct *vma;
> -	int ret = 0;
> -
> -	down_read(&current->mm->mmap_sem);
> -
> -	/* Query vma's VM_MAYEXEC as an indirect path_noexec() check. */
> -	if (prot & PROT_EXEC) {
> -		vma = find_vma(current->mm, src);
> -		if (!vma) {
> -			ret = -EFAULT;
> -			goto out;
> -		}
> -
> -		if (!(vma->vm_flags & VM_MAYEXEC)) {
> -			ret = -EACCES;
> -			goto out;
> -		}
> -	}
> -
> -	if (copy_from_user(dst, (void __user *)src, PAGE_SIZE))
> -		ret = -EFAULT;
> -
> -out:
> -	up_read(&current->mm->mmap_sem);
> -	return ret;
> -}
> -
>  /**
>   * sgx_ioc_enclave_add_page - handler for %SGX_IOC_ENCLAVE_ADD_PAGE
>   *
> @@ -502,10 +441,7 @@ static long sgx_ioc_enclave_add_page(struct file *filep, void __user *arg)
>  	struct sgx_encl *encl = filep->private_data;
>  	struct sgx_enclave_add_page addp;
>  	struct sgx_secinfo secinfo;
> -	struct page *data_page;
>  	unsigned long prot;
> -	void *data;
> -	int ret;
>  
>  	if (!(encl->flags & SGX_ENCL_CREATED))
>  		return -EINVAL;
> @@ -527,12 +463,6 @@ static long sgx_ioc_enclave_add_page(struct file *filep, void __user *arg)
>  	if (sgx_validate_secinfo(&secinfo))
>  		return -EINVAL;
>  
> -	data_page = alloc_page(GFP_HIGHUSER);
> -	if (!data_page)
> -		return -ENOMEM;
> -
> -	data = kmap(data_page);
> -
>  	/* Set prot bits matching to the SECINFO permissions. */
>  	prot = _calc_vm_trans(secinfo.flags, SGX_SECINFO_R, PROT_READ)  |
>  	       _calc_vm_trans(secinfo.flags, SGX_SECINFO_W, PROT_WRITE) |
> @@ -546,19 +476,7 @@ static long sgx_ioc_enclave_add_page(struct file *filep, void __user *arg)
>  	if ((secinfo.flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
>  		prot |= PROT_READ | PROT_WRITE;
>  
> -	ret = sgx_encl_page_import_user(data, addp.src, prot);
> -	if (ret)
> -		goto out;
> -
> -	ret = sgx_encl_add_page(encl, addp.addr, data, &secinfo, addp.mrmask,
> -				prot);
> -	if (ret)
> -		goto out;
> -
> -out:
> -	kunmap(data_page);
> -	__free_page(data_page);
> -	return ret;
> +	return sgx_encl_add_page(encl, &addp, &secinfo, prot);
>  }
>  
>  static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
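
For context, a minimal userspace sketch of what the reworked flow expects of callers. This assumes the out-of-tree driver's UAPI of this era; the header path and the secinfo field of the add-page struct are assumptions, while SGX_IOC_ENCLAVE_ADD_PAGE and the addr/src/mrmask fields are taken from the patch itself.

/* Hypothetical sketch, not part of the patch. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include <asm/sgx.h>	/* assumed install path of the driver's UAPI header */

static int add_page(int enclave_fd, uint64_t encl_addr, void *contents,
		    uint64_t secinfo_flags)
{
	struct sgx_secinfo secinfo;
	struct sgx_enclave_add_page addp;

	memset(&secinfo, 0, sizeof(secinfo));
	secinfo.flags = secinfo_flags;	/* e.g. SGX_SECINFO_R | SGX_SECINFO_X */

	/*
	 * With this patch the kernel EADDs straight out of 'contents', so the
	 * mapping must stay valid and readable for the duration of the ioctl;
	 * a bad pointer now shows up as -EFAULT from EADD itself.  For an
	 * executable page, the VMA backing 'contents' must also have
	 * VM_MAYEXEC (i.e. not a mapping of a file on a noexec mount),
	 * otherwise the ioctl fails with -EACCES.
	 */
	memset(&addp, 0, sizeof(addp));
	addp.addr = encl_addr;				/* page address inside the enclave */
	addp.src = (uint64_t)(uintptr_t)contents;	/* userspace source, fed directly to EADD */
	addp.secinfo = (uint64_t)(uintptr_t)&secinfo;
	addp.mrmask = 0xffff;				/* EEXTEND all 16 256-byte chunks */

	return ioctl(enclave_fd, SGX_IOC_ENCLAVE_ADD_PAGE, &addp);
}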

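On the dropped TCS pre-validation: the removed sgx_validate_tcs() only looked at a handful of architectural fields, and everything past them in the 4 KiB Thread Control Structure is reserved, must-be-zero space, so meaningful validation really does mean reading back essentially the whole source page. A rough sketch of that layout, with field names mostly following the driver's usage above and sizes per the SDM (treat the exact widths as assumptions of this sketch, not the driver's definition):

/* Hypothetical sketch of the 4 KiB TCS page. */
struct sgx_tcs_sketch {
	uint64_t state;			/* managed by hardware */
	uint64_t flags;			/* only DBGOPTIN is architecturally defined */
	uint64_t ssa_offset;		/* OSSA */
	uint32_t cssa;
	uint32_t nssa;
	uint64_t entry_offset;		/* OENTRY */
	uint64_t aep;
	uint64_t fs_offset;		/* OFSBASGX */
	uint64_t gs_offset;		/* OGSBASGX */
	uint32_t fs_limit;
	uint32_t gs_limit;
	uint8_t  reserved[4024];	/* everything else: must be zero, out to
					   the 4096-byte page boundary */
};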


