Re: [PATCH v4 01/10] s390/uaccess: Add copy_from/to_user_key functions

On Fri, 11 Feb 2022 19:22:06 +0100
Janis Schoetterl-Glausch <scgl@xxxxxxxxxxxxx> wrote:

> Add copy_from/to_user_key functions, which perform storage key checking.
> These functions can be used by KVM for emulating instructions that need
> to be key checked.
> These functions differ from their non _key counterparts in
> include/linux/uaccess.h only in the additional key argument and must be
> kept in sync with those.
> 
> Since the existing uaccess implementation on s390 makes use of move
> instructions that support having an additional access key supplied,
> we can implement raw_copy_from/to_user_key by enhancing the
> existing implementation.
> 
> Signed-off-by: Janis Schoetterl-Glausch <scgl@xxxxxxxxxxxxx>
> Acked-by: Heiko Carstens <hca@xxxxxxxxxxxxx>
> Reviewed-by: Christian Borntraeger <borntraeger@xxxxxxxxxxxxx>
> Acked-by: Janosch Frank <frankja@xxxxxxxxxxxxx>

Reviewed-by: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
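
A usage note that might help readers coming from the KVM side: these wrappers
are used exactly like copy_from/to_user(), with the storage access key as an
additional argument, and like their generic counterparts they return the
number of bytes that could not be copied. A minimal sketch of a hypothetical
caller (the function and variable names below are made up for illustration
and are not part of this series) could look like:

	/*
	 * Hypothetical example only; relies on the declarations this
	 * patch adds to arch/s390/include/asm/uaccess.h.
	 */
	static int read_guest_key_checked(void *buf, const void __user *gaddr,
					  unsigned long len, u8 access_key)
	{
		unsigned long uncopied;

		/* the hardware performs the key check during the copy */
		uncopied = copy_from_user_key(buf, gaddr, len, access_key);
		if (uncopied)
			return -EFAULT;	/* fault or key-protection mismatch */
		return 0;
	}

As with copy_from_user(), a return value of 0 means the whole range was
copied successfully.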

> ---
>  arch/s390/include/asm/uaccess.h | 22 +++++++++
>  arch/s390/lib/uaccess.c         | 81 +++++++++++++++++++++++++--------
>  2 files changed, 85 insertions(+), 18 deletions(-)
> 
> diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
> index d74e26b48604..ba1bcb91af95 100644
> --- a/arch/s390/include/asm/uaccess.h
> +++ b/arch/s390/include/asm/uaccess.h
> @@ -44,6 +44,28 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
>  #define INLINE_COPY_TO_USER
>  #endif
>  
> +unsigned long __must_check
> +_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);
> +
> +static __always_inline unsigned long __must_check
> +copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
> +{
> +	if (likely(check_copy_size(to, n, false)))
> +		n = _copy_from_user_key(to, from, n, key);
> +	return n;
> +}
> +
> +unsigned long __must_check
> +_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);
> +
> +static __always_inline unsigned long __must_check
> +copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
> +{
> +	if (likely(check_copy_size(from, n, true)))
> +		n = _copy_to_user_key(to, from, n, key);
> +	return n;
> +}
> +
>  int __put_user_bad(void) __attribute__((noreturn));
>  int __get_user_bad(void) __attribute__((noreturn));
>  
> diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
> index 8a5d21461889..b709239feb5d 100644
> --- a/arch/s390/lib/uaccess.c
> +++ b/arch/s390/lib/uaccess.c
> @@ -59,11 +59,13 @@ static inline int copy_with_mvcos(void)
>  #endif
>  
>  static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
> -						 unsigned long size)
> +						 unsigned long size, unsigned long key)
>  {
>  	unsigned long tmp1, tmp2;
>  	union oac spec = {
> +		.oac2.key = key,
>  		.oac2.as = PSW_BITS_AS_SECONDARY,
> +		.oac2.k = 1,
>  		.oac2.a = 1,
>  	};
>  
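
Just as a reference for other reviewers: union oac was introduced recently in
arch/s390/include/asm/uaccess.h. From memory its layout is roughly the one
below, so setting .oac2.key together with .oac2.k = 1 makes MVCOS apply the
supplied access key to the second operand (the userspace source), mirroring
what the .oac1 fields do for the userspace destination in the copy_to_user
path further down:

	/* rough sketch from memory, see arch/s390/include/asm/uaccess.h */
	union oac {
		unsigned int val;
		struct {
			struct {
				unsigned short key : 4;
				unsigned short     : 4;
				unsigned short as  : 2;
				unsigned short     : 4;
				unsigned short k   : 1;
				unsigned short a   : 1;
			} oac1;	/* operand 1 (destination) access control */
			struct {
				unsigned short key : 4;
				unsigned short     : 4;
				unsigned short as  : 2;
				unsigned short     : 4;
				unsigned short k   : 1;
				unsigned short a   : 1;
			} oac2;	/* operand 2 (source) access control */
		};
	};
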
> @@ -94,19 +96,19 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
>  }
>  
>  static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
> -						unsigned long size)
> +						unsigned long size, unsigned long key)
>  {
>  	unsigned long tmp1, tmp2;
>  
>  	tmp1 = -256UL;
>  	asm volatile(
>  		"   sacf  0\n"
> -		"0: mvcp  0(%0,%2),0(%1),%3\n"
> +		"0: mvcp  0(%0,%2),0(%1),%[key]\n"
>  		"7: jz    5f\n"
>  		"1: algr  %0,%3\n"
>  		"   la    %1,256(%1)\n"
>  		"   la    %2,256(%2)\n"
> -		"2: mvcp  0(%0,%2),0(%1),%3\n"
> +		"2: mvcp  0(%0,%2),0(%1),%[key]\n"
>  		"8: jnz   1b\n"
>  		"   j     5f\n"
>  		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
> @@ -115,7 +117,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
>  		"   slgr  %4,%1\n"
>  		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
>  		"   jnh   6f\n"
> -		"4: mvcp  0(%4,%2),0(%1),%3\n"
> +		"4: mvcp  0(%4,%2),0(%1),%[key]\n"
>  		"9: slgr  %0,%4\n"
>  		"   j     6f\n"
>  		"5: slgr  %0,%0\n"
> @@ -123,24 +125,49 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
>  		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
>  		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
>  		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
> -		: : "cc", "memory");
> +		: [key] "d" (key << 4)
> +		: "cc", "memory");
>  	return size;
>  }
>  
> -unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
> +static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
> +					    unsigned long n, unsigned long key)
>  {
>  	if (copy_with_mvcos())
> -		return copy_from_user_mvcos(to, from, n);
> -	return copy_from_user_mvcp(to, from, n);
> +		return copy_from_user_mvcos(to, from, n, key);
> +	return copy_from_user_mvcp(to, from, n, key);
> +}
> +
> +unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
> +{
> +	return raw_copy_from_user_key(to, from, n, 0);
>  }
>  EXPORT_SYMBOL(raw_copy_from_user);
>  
> +unsigned long _copy_from_user_key(void *to, const void __user *from,
> +				  unsigned long n, unsigned long key)
> +{
> +	unsigned long res = n;
> +
> +	might_fault();
> +	if (!should_fail_usercopy()) {
> +		instrument_copy_from_user(to, from, n);
> +		res = raw_copy_from_user_key(to, from, n, key);
> +	}
> +	if (unlikely(res))
> +		memset(to + (n - res), 0, res);
> +	return res;
> +}
> +EXPORT_SYMBOL(_copy_from_user_key);
> +
>  static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
> -					       unsigned long size)
> +					       unsigned long size, unsigned long key)
>  {
>  	unsigned long tmp1, tmp2;
>  	union oac spec = {
> +		.oac1.key = key,
>  		.oac1.as = PSW_BITS_AS_SECONDARY,
> +		.oac1.k = 1,
>  		.oac1.a = 1,
>  	};
>  
> @@ -171,19 +198,19 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
>  }
>  
>  static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
> -					      unsigned long size)
> +					      unsigned long size, unsigned long key)
>  {
>  	unsigned long tmp1, tmp2;
>  
>  	tmp1 = -256UL;
>  	asm volatile(
>  		"   sacf  0\n"
> -		"0: mvcs  0(%0,%1),0(%2),%3\n"
> +		"0: mvcs  0(%0,%1),0(%2),%[key]\n"
>  		"7: jz    5f\n"
>  		"1: algr  %0,%3\n"
>  		"   la    %1,256(%1)\n"
>  		"   la    %2,256(%2)\n"
> -		"2: mvcs  0(%0,%1),0(%2),%3\n"
> +		"2: mvcs  0(%0,%1),0(%2),%[key]\n"
>  		"8: jnz   1b\n"
>  		"   j     5f\n"
>  		"3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
> @@ -192,7 +219,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
>  		"   slgr  %4,%1\n"
>  		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
>  		"   jnh   6f\n"
> -		"4: mvcs  0(%4,%1),0(%2),%3\n"
> +		"4: mvcs  0(%4,%1),0(%2),%[key]\n"
>  		"9: slgr  %0,%4\n"
>  		"   j     6f\n"
>  		"5: slgr  %0,%0\n"
> @@ -200,18 +227,36 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
>  		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
>  		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
>  		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
> -		: : "cc", "memory");
> +		: [key] "d" (key << 4)
> +		: "cc", "memory");
>  	return size;
>  }
>  
> -unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
> +static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
> +					  unsigned long n, unsigned long key)
>  {
>  	if (copy_with_mvcos())
> -		return copy_to_user_mvcos(to, from, n);
> -	return copy_to_user_mvcs(to, from, n);
> +		return copy_to_user_mvcos(to, from, n, key);
> +	return copy_to_user_mvcs(to, from, n, key);
> +}
> +
> +unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
> +{
> +	return raw_copy_to_user_key(to, from, n, 0);
>  }
>  EXPORT_SYMBOL(raw_copy_to_user);
>  
> +unsigned long _copy_to_user_key(void __user *to, const void *from,
> +				unsigned long n, unsigned long key)
> +{
> +	might_fault();
> +	if (should_fail_usercopy())
> +		return n;
> +	instrument_copy_to_user(to, from, n);
> +	return raw_copy_to_user_key(to, from, n, key);
> +}
> +EXPORT_SYMBOL(_copy_to_user_key);
> +
>  static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
>  {
>  	unsigned long tmp1, tmp2;



