Re: [RFC PATCH v1 01/10] s390/uaccess: Add storage key checked access to user memory

On 1/18/22 10:52, Janis Schoetterl-Glausch wrote:
KVM needs a mechanism to access guest memory in a way that honors
storage key protection.
Since the copy_to/from_user implementation uses move instructions
that allow an additional access key to be supplied, we can implement
__copy_from/to_user_with_key by enhancing the existing implementation.

Signed-off-by: Janis Schoetterl-Glausch <scgl@xxxxxxxxxxxxx>

For this I'd like to have buy-in from the kernel maintainers.
The patch looks good to me, but I currently don't understand all of the background, so:

Acked-by: Janosch Frank <frankja@xxxxxxxxxxxxx>
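
For context, the protection rule these accesses need to honor boils down to a
key match per page. A rough model of key-controlled protection as I understand
it (my own simplified sketch, not code from this patch; it ignores the
storage-protection-override and fetch-protection-override controls):

	/* simplified model of key-controlled protection, illustration only */
	static bool key_allows_access(unsigned char access_key,
				      unsigned char storage_key_acc,
				      bool fetch_protected, bool is_store)
	{
		/* access key 0 always matches */
		if (access_key == 0 || access_key == storage_key_acc)
			return true;
		/* fetches are additionally allowed when fetch protection is off */
		return !is_store && !fetch_protected;
	}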

---
  arch/s390/include/asm/uaccess.h | 32 ++++++++++++++++++
  arch/s390/lib/uaccess.c         | 57 +++++++++++++++++++++++----------
  2 files changed, 72 insertions(+), 17 deletions(-)

diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 02b467461163..5138040348cc 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -33,6 +33,38 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
  #define access_ok(addr, size) __access_ok(addr, size)

+unsigned long __must_check
+raw_copy_from_user_with_key(void *to, const void __user *from, unsigned long n,
+			    char key);
+
+unsigned long __must_check
+raw_copy_to_user_with_key(void __user *to, const void *from, unsigned long n,
+			  char key);
+
+static __always_inline __must_check unsigned long
+__copy_from_user_with_key(void *to, const void __user *from, unsigned long n,
+			  char key)
+{
+	might_fault();
+	if (should_fail_usercopy())
+		return n;
+	instrument_copy_from_user(to, from, n);
+	check_object_size(to, n, false);
+	return raw_copy_from_user_with_key(to, from, n, key);
+}
+
+static __always_inline __must_check unsigned long
+__copy_to_user_with_key(void __user *to, const void *from, unsigned long n,
+			char key)
+{
+	might_fault();
+	if (should_fail_usercopy())
+		return n;
+	instrument_copy_to_user(to, from, n);
+	check_object_size(from, n, true);
+	return raw_copy_to_user_with_key(to, from, n, key);
+}
+
  unsigned long __must_check
  raw_copy_from_user(void *to, const void __user *from, unsigned long n);
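
Just to check my understanding of the intended use on the KVM side: a caller
would do something like the sketch below. The helper name and error handling
are hypothetical; only the __copy_from_user_with_key() call is from this
patch, and like copy_from_user() it returns the number of bytes not copied.

	/* hypothetical caller, for illustration only */
	static int read_guest_with_key(void *dst, const void __user *guest_hva,
				       unsigned long len, unsigned char access_key)
	{
		unsigned long rest;

		rest = __copy_from_user_with_key(dst, guest_hva, len, access_key);
		if (rest)
			return -EFAULT;	/* fault or key protection violation */
		return 0;
	}
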
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index d3a700385875..ce7a150dd93a 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -59,11 +59,13 @@ static inline int copy_with_mvcos(void)
  #endif
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
-						 unsigned long size)
+						 unsigned long size, char key)
  {
  	unsigned long tmp1, tmp2;
  	union oac spec = {
+		.oac2.key = key,
  		.oac2.as = PSW_BITS_AS_SECONDARY,
+		.oac2.k = 1,
  		.oac2.a = 1,
  	};
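
(For readers who, like me, had to look up MVCOS: the key checking comes from
the operand-access control above, which the asm loads into register 0. The
annotated initializer below just restates the hunk with my reading of the
fields; illustration only.)

	union oac spec = {
		.oac2.key = key,			/* access key for the source operand */
		.oac2.as = PSW_BITS_AS_SECONDARY,	/* source is in the secondary space */
		.oac2.k = 1,				/* key field valid: use it instead of the PSW key */
		.oac2.a = 1,				/* address-space field valid */
	};
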
@@ -94,19 +96,19 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
  }
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
-						unsigned long size)
+						unsigned long size, char key)
  {
  	unsigned long tmp1, tmp2;
  	tmp1 = -256UL;
  	asm volatile(
  		"   sacf  0\n"
-		"0: mvcp  0(%0,%2),0(%1),%3\n"
+		"0: mvcp  0(%0,%2),0(%1),%[key]\n"
  		"7: jz    5f\n"
  		"1: algr  %0,%3\n"
  		"   la    %1,256(%1)\n"
  		"   la    %2,256(%2)\n"
-		"2: mvcp  0(%0,%2),0(%1),%3\n"
+		"2: mvcp  0(%0,%2),0(%1),%[key]\n"
  		"8: jnz   1b\n"
  		"   j     5f\n"
  		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
@@ -115,7 +117,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
  		"   slgr  %4,%1\n"
  		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
  		"   jnh   6f\n"
-		"4: mvcp  0(%4,%2),0(%1),%3\n"
+		"4: mvcp  0(%4,%2),0(%1),%[key]\n"
  		"9: slgr  %0,%4\n"
  		"   j     6f\n"
  		"5: slgr  %0,%0\n"
@@ -123,24 +125,36 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
  		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
  		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
  		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: : "cc", "memory");
+		: [key] "d" (key << 4)
+		: "cc", "memory");
  	return size;
  }
unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
  {
  	if (copy_with_mvcos())
-		return copy_from_user_mvcos(to, from, n);
-	return copy_from_user_mvcp(to, from, n);
+		return copy_from_user_mvcos(to, from, n, 0);
+	return copy_from_user_mvcp(to, from, n, 0);
  }
  EXPORT_SYMBOL(raw_copy_from_user);
-static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
-					       unsigned long size)
+unsigned long raw_copy_from_user_with_key(void *to, const void __user *from,
+					  unsigned long n, char key)
+{
+	if (copy_with_mvcos())
+		return copy_from_user_mvcos(to, from, n, key);
+	return copy_from_user_mvcp(to, from, n, key);
+}
+EXPORT_SYMBOL(raw_copy_from_user_with_key);
+
+inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
+					unsigned long size, char key)
  {
  	unsigned long tmp1, tmp2;
  	union oac spec = {
+		.oac1.key = key,
  		.oac1.as = PSW_BITS_AS_SECONDARY,
+		.oac1.k = 1,
  		.oac1.a = 1,
  	};
@@ -171,19 +185,19 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
  }
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
-					      unsigned long size)
+					      unsigned long size, char key)
  {
  	unsigned long tmp1, tmp2;
  	tmp1 = -256UL;
  	asm volatile(
  		"   sacf  0\n"
-		"0: mvcs  0(%0,%1),0(%2),%3\n"
+		"0: mvcs  0(%0,%1),0(%2),%[key]\n"
  		"7: jz    5f\n"
  		"1: algr  %0,%3\n"
  		"   la    %1,256(%1)\n"
  		"   la    %2,256(%2)\n"
-		"2: mvcs  0(%0,%1),0(%2),%3\n"
+		"2: mvcs  0(%0,%1),0(%2),%[key]\n"
  		"8: jnz   1b\n"
  		"   j     5f\n"
  		"3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
@@ -192,7 +206,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
  		"   slgr  %4,%1\n"
  		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
  		"   jnh   6f\n"
-		"4: mvcs  0(%4,%1),0(%2),%3\n"
+		"4: mvcs  0(%4,%1),0(%2),%[key]\n"
  		"9: slgr  %0,%4\n"
  		"   j     6f\n"
  		"5: slgr  %0,%0\n"
@@ -200,17 +214,26 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
  		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
  		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
  		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: : "cc", "memory");
+		: [key] "d" (key << 4)
+		: "cc", "memory");
  	return size;
  }
unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
  {
  	if (copy_with_mvcos())
-		return copy_to_user_mvcos(to, from, n);
-	return copy_to_user_mvcs(to, from, n);
+		return copy_to_user_mvcos(to, from, n, 0);
+	return copy_to_user_mvcs(to, from, n, 0);
  }
  EXPORT_SYMBOL(raw_copy_to_user);
+unsigned long raw_copy_to_user_with_key(void __user *to, const void *from,
+					unsigned long n, char key)
+{
+	if (copy_with_mvcos())
+		return copy_to_user_mvcos(to, from, n, key);
+	return copy_to_user_mvcs(to, from, n, key);
+}
+EXPORT_SYMBOL(raw_copy_to_user_with_key);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
  {
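
One more note, mostly to document why the mvcp/mvcs variants pass key << 4: as
I read the instruction definitions, MVCP and MVCS take the access key used for
the secondary-space (user) operand in bits 56-59 of the R3 register, i.e. in
the high nibble of its low-order byte, so the 4-bit key has to be shifted up
before being handed to the asm. Illustration only, not code from the patch:

	/* build the R3 key operand MVCP/MVCS expect (bits 56-59 of the register) */
	static inline unsigned long mvcp_mvcs_key_operand(unsigned char key)
	{
		return (unsigned long)(key & 0x0f) << 4;
	}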




