[PATCH 6/7] m68k: provide __{get,put}_kernel_nofault

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Allow non-faulting access to kernel addresses without overriding the
address space.  Implemented by passing the instruction name to the
low-level assembly macros as an argument, and forcing the use of the
normal move instructions for kernel access.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/m68k/include/asm/uaccess.h | 90 ++++++++++++++++++++++++++-------
 1 file changed, 72 insertions(+), 18 deletions(-)

diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h
index 384b2a6b135c..2bedb1b8c4bf 100644
--- a/arch/m68k/include/asm/uaccess.h
+++ b/arch/m68k/include/asm/uaccess.h
@@ -39,9 +39,9 @@ static inline int access_ok(const void __user *addr,
 #define	MOVES	"move"
 #endif
 
-#define __put_user_asm(res, x, ptr, bwl, reg)		\
+#define __put_user_asm(inst, res, x, ptr, bwl, reg)	\
 asm volatile ("\n"					\
-	"1:	"MOVES"."#bwl"	%2,%1\n"		\
+	"1:	"inst"."#bwl"	%2,%1\n"		\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.even\n"				\
@@ -57,13 +57,13 @@ asm volatile ("\n"					\
 	: "+d" (res), "=m" (*(ptr))			\
 	: #reg (x), "i" (-EFAULT))
 
-#define __put_user_asm8(res, x, ptr)				\
+#define __put_user_asm8(inst, res, x, ptr)			\
 do {								\
 	const void *__pu_ptr = (const void __force *)(ptr);	\
 								\
 	asm volatile ("\n"					\
-		"1:	"MOVES".l %2,(%1)+\n"			\
-		"2:	"MOVES".l %R2,(%1)\n"			\
+		"1:	"inst".l %2,(%1)+\n"			\
+		"2:	"inst".l %R2,(%1)\n"			\
 		"3:\n"						\
 		"	.section .fixup,\"ax\"\n"		\
 		"	.even\n"				\
@@ -94,16 +94,16 @@ do {								\
 	__chk_user_ptr(ptr);						\
 	switch (sizeof (*(ptr))) {					\
 	case 1:								\
-		__put_user_asm(__pu_err, __pu_val, ptr, b, d);		\
+		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d);	\
 		break;							\
 	case 2:								\
-		__put_user_asm(__pu_err, __pu_val, ptr, w, r);		\
+		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r);	\
 		break;							\
 	case 4:								\
-		__put_user_asm(__pu_err, __pu_val, ptr, l, r);		\
+		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r);	\
 		break;							\
 	case 8:								\
-		__put_user_asm8(__pu_err, __pu_val, ptr);		\
+		__put_user_asm8(MOVES, __pu_err, __pu_val, ptr);	\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
@@ -113,10 +113,10 @@ do {								\
 #define put_user(x, ptr)	__put_user(x, ptr)
 
 
-#define __get_user_asm(res, x, ptr, type, bwl, reg) ({			\
+#define __get_user_asm(inst, res, x, ptr, type, bwl, reg) ({		\
 	type __gu_val;							\
 	asm volatile ("\n"						\
-		"1:	"MOVES"."#bwl"	%2,%1\n"			\
+		"1:	"inst"."#bwl"	%2,%1\n"			\
 		"2:\n"							\
 		"	.section .fixup,\"ax\"\n"			\
 		"	.even\n"					\
@@ -134,7 +134,7 @@ do {								\
 	(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;	\
 })
 
-#define __get_user_asm8(res, x, ptr)					\
+#define __get_user_asm8(inst, res, x, ptr)				\
 do {									\
 	const void *__gu_ptr = (const void __force *)(ptr);		\
 	union {								\
@@ -143,8 +143,8 @@ do {									\
 	} __gu_val;							\
 									\
 	asm volatile ("\n"						\
-		"1:	"MOVES".l	(%2)+,%1\n"			\
-		"2:	"MOVES".l	(%2),%R1\n"			\
+		"1:	"inst".l (%2)+,%1\n"				\
+		"2:	"inst".l (%2),%R1\n"				\
 		"3:\n"							\
 		"	.section .fixup,\"ax\"\n"			\
 		"	.even\n"					\
@@ -172,16 +172,16 @@ do {									\
 	__chk_user_ptr(ptr);						\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__get_user_asm(__gu_err, x, ptr, u8, b, d);		\
+		__get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d);	\
 		break;							\
 	case 2:								\
-		__get_user_asm(__gu_err, x, ptr, u16, w, r);		\
+		__get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r);	\
 		break;							\
 	case 4:								\
-		__get_user_asm(__gu_err, x, ptr, u32, l, r);		\
+		__get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r);	\
 		break;							\
 	case 8:								\
-		__get_user_asm8(__gu_err, x, ptr);			\
+		__get_user_asm8(MOVES, __gu_err, x, ptr);		\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
@@ -190,6 +190,60 @@ do {									\
 })
 #define get_user(x, ptr) __get_user(x, ptr)
 
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label)			\
+do {									\
+	type *__gk_dst = (type *)(dst);					\
+	type *__gk_src = (type *)(src);					\
+	int __gk_err = 0;						\
+									\
+	switch (sizeof(type)) {						\
+	case 1:								\
+		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, u8, b, d); \
+		break;							\
+	case 2:								\
+		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, u16, w, r); \
+		break;							\
+	case 4:								\
+		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, u32, l, r); \
+		break;							\
+	case 8:								\
+		__get_user_asm8("move", __gk_err, *__gk_dst, __gk_src);	\
+		break;							\
+	default:							\
+		BUILD_BUG();						\
+	}								\
+	if (unlikely(__gk_err))						\
+		goto err_label;						\
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label)			\
+do {									\
+	type __pk_src = *(type *)(src);					\
+	type *__pk_dst = (type *)(dst);					\
+	int __pk_err = 0;						\
+									\
+	switch (sizeof(type)) {						\
+	case 1:								\
+		__put_user_asm("move", __pk_err, __pk_src, __pk_dst, b, d); \
+		break;							\
+	case 2:								\
+		__put_user_asm("move", __pk_err, __pk_src, __pk_dst, w, r); \
+		break;							\
+	case 4:								\
+		__put_user_asm("move", __pk_err, __pk_src, __pk_dst, l, r); \
+		break;							\
+	case 8:								\
+		__put_user_asm8("move", __pk_err, __pk_src, __pk_dst);	\
+		break;							\
+	default:							\
+		BUILD_BUG();						\
+	}								\
+	if (unlikely(__pk_err))						\
+		goto err_label;						\
+} while (0)
+
 unsigned long raw_copy_from_user(void *to, const void __user *from,
 		unsigned long n);
 unsigned long raw_copy_to_user(void __user *to, const void *from,
-- 
2.30.2




[Index of Archives]     [Video for Linux]     [Yosemite News]     [Linux S/390]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux