As of now, __get_user_fn()/__put_user_fn() in asm-generic/uaccess.h default to
calling the arch-provided __copy_{to,from}_user() routines. Being general
purpose (w.r.t. buffer alignment and length), those routines lead to alignment
checks in the generated code (on arches which don't support unaligned
loads/stores). Given that in this case we already know the data involved is
"unit" sized and aligned, using the vanilla copy backend is a bit wasteful.

This change thus allows arches to override the aforementioned routines.

Signed-off-by: Vineet Gupta <vgupta@xxxxxxxxxxxx>
Acked-by: Arnd Bergmann <arnd@xxxxxxxx>
---
 include/asm-generic/uaccess.h |   11 +++++++++++
 1 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 5f6ee61..c184aa8 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -169,12 +169,18 @@ static inline __must_check long __copy_to_user(void __user *to,
 		-EFAULT;					\
 })
 
+#ifndef __put_user_fn
+
 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
 {
 	size = __copy_to_user(ptr, x, size);
 	return size ? -EFAULT : size;
 }
 
+#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)
+
+#endif
+
 extern int __put_user_bad(void) __attribute__((noreturn));
 
 #define __get_user(x, ptr)					\
@@ -225,12 +231,17 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 		-EFAULT;					\
 })
 
+#ifndef __get_user_fn
 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 {
 	size = __copy_from_user(x, ptr, size);
 	return size ? -EFAULT : size;
 }
 
+#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)
+
+#endif
+
 extern int __get_user_bad(void) __attribute__((noreturn));
 
 #ifndef __copy_from_user_inatomic
-- 
1.7.4.1
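
For illustration only (not part of this patch), an architecture could take
advantage of the new hooks roughly as follows in its own <asm/uaccess.h>,
before pulling in <asm-generic/uaccess.h>. The __arch_put_user_unit() helper
below is hypothetical, standing in for whatever fixed-size, alignment-free
store primitive the arch provides:

	/*
	 * Hypothetical arch-side override: define the inline first, then the
	 * macro of the same name so that asm-generic/uaccess.h skips its
	 * generic fallback (the #ifndef added by this patch).
	 */
	static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
	{
		/*
		 * __arch_put_user_unit() is a made-up name for a unit-sized,
		 * aligned copy that avoids the generic alignment checks.
		 */
		return __arch_put_user_unit(ptr, x, size) ? -EFAULT : 0;
	}
	#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

	#include <asm-generic/uaccess.h>

__get_user_fn() can be overridden the same way, using the arch's load-side
primitive in place of __copy_from_user().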