Re: [PATCH RFC v3 27/36] kmsan: hooks for copy_to_user() and friends

On Fri, Nov 29, 2019 at 4:34 PM Andrey Konovalov <andreyknvl@xxxxxxxxxx> wrote:
>
> On Fri, Nov 22, 2019 at 12:27 PM <glider@xxxxxxxxxx> wrote:
> >
> > Memory that is copied from userspace must be unpoisoned.
> > Before copying memory to userspace, check it and report an error if it
> > contains uninitialized bits.
> >
> > Signed-off-by: Alexander Potapenko <glider@xxxxxxxxxx>
> > To: Alexander Potapenko <glider@xxxxxxxxxx>
> > Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
> > Cc: Vegard Nossum <vegard.nossum@xxxxxxxxxx>
> > Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
> > Cc: linux-mm@xxxxxxxxx
> > ---
> > v3:
> >  - fixed compilation errors reported by kbuild test bot
> >
> > Change-Id: I38428b9c7d1909b8441dcec1749b080494a7af99
> > ---
> >  arch/x86/include/asm/uaccess.h   | 12 ++++++++++++
> >  include/asm-generic/cacheflush.h |  7 ++++++-
> >  include/asm-generic/uaccess.h    | 12 ++++++++++--
> >  include/linux/uaccess.h          | 32 +++++++++++++++++++++++++++-----
> >  lib/iov_iter.c                   |  6 ++++++
> >  lib/usercopy.c                   |  6 +++++-
> >  6 files changed, 66 insertions(+), 9 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
> > index 61d93f062a36..ac4b26583f7c 100644
> > --- a/arch/x86/include/asm/uaccess.h
> > +++ b/arch/x86/include/asm/uaccess.h
> > @@ -6,6 +6,7 @@
> >   */
> >  #include <linux/compiler.h>
> >  #include <linux/kasan-checks.h>
> > +#include <linux/kmsan-checks.h>
> >  #include <linux/string.h>
> >  #include <asm/asm.h>
> >  #include <asm/page.h>
> > @@ -174,6 +175,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
> >                         ASM_CALL_CONSTRAINT                             \
> >                      : "0" (ptr), "i" (sizeof(*(ptr))));                \
> >         (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
> > +       kmsan_unpoison_shadow(&(x), sizeof(*(ptr)));                    \
> >         __builtin_expect(__ret_gu, 0);                                  \
> >  })
> >
> > @@ -248,6 +250,7 @@ extern void __put_user_8(void);
> >         __chk_user_ptr(ptr);                                    \
> >         might_fault();                                          \
> >         __pu_val = x;                                           \
> > +       kmsan_check_memory(&(__pu_val), sizeof(*(ptr)));        \
> >         switch (sizeof(*(ptr))) {                               \
> >         case 1:                                                 \
> >                 __put_user_x(1, __pu_val, ptr, __ret_pu);       \
> > @@ -270,7 +273,9 @@ extern void __put_user_8(void);
> >
> >  #define __put_user_size(x, ptr, size, label)                           \
> >  do {                                                                   \
> > +       __typeof__(*(ptr)) __pus_val = x;                               \
> >         __chk_user_ptr(ptr);                                            \
> > +       kmsan_check_memory(&(__pus_val), size);                         \
> >         switch (size) {                                                 \
> >         case 1:                                                         \
> >                 __put_user_goto(x, ptr, "b", "b", "iq", label); \
> > @@ -295,7 +300,10 @@ do {                                                                       \
> >   */
> >  #define __put_user_size_ex(x, ptr, size)                               \
> >  do {                                                                   \
> > +       __typeof__(*(ptr)) __puse_val;                                  \
>
> Can we do = x here?
Yes. Fixed, thanks!
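(I.e. the declaration now reads roughly

        __typeof__(*(ptr)) __puse_val = x;

and the separate assignment below goes away.)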
> >         __chk_user_ptr(ptr);                                            \
> > +       __puse_val = x;                                                 \
> > +       kmsan_check_memory(&(__puse_val), size);                        \
> >         switch (size) {                                                 \
> >         case 1:                                                         \
> >                 __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
> > @@ -363,6 +371,7 @@ do {                                                                        \
> >         default:                                                        \
> >                 (x) = __get_user_bad();                                 \
> >         }                                                               \
> > +       kmsan_unpoison_shadow(&(x), size);                              \
> >  } while (0)
> >
> >  #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
> > @@ -413,6 +422,7 @@ do {                                                                        \
> >         default:                                                        \
> >                 (x) = __get_user_bad();                                 \
> >         }                                                               \
> > +       kmsan_unpoison_shadow(&(x), size);                              \
> >  } while (0)
> >
> >  #define __get_user_asm_ex(x, addr, itype, rtype, ltype)                        \
> > @@ -428,11 +438,13 @@ do {                                                                      \
> >  #define __put_user_nocheck(x, ptr, size)                       \
> >  ({                                                             \
> >         __label__ __pu_label;                                   \
> > +       __typeof__(*(ptr)) __pun_val = x;                       \
>
> Not sure if this matters, but two lines below do (x).
Right.
> Also, why can't we use __pu_val instead of defining __pun_val?
Will do.
>
> >         int __pu_err = -EFAULT;                                 \
> >         __typeof__(*(ptr)) __pu_val = (x);                      \
> >         __typeof__(ptr) __pu_ptr = (ptr);                       \
> >         __typeof__(size) __pu_size = (size);                    \
> >         __uaccess_begin();                                      \
> > +       kmsan_check_memory(&(__pun_val), size);                 \
> >         __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);     \
> >         __pu_err = 0;                                           \
> >  __pu_label:                                                    \
> > diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
> > index a950a22c4890..707531dccf5e 100644
> > --- a/include/asm-generic/cacheflush.h
> > +++ b/include/asm-generic/cacheflush.h
> > @@ -4,6 +4,7 @@
> >
> >  /* Keep includes the same across arches.  */
> >  #include <linux/mm.h>
> > +#include <linux/kmsan-checks.h>
> >
> >  #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
> >
> > @@ -72,10 +73,14 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
> >
> >  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
> >         do { \
> > +               kmsan_check_memory(src, len); \
> >                 memcpy(dst, src, len); \
> >                 flush_icache_user_range(vma, page, vaddr, len); \
> >         } while (0)
> >  #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
> > -       memcpy(dst, src, len)
> > +       do { \
> > +               memcpy(dst, src, len); \
> > +               kmsan_unpoison_shadow(dst, len); \
> > +       } while (0)
> >
> >  #endif /* __ASM_CACHEFLUSH_H */
> > diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
> > index e935318804f8..508ee649aeef 100644
> > --- a/include/asm-generic/uaccess.h
> > +++ b/include/asm-generic/uaccess.h
> > @@ -142,7 +142,11 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
> >
> >  static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
> >  {
> > -       return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
> > +       int n;
> > +
> > +       n = raw_copy_to_user(ptr, x, size);
> > +       kmsan_copy_to_user(ptr, x, size, n);
> > +       return unlikely(n) ? -EFAULT : 0;
> >  }
> >
> >  #define __put_user_fn(sz, u, k)        __put_user_fn(sz, u, k)
> > @@ -203,7 +207,11 @@ extern int __put_user_bad(void) __attribute__((noreturn));
> >  #ifndef __get_user_fn
> >  static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
> >  {
> > -       return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
> > +       int copied, to_copy = size;
> > +
> > +       copied = raw_copy_from_user(x, ptr, size);
> > +       kmsan_unpoison_shadow(x, to_copy - copied);
> > +       return unlikely(copied) ? -EFAULT : 0;
> >  }
> >
> >  #define __get_user_fn(sz, u, k)        __get_user_fn(sz, u, k)
> > diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
> > index d4ee6e942562..7550d11a8077 100644
> > --- a/include/linux/uaccess.h
> > +++ b/include/linux/uaccess.h
> > @@ -5,6 +5,7 @@
> >  #include <linux/sched.h>
> >  #include <linux/thread_info.h>
> >  #include <linux/kasan-checks.h>
> > +#include <linux/kmsan-checks.h>
> >
> >  #define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
> >
> > @@ -58,18 +59,26 @@
> >  static __always_inline __must_check unsigned long
> >  __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
> >  {
> > +       unsigned long to_copy = n;
> > +
> >         kasan_check_write(to, n);
> >         check_object_size(to, n, false);
> > -       return raw_copy_from_user(to, from, n);
> > +       n = raw_copy_from_user(to, from, n);
> > +       kmsan_unpoison_shadow(to, to_copy - n);
> > +       return n;
> >  }
> >
> >  static __always_inline __must_check unsigned long
> >  __copy_from_user(void *to, const void __user *from, unsigned long n)
> >  {
> > +       unsigned long to_copy = n;
>
> This is confusing. I think we need a var for raw_copy_from_user()
> return value instead. Same in functions above and below.
raw_copy_from_user() returns the number of bytes that were _not_ copied
from userspace, so if it returns 0 we need to unpoison all to_copy
bytes; in general we unpoison to_copy minus the return value.
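For illustration, a minimal sketch of the intended pattern (not part of
the patch; copy_and_unpoison() is a made-up helper name):

        /*
         * raw_copy_from_user() returns the number of bytes that were NOT
         * copied, so only the prefix actually written to the kernel buffer
         * is initialized and gets unpoisoned.
         */
        static unsigned long copy_and_unpoison(void *to, const void __user *from,
                                               unsigned long n)
        {
                unsigned long not_copied = raw_copy_from_user(to, from, n);

                kmsan_unpoison_shadow(to, n - not_copied);
                return not_copied;
        }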
> > +
> >         might_fault();
> >         kasan_check_write(to, n);
> >         check_object_size(to, n, false);
> > -       return raw_copy_from_user(to, from, n);
> > +       n = raw_copy_from_user(to, from, n);
> > +       kmsan_unpoison_shadow(to, to_copy - n);
> > +       return n;
> >  }
> >
> >  /**
> > @@ -88,29 +97,39 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
> >  static __always_inline __must_check unsigned long
> >  __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
> >  {
> > +       unsigned long to_copy = n;
> > +
> >         kasan_check_read(from, n);
> >         check_object_size(from, n, true);
> > -       return raw_copy_to_user(to, from, n);
> > +       n = raw_copy_to_user(to, from, n);
> > +       kmsan_copy_to_user((const void *)to, from, to_copy, n);
> > +       return n;
> >  }
> >
> >  static __always_inline __must_check unsigned long
> >  __copy_to_user(void __user *to, const void *from, unsigned long n)
> >  {
> > +       unsigned long to_copy = n;
> > +
> >         might_fault();
> >         kasan_check_read(from, n);
> >         check_object_size(from, n, true);
> > -       return raw_copy_to_user(to, from, n);
> > +       n = raw_copy_to_user(to, from, n);
> > +       kmsan_copy_to_user((const void *)to, from, to_copy, n);
> > +       return n;
> >  }
> >
> >  #ifdef INLINE_COPY_FROM_USER
> >  static inline __must_check unsigned long
> >  _copy_from_user(void *to, const void __user *from, unsigned long n)
> >  {
> > -       unsigned long res = n;
> > +       unsigned long res = n, to_copy = n;
> > +
> >         might_fault();
> >         if (likely(access_ok(from, n))) {
> >                 kasan_check_write(to, n);
> >                 res = raw_copy_from_user(to, from, n);
> > +               kmsan_unpoison_shadow(to, to_copy - res);
> >         }
> >         if (unlikely(res))
> >                 memset(to + (n - res), 0, res);
> > @@ -125,10 +144,13 @@ _copy_from_user(void *, const void __user *, unsigned long);
> >  static inline __must_check unsigned long
> >  _copy_to_user(void __user *to, const void *from, unsigned long n)
> >  {
> > +       unsigned long to_copy = n;
> > +
> >         might_fault();
> >         if (access_ok(to, n)) {
> >                 kasan_check_read(from, n);
> >                 n = raw_copy_to_user(to, from, n);
> > +               kmsan_copy_to_user(to, from, to_copy, n);
> >         }
> >         return n;
> >  }
> > diff --git a/lib/iov_iter.c b/lib/iov_iter.c
> > index 639d5e7014c1..f038676068b2 100644
> > --- a/lib/iov_iter.c
> > +++ b/lib/iov_iter.c
> > @@ -137,18 +137,24 @@
> >
> >  static int copyout(void __user *to, const void *from, size_t n)
> >  {
> > +       size_t to_copy = n;
> > +
> >         if (access_ok(to, n)) {
> >                 kasan_check_read(from, n);
> >                 n = raw_copy_to_user(to, from, n);
> > +               kmsan_copy_to_user(to, from, to_copy, n);
> >         }
> >         return n;
> >  }
> >
> >  static int copyin(void *to, const void __user *from, size_t n)
> >  {
> > +       size_t to_copy = n;
> > +
> >         if (access_ok(from, n)) {
> >                 kasan_check_write(to, n);
> >                 n = raw_copy_from_user(to, from, n);
> > +               kmsan_unpoison_shadow(to, to_copy - n);
> >         }
> >         return n;
> >  }
> > diff --git a/lib/usercopy.c b/lib/usercopy.c
> > index cbb4d9ec00f2..abfd93edecba 100644
> > --- a/lib/usercopy.c
> > +++ b/lib/usercopy.c
> > @@ -1,4 +1,5 @@
> >  // SPDX-License-Identifier: GPL-2.0
> > +#include <linux/kmsan-checks.h>
> >  #include <linux/uaccess.h>
> >  #include <linux/bitops.h>
> >
> > @@ -7,11 +8,12 @@
> >  #ifndef INLINE_COPY_FROM_USER
> >  unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
> >  {
> > -       unsigned long res = n;
> > +       unsigned long res = n, to_copy = n;
> >         might_fault();
> >         if (likely(access_ok(from, n))) {
> >                 kasan_check_write(to, n);
> >                 res = raw_copy_from_user(to, from, n);
> > +               kmsan_unpoison_shadow(to, to_copy - res);
> >         }
> >         if (unlikely(res))
> >                 memset(to + (n - res), 0, res);
> > @@ -23,10 +25,12 @@ EXPORT_SYMBOL(_copy_from_user);
> >  #ifndef INLINE_COPY_TO_USER
> >  unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
> >  {
> > +       unsigned long to_copy = n;
> >         might_fault();
> >         if (likely(access_ok(to, n))) {
> >                 kasan_check_read(from, n);
> >                 n = raw_copy_to_user(to, from, n);
> > +               kmsan_copy_to_user(to, from, to_copy, n);
> >         }
> >         return n;
> >  }
> > --
> > 2.24.0.432.g9d3f5f5b63-goog
> >



-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Paul Manicle, Halimah DeLaine Prado
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg




