On Mon, Sep 12, 2022 at 11:45:40AM +0200, Marco Elver wrote:
> With Clang version 16+, -fsanitize=thread will turn
> memcpy/memset/memmove calls in instrumented functions into
> __tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.
>
> Add these functions to the core KCSAN runtime, so that we (a) catch data
> races with mem* functions, and (b) won't run into linker errors with
> such newer compilers.
>
> Cc: stable@xxxxxxxxxxxxxxx # v5.10+
> Signed-off-by: Marco Elver <elver@xxxxxxxxxx>

Queued and pushed, thank you!

							Thanx, Paul

> ---
> v3:
> * Truncate sizes larger than MAX_ENCODABLE_SIZE, so we still set up
>   watchpoints on them. Iterating through MAX_ENCODABLE_SIZE blocks may
>   result in pathological cases where performance would seriously suffer.
>   So let's avoid that for now.
> * Just use memcpy/memset/memmove instead of __mem*() functions. Many
>   architectures that already support KCSAN don't define them (mips,
>   s390), and having both __mem* and mem versions of the functions
>   provides little benefit elsewhere; and backporting would become more
>   difficult, too. The compiler should not inline them given all
>   parameters are non-constants here.
>
> v2:
> * Fix for architectures which do not provide their own
>   memcpy/memset/memmove and instead use the generic versions in
>   lib/string. In this case we'll just alias the __tsan_ variants.
> ---
>  kernel/kcsan/core.c | 50 +++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 50 insertions(+)
>
> diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
> index fe12dfe254ec..54d077e1a2dc 100644
> --- a/kernel/kcsan/core.c
> +++ b/kernel/kcsan/core.c
> @@ -14,10 +14,12 @@
>  #include <linux/init.h>
>  #include <linux/kernel.h>
>  #include <linux/list.h>
> +#include <linux/minmax.h>
>  #include <linux/moduleparam.h>
>  #include <linux/percpu.h>
>  #include <linux/preempt.h>
>  #include <linux/sched.h>
> +#include <linux/string.h>
>  #include <linux/uaccess.h>
>
>  #include "encoding.h"
> @@ -1308,3 +1310,51 @@ noinline void __tsan_atomic_signal_fence(int memorder)
>  	}
>  }
>  EXPORT_SYMBOL(__tsan_atomic_signal_fence);
> +
> +#ifdef __HAVE_ARCH_MEMSET
> +void *__tsan_memset(void *s, int c, size_t count);
> +noinline void *__tsan_memset(void *s, int c, size_t count)
> +{
> +	/*
> +	 * Instead of not setting up watchpoints where accessed size is greater
> +	 * than MAX_ENCODABLE_SIZE, truncate checked size to MAX_ENCODABLE_SIZE.
> +	 */
> +	size_t check_len = min_t(size_t, count, MAX_ENCODABLE_SIZE);
> +
> +	check_access(s, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
> +	return memset(s, c, count);
> +}
> +#else
> +void *__tsan_memset(void *s, int c, size_t count) __alias(memset);
> +#endif
> +EXPORT_SYMBOL(__tsan_memset);
> +
> +#ifdef __HAVE_ARCH_MEMMOVE
> +void *__tsan_memmove(void *dst, const void *src, size_t len);
> +noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
> +{
> +	size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);
> +
> +	check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
> +	check_access(src, check_len, 0, _RET_IP_);
> +	return memmove(dst, src, len);
> +}
> +#else
> +void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove);
> +#endif
> +EXPORT_SYMBOL(__tsan_memmove);
> +
> +#ifdef __HAVE_ARCH_MEMCPY
> +void *__tsan_memcpy(void *dst, const void *src, size_t len);
> +noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
> +{
> +	size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);
> +
> +	check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
> +	check_access(src, check_len, 0, _RET_IP_);
> +	return memcpy(dst, src, len);
> +}
> +#else
> +void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy);
> +#endif
> +EXPORT_SYMBOL(__tsan_memcpy);
> --
> 2.37.2.789.g6183377224-goog
>
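As an illustrative sketch (not part of the patch above): per the commit message, Clang 16+ with -fsanitize=thread compiles plain mem* calls in instrumented functions into calls to the corresponding __tsan_* functions, which is why the KCSAN runtime must now define those symbols to avoid link failures. The function example_copy() below is a hypothetical instrumented kernel function, used only to show where the rewrite happens.

	/* Hypothetical instrumented kernel code, for illustration only. */
	#include <linux/string.h>
	#include <linux/types.h>

	void example_copy(void *dst, const void *src, size_t len)
	{
		/*
		 * With KCSAN instrumentation and Clang 16+, this memcpy()
		 * call is emitted as a call to __tsan_memcpy(dst, src, len),
		 * which (per the patch) runs check_access() on dst and src,
		 * truncating the checked size to MAX_ENCODABLE_SIZE, and
		 * then performs the real memcpy().
		 */
		memcpy(dst, src, len);
	}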