On Fri, Oct 18, 2019 at 3:22 PM Qian Cai <cai@xxxxxx> wrote:
>
> On Fri, 2019-10-18 at 11:42 +0200, glider@xxxxxxxxxx wrote:
> > In order to report uninitialized memory coming from heap allocations,
> > KMSAN has to poison them unless they're created with __GFP_ZERO.
> >
> > It's handy that we need KMSAN hooks in the same places where
> > init_on_alloc/init_on_free initialization is performed.
>
> Well, there is SLUB debug which has red zoning and poisoning checks. What's
> the value of this patch?
Sorry, are you talking about the whole patchset or just this patch?
Note that SLUB debug is unable to detect uninitialized values with
bit-to-bit precision, nor have I heard of anyone using it to detect
uses of uninitialized memory in the kernel at all.
The purpose of SLUB debug is to detect corruption of freed memory.
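To make the difference concrete, here is a minimal made-up sketch of the
class of bug KMSAN is built to catch (struct foo and kmsan_example() are
hypothetical, invented for illustration, not taken from this patch):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct foo {
	int a;
	int b;	/* never written below */
};

static int kmsan_example(void)
{
	/* No __GFP_ZERO, so the object starts out as stale heap memory. */
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->a = 1;

	/*
	 * The branch below depends on f->b, which is never initialized.
	 * KMSAN reports it, because its shadow records that those exact
	 * bits were never written. SLUB debug stays silent here: the
	 * object is live and the access is in bounds, so red zoning and
	 * poisoning of freed memory never come into play.
	 */
	if (f->b)
		pr_info("stale data\n");

	kfree(f);
	return 0;
}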
> > Signed-off-by: Alexander Potapenko <glider@xxxxxxxxxx>
> > To: Alexander Potapenko <glider@xxxxxxxxxx>
> > Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> > Cc: Vegard Nossum <vegard.nossum@xxxxxxxxxx>
> > Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
> > Cc: linux-mm@xxxxxxxxx
> > ---
> >
> > Change-Id: I51103b7981d3aabed747d0c85cbdc85568665871
> > ---
> >  mm/slub.c | 37 +++++++++++++++++++++++++++++++------
> >  1 file changed, 31 insertions(+), 6 deletions(-)
> >
> > diff --git a/mm/slub.c b/mm/slub.c
> > index 3d63ae320d31..3d6d4c63446e 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -21,6 +21,8 @@
> >  #include <linux/proc_fs.h>
> >  #include <linux/seq_file.h>
> >  #include <linux/kasan.h>
> > +#include <linux/kmsan.h>
> > +#include <linux/kmsan-checks.h> /* KMSAN_INIT_VALUE */
> >  #include <linux/cpu.h>
> >  #include <linux/cpuset.h>
> >  #include <linux/mempolicy.h>
> > @@ -285,17 +287,27 @@ static void prefetch_freepointer(const struct kmem_cache *s, void *object)
> >  	prefetch(object + s->offset);
> >  }
> >
> > +/*
> > + * When running under KMSAN, get_freepointer_safe() may return an uninitialized
> > + * pointer value in the case the current thread loses the race for the next
> > + * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
> > + * slab_alloc_node() will fail, so the uninitialized value won't be used, but
> > + * KMSAN will still check all arguments of cmpxchg because of imperfect
> > + * handling of inline assembly.
> > + * To work around this problem, use KMSAN_INIT_VALUE() to force initialize the
> > + * return value of get_freepointer_safe().
> > + */
> >  static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
> >  {
> >  	unsigned long freepointer_addr;
> >  	void *p;
> >
> >  	if (!debug_pagealloc_enabled())
> > -		return get_freepointer(s, object);
> > +		return KMSAN_INIT_VALUE(get_freepointer(s, object));
> >
> >  	freepointer_addr = (unsigned long)object + s->offset;
> >  	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
> > -	return freelist_ptr(s, p, freepointer_addr);
> > +	return KMSAN_INIT_VALUE(freelist_ptr(s, p, freepointer_addr));
> >  }
> >
> >  static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
> > @@ -1390,6 +1402,7 @@ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
> >  	ptr = kasan_kmalloc_large(ptr, size, flags);
> >  	/* As ptr might get tagged, call kmemleak hook after KASAN. */
> >  	kmemleak_alloc(ptr, size, 1, flags);
> > +	kmsan_kmalloc_large(ptr, size, flags);
> >  	return ptr;
> >  }
> >
> > @@ -1397,6 +1410,7 @@ static __always_inline void kfree_hook(void *x)
> >  {
> >  	kmemleak_free(x);
> >  	kasan_kfree_large(x, _RET_IP_);
> > +	kmsan_kfree_large(x);
> >  }
> >
> >  static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
> > @@ -1453,6 +1467,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
> >  		} while (object != old_tail);
> >  	}
> >
> > +	do {
> > +		object = next;
> > +		next = get_freepointer(s, object);
> > +		kmsan_slab_free(s, object);
> > +	} while (object != old_tail);
> > +
> >  	/*
> >  	 * Compiler cannot detect this function can be removed if slab_free_hook()
> >  	 * evaluates to nothing. Thus, catch all relevant config debug options here.
> > @@ -2769,6 +2789,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
> >  	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
> >  		memset(object, 0, s->object_size);
> >
> > +	kmsan_slab_alloc(s, object, gfpflags);
> >  	slab_post_alloc_hook(s, gfpflags, 1, &object);
> >
> >  	return object;
> > @@ -2797,6 +2818,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
> >  	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
> >  	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
> >  	ret = kasan_kmalloc(s, ret, size, gfpflags);
> > +
> >  	return ret;
> >  }
> >  EXPORT_SYMBOL(kmem_cache_alloc_trace);
> > @@ -2809,7 +2831,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
> >
> >  	trace_kmem_cache_alloc_node(_RET_IP_, ret,
> >  				    s->object_size, s->size, gfpflags, node);
> > -
> >  	return ret;
> >  }
> >  EXPORT_SYMBOL(kmem_cache_alloc_node);
> > @@ -2825,6 +2846,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
> >  			   size, s->size, gfpflags, node);
> >
> >  	ret = kasan_kmalloc(s, ret, size, gfpflags);
> > +
> >  	return ret;
> >  }
> >  EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
> > @@ -3150,7 +3172,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
> >  			  void **p)
> >  {
> >  	struct kmem_cache_cpu *c;
> > -	int i;
> > +	int i, j;
> >
> >  	/* memcg and kmem_cache debug support */
> >  	s = slab_pre_alloc_hook(s, flags);
> > @@ -3188,11 +3210,11 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
> >
> >  	/* Clear memory outside IRQ disabled fastpath loop */
> >  	if (unlikely(slab_want_init_on_alloc(flags, s))) {
> > -		int j;
> > -
> >  		for (j = 0; j < i; j++)
> >  			memset(p[j], 0, s->object_size);
> >  	}
> > +	for (j = 0; j < i; j++)
> > +		kmsan_slab_alloc(s, p[j], flags);
> >
> >  	/* memcg and kmem_cache debug support */
> >  	slab_post_alloc_hook(s, flags, size, p);
> > @@ -3793,6 +3815,7 @@ static int __init setup_slub_min_objects(char *str)
> >
> >  __setup("slub_min_objects=", setup_slub_min_objects);
> >
> > +__no_sanitize_memory
> >  void *__kmalloc(size_t size, gfp_t flags)
> >  {
> >  	struct kmem_cache *s;
> > @@ -5698,6 +5721,7 @@ static char *create_unique_id(struct kmem_cache *s)
> >  		p += sprintf(p, "%07u", s->size);
> >
> >  	BUG_ON(p > name + ID_STR_LENGTH - 1);
> > +	kmsan_unpoison_shadow(name, p - name);
> >  	return name;
> >  }
> >
> > @@ -5847,6 +5871,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
> >  	al->name = name;
> >  	al->next = alias_list;
> >  	alias_list = al;
> > +	kmsan_unpoison_shadow(al, sizeof(struct saved_alias));
> >  	return 0;
> >  }
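In case it helps review: conceptually, the kmsan_slab_alloc() hook added
above behaves roughly like the sketch below. This is illustrative only,
not KMSAN's actual implementation; shadow_memset() is a made-up stand-in
for KMSAN's real shadow-manipulation helpers.

static void kmsan_slab_alloc_sketch(struct kmem_cache *s, void *object,
				    gfp_t flags)
{
	if (!object)
		return;
	if (flags & __GFP_ZERO)
		/* A zeroed allocation is fully initialized. */
		shadow_memset(object, 0x00, s->object_size);
	else
		/* Poison the object: record every bit as uninitialized. */
		shadow_memset(object, 0xff, s->object_size);
}

Reads whose result depends on poisoned shadow are what KMSAN later
reports.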
--
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Paul Manicle, Halimah DeLaine Prado
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg