The patch titled
     Subject: kasan: fix krealloc handling for tag-based mode
has been removed from the -mm tree.  Its filename was
     kasan-fix-krealloc-handling-for-tag-based-mode.patch

This patch was dropped because an updated version will be merged

------------------------------------------------------
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Subject: kasan: fix krealloc handling for tag-based mode

Right now tag-based KASAN can retag the memory that is reallocated via
krealloc and return a differently tagged pointer even if the same slab
object gets used and no reallocation technically happens.

There are a few issues with this approach.  One is that krealloc callers
can't rely on comparing the return value with the passed argument to
check whether reallocation happened.  Another is that if a caller knows
that no reallocation happened, then it can access object memory through
the old pointer, which leads to false positives.  Look at
nf_ct_ext_add() to see an example.

Fix this by keeping the same tag if the memory doesn't actually get
reallocated during krealloc.

Link: http://lkml.kernel.org/r/bc983dc45be2af41701ac3a88f154b5dd1459a26.1546450432.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/kasan.h |   14 +++++---------
 include/linux/slab.h  |    4 ++--
 mm/kasan/common.c     |   20 ++++++++++++--------
 mm/slab.c             |    8 ++++----
 mm/slab_common.c      |    2 +-
 mm/slub.c             |   10 +++++-----
 6 files changed, 29 insertions(+), 29 deletions(-)
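To illustrate the problem described in the changelog, here is a minimal
sketch of the caller pattern it refers to, modeled on nf_ct_ext_add().
This is not kernel code: struct my_obj and grow_buffer() are
hypothetical.

	#include <linux/types.h>
	#include <linux/slab.h>

	struct my_obj {
		void *data;
		bool moved;
	};

	static int grow_buffer(struct my_obj *obj, size_t new_len, gfp_t flags)
	{
		void *p = krealloc(obj->data, new_len, flags);

		if (!p)
			return -ENOMEM;
		/*
		 * Callers expect p == obj->data when the same slab object
		 * was reused.  If krealloc() retags the pointer on every
		 * call, this comparison fails even though nothing moved...
		 */
		if (p != obj->data)
			obj->moved = true;
		/*
		 * ...and any later access through the old, differently
		 * tagged copy of the pointer is reported as a (false
		 * positive) tag mismatch.
		 */
		obj->data = p;
		return 0;
	}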
--- a/include/linux/kasan.h~kasan-fix-krealloc-handling-for-tag-based-mode
+++ a/include/linux/kasan.h
@@ -57,9 +57,8 @@ void * __must_check kasan_kmalloc_large(
 void kasan_kfree_large(void *ptr, unsigned long ip);
 void kasan_poison_kfree(void *ptr, unsigned long ip);
 void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
-					size_t size, gfp_t flags);
-void * __must_check kasan_krealloc(const void *object, size_t new_size,
-					gfp_t flags);
+					size_t size, gfp_t flags, bool krealloc);
+void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
 
 void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
 					gfp_t flags);
@@ -118,15 +117,12 @@ static inline void *kasan_kmalloc_large(
 static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
 static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
 static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
-				size_t size, gfp_t flags)
-{
-	return (void *)object;
-}
-static inline void *kasan_krealloc(const void *object, size_t new_size,
-				gfp_t flags)
+				size_t size, gfp_t flags, bool krealloc)
 {
 	return (void *)object;
 }
+static inline void kasan_krealloc(const void *object, size_t new_size,
+				gfp_t flags) {}
 
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
 				gfp_t flags)
--- a/include/linux/slab.h~kasan-fix-krealloc-handling-for-tag-based-mode
+++ a/include/linux/slab.h
@@ -445,7 +445,7 @@ static __always_inline void *kmem_cache_
 {
 	void *ret = kmem_cache_alloc(s, flags);
 
-	ret = kasan_kmalloc(s, ret, size, flags);
+	ret = kasan_kmalloc(s, ret, size, flags, false);
 	return ret;
 }
 
@@ -456,7 +456,7 @@ kmem_cache_alloc_node_trace(struct kmem_
 {
 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
 
-	ret = kasan_kmalloc(s, ret, size, gfpflags);
+	ret = kasan_kmalloc(s, ret, size, gfpflags, false);
 	return ret;
 }
 #endif /* CONFIG_TRACING */
--- a/mm/kasan/common.c~kasan-fix-krealloc-handling-for-tag-based-mode
+++ a/mm/kasan/common.c
@@ -392,7 +392,7 @@ void * __must_check kasan_init_slab_obj(
 void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
 					gfp_t flags)
 {
-	return kasan_kmalloc(cache, object, cache->object_size, flags);
+	return kasan_kmalloc(cache, object, cache->object_size, flags, false);
 }
 
 static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
@@ -451,7 +451,7 @@ bool kasan_slab_free(struct kmem_cache *
 }
 
 void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
-				size_t size, gfp_t flags)
+				size_t size, gfp_t flags, bool krealloc)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
@@ -468,8 +468,12 @@ void * __must_check kasan_kmalloc(struct
 	redzone_end = round_up((unsigned long)object + cache->object_size,
 				KASAN_SHADOW_SCALE_SIZE);
 
-	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		tag = assign_tag(cache, object, false);
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) {
+		if (krealloc)
+			tag = get_tag(object);
+		else
+			tag = assign_tag(cache, object, false);
+	}
 
 	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
 	kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -508,19 +512,19 @@ void * __must_check kasan_kmalloc_large(
 	return (void *)ptr;
 }
 
-void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
+void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
 	struct page *page;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
-		return (void *)object;
+		return;
 
 	page = virt_to_head_page(object);
 
 	if (unlikely(!PageSlab(page)))
-		return kasan_kmalloc_large(object, size, flags);
+		kasan_kmalloc_large(object, size, flags);
 	else
-		return kasan_kmalloc(page->slab_cache, object, size, flags);
+		kasan_kmalloc(page->slab_cache, object, size, flags, true);
 }
 
 void kasan_poison_kfree(void *ptr, unsigned long ip)
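Condensed, the tag selection that kasan_kmalloc() performs after this
change comes down to the sketch below.  This is not the verbatim kernel
code (the surrounding redzone handling is omitted and the 0xff default
stands in for the no-tags case); get_tag(), assign_tag(), set_tag() and
kasan_unpoison_shadow() are the helpers used in the hunks above.

	static void *sketch_kasan_kmalloc(struct kmem_cache *cache,
					  const void *object, size_t size,
					  gfp_t flags, bool krealloc)
	{
		u8 tag = 0xff;

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) {
			if (krealloc)
				/*
				 * In-place krealloc: reuse the tag already
				 * embedded in the pointer, so the caller's
				 * old pointer stays valid.
				 */
				tag = get_tag(object);
			else
				/* Fresh allocation: assign a new tag. */
				tag = assign_tag(cache, object, false);
		}

		/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS. */
		kasan_unpoison_shadow(set_tag(object, tag), size);
		return set_tag(object, tag);
	}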
--- a/mm/slab.c~kasan-fix-krealloc-handling-for-tag-based-mode
+++ a/mm/slab.c
@@ -3604,7 +3604,7 @@ kmem_cache_alloc_trace(struct kmem_cache
 
 	ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	ret = kasan_kmalloc(cachep, ret, size, flags);
+	ret = kasan_kmalloc(cachep, ret, size, flags, false);
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
 	return ret;
@@ -3647,7 +3647,7 @@ void *kmem_cache_alloc_node_trace(struct
 
 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-	ret = kasan_kmalloc(cachep, ret, size, flags);
+	ret = kasan_kmalloc(cachep, ret, size, flags, false);
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3668,7 +3668,7 @@ __do_kmalloc_node(size_t size, gfp_t fla
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	ret = kasan_kmalloc(cachep, ret, size, flags);
+	ret = kasan_kmalloc(cachep, ret, size, flags, false);
 
 	return ret;
 }
@@ -3706,7 +3706,7 @@ static __always_inline void *__do_kmallo
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
 
-	ret = kasan_kmalloc(cachep, ret, size, flags);
+	ret = kasan_kmalloc(cachep, ret, size, flags, false);
 	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
--- a/mm/slab_common.c~kasan-fix-krealloc-handling-for-tag-based-mode
+++ a/mm/slab_common.c
@@ -1507,7 +1507,7 @@ static __always_inline void *__do_kreall
 	ks = ksize(p);
 
 	if (ks >= new_size) {
-		p = kasan_krealloc((void *)p, new_size, flags);
+		kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
--- a/mm/slub.c~kasan-fix-krealloc-handling-for-tag-based-mode
+++ a/mm/slub.c
@@ -2763,7 +2763,7 @@ void *kmem_cache_alloc_trace(struct kmem
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
-	ret = kasan_kmalloc(s, ret, size, gfpflags);
+	ret = kasan_kmalloc(s, ret, size, gfpflags, false);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2791,7 +2791,7 @@ void *kmem_cache_alloc_node_trace(struct
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
 
-	ret = kasan_kmalloc(s, ret, size, gfpflags);
+	ret = kasan_kmalloc(s, ret, size, gfpflags, false);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -3364,7 +3364,7 @@ static void early_kmem_cache_node_alloc(
 	init_tracking(kmem_cache_node, n);
 #endif
 	n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
-			GFP_KERNEL);
+			GFP_KERNEL, false);
 	page->freelist = get_freepointer(kmem_cache_node, n);
 	page->inuse = 1;
 	page->frozen = 0;
@@ -3779,7 +3779,7 @@ void *__kmalloc(size_t size, gfp_t flags
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
-	ret = kasan_kmalloc(s, ret, size, flags);
+	ret = kasan_kmalloc(s, ret, size, flags, false);
 
 	return ret;
 }
@@ -3823,7 +3823,7 @@ void *__kmalloc_node(size_t size, gfp_t 
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
-	ret = kasan_kmalloc(s, ret, size, flags);
+	ret = kasan_kmalloc(s, ret, size, flags, false);
 
 	return ret;
 }
_

Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are