2015-01-21 23:47 GMT+03:00 Sasha Levin <sasha.levin@xxxxxxxxxx>:
> On 01/21/2015 11:51 AM, Andrey Ryabinin wrote:
>> With this patch kasan will be able to catch bugs in memory allocated
>> by slub.
>> Initially, all objects in a newly allocated slab page are marked as
>> redzone. Later, when a slub object is allocated, the number of bytes
>> requested by the caller is marked as accessible, and the rest of the
>> object (including slub's metadata) is marked as redzone (inaccessible).
>>
>> We also mark an object as accessible if ksize() was called for it.
>> There are some places in the kernel where ksize() is called to inquire
>> the size of the really allocated area. Such callers may validly access
>> the whole allocated memory, so it has to be marked as accessible.
>>
>> Code in slub.c and slab_common.c may validly access an object's
>> metadata, so instrumentation for these files is disabled.
>
> This one doesn't apply on -next. Is there a missing commit?
>

I don't see anything wrong. It's just a difference between the torvalds
and -next trees. The patch rebased onto -next is attached, just in case.
There is also a trivial conflict with -next in the 11/17 patch.

> Applying: mm: slub: add kernel address sanitizer support for slub allocator
> fatal: sha1 information is lacking or useless (mm/slub.c).
> Repository lacks necessary blobs to fall back on 3-way merge.
> Cannot fall back to three-way merge.
> Patch failed at 0007 mm: slub: add kernel address sanitizer support for slub allocator
> When you have resolved this problem run "git am --resolved".
> If you would prefer to skip this patch, instead run "git am --skip".
> To restore the original branch and stop patching run "git am --abort".
>
>
> Thanks,
> Sasha
>
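For reference, the behaviour described in the changelog above looks roughly
like this from a slub user's point of view. This is only an illustrative
sketch, not part of the patch; kasan_slub_example() is a made-up name, and
everything else is the ordinary slab API:

#include <linux/slab.h>
#include <linux/string.h>

static void kasan_slub_example(void)
{
	char *p = kmalloc(5, GFP_KERNEL);

	if (!p)
		return;

	p[5] = 1;		/* only the 5 requested bytes were marked
				 * accessible, so kasan reports an
				 * out-of-bounds access here */

	memset(p, 0, ksize(p));	/* valid: calling ksize() unpoisons the
				 * whole really allocated area */

	kfree(p);		/* the freed object is poisoned again */
	p[0] = 1;		/* kasan reports a use-after-free */
}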
From b7b545981438ecfbedf7c525410908f901105e13 Mon Sep 17 00:00:00 2001
From: Andrey Ryabinin <a.ryabinin@xxxxxxxxxxx>
Date: Thu, 22 Jan 2015 04:12:03 +0300
Subject: [PATCH] mm: slub: add kernel address sanitizer support for slub
 allocator

With this patch kasan will be able to catch bugs in memory allocated
by slub.

Initially, all objects in a newly allocated slab page are marked as
redzone. Later, when a slub object is allocated, the number of bytes
requested by the caller is marked as accessible, and the rest of the
object (including slub's metadata) is marked as redzone (inaccessible).

We also mark an object as accessible if ksize() was called for it.
There are some places in the kernel where ksize() is called to inquire
the size of the really allocated area. Such callers may validly access
the whole allocated memory, so it has to be marked as accessible.

Code in slub.c and slab_common.c may validly access an object's
metadata, so instrumentation for these files is disabled.

Signed-off-by: Andrey Ryabinin <a.ryabinin@xxxxxxxxxxx>
Signed-off-by: Dmitry Chernenkov <dmitryc@xxxxxxxxxx>
---
 include/linux/kasan.h | 30 ++++++++++++++++
 include/linux/slab.h  | 11 ++++--
 lib/Kconfig.kasan     |  1 +
 mm/Makefile           |  3 ++
 mm/kasan/kasan.c      | 98 +++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/kasan/report.c     | 22 ++++++++++++
 mm/slab_common.c      |  5 ++-
 mm/slub.c             | 34 ++++++++++++++++--
 8 files changed, 199 insertions(+), 5 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index a278ccc..940fc4f 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -12,6 +12,9 @@ struct page;
 #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
 
 #define KASAN_FREE_PAGE         0xFF  /* page was freed */
+#define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
+#define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
+#define KASAN_KMALLOC_FREE      0xFB  /* object was freed (kmem_cache_free/kfree) */
 #define KASAN_SHADOW_GAP        0xF9  /* address belongs to shadow memory */
 
 #include <asm/kasan.h>
@@ -37,6 +40,18 @@ void kasan_unpoison_shadow(const void *address, size_t size);
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
+void kasan_poison_slab(struct page *page);
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
+void kasan_poison_object_data(struct kmem_cache *cache, void *object);
+
+void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kfree_large(const void *ptr);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
+void kasan_krealloc(const void *object, size_t new_size);
+
+void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_free(struct kmem_cache *s, void *object);
+
 #else /* CONFIG_KASAN */
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -47,6 +62,21 @@ static inline void kasan_disable_local(void) {}
 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
+static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+					void *object) {}
+static inline void kasan_poison_object_data(struct kmem_cache *cache,
+					void *object) {}
+
+static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
+static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
+				size_t size) {}
+static inline void kasan_krealloc(const void *object, size_t new_size) {}
+
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+
 #endif /* CONFIG_KASAN */
 
 #endif /* LINUX_KASAN_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2e3b448..f764096 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -104,6 +104,7 @@
 				(unsigned long)ZERO_SIZE_PTR)
 
 #include <linux/kmemleak.h>
+#include <linux/kasan.h>
 
 struct mem_cgroup;
 /*
@@ -325,7 +326,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
 		gfp_t flags, size_t size)
 {
-	return kmem_cache_alloc(s, flags);
+	void *ret = kmem_cache_alloc(s, flags);
+
+	kasan_kmalloc(s, ret, size);
+	return ret;
 }
 
 static __always_inline void *
@@ -333,7 +337,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 			      gfp_t gfpflags,
 			      int node, size_t size)
 {
-	return kmem_cache_alloc_node(s, gfpflags, node);
+	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
+
+	kasan_kmalloc(s, ret, size);
+	return ret;
 }
 #endif /* CONFIG_TRACING */
 
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index f86070d..ada0260 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN
 config KASAN
 	bool "AddressSanitizer: runtime memory debugger"
 	depends on !MEMORY_HOTPLUG
+	depends on SLUB_DEBUG
 	help
 	  Enables address sanitizer - runtime memory debugger, designed to find
 	  out-of-bounds accesses and use-after-free bugs.
diff --git a/mm/Makefile b/mm/Makefile
index 79f4fbc..3c1caa2 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -2,6 +2,9 @@
 # Makefile for the linux memory manager.
 #
 
+KASAN_SANITIZE_slab_common.o := n
+KASAN_SANITIZE_slub.o := n
+
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index efe8105..c52350e 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -30,6 +30,7 @@
 #include <linux/kasan.h>
 
 #include "kasan.h"
+#include "../slab.h"
 
 /*
  * Poisons the shadow memory for 'size' bytes starting from 'addr'.
@@ -261,6 +262,103 @@ void kasan_free_pages(struct page *page, unsigned int order)
 			KASAN_FREE_PAGE);
 }
 
+void kasan_poison_slab(struct page *page)
+{
+	kasan_poison_shadow(page_address(page),
+			PAGE_SIZE << compound_order(page),
+			KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+{
+	kasan_unpoison_shadow(object, cache->object_size);
+}
+
+void kasan_poison_object_data(struct kmem_cache *cache, void *object)
+{
+	kasan_poison_shadow(object,
+			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+			KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+{
+	kasan_kmalloc(cache, object, cache->object_size);
+}
+
+void kasan_slab_free(struct kmem_cache *cache, void *object)
+{
+	unsigned long size = cache->object_size;
+	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+
+	/* RCU slabs could be legally used after free within the RCU period */
+	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+		return;
+
+	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
+}
+
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+{
+	unsigned long redzone_start;
+	unsigned long redzone_end;
+
+	if (unlikely(object == NULL))
+		return;
+
+	redzone_start = round_up((unsigned long)(object + size),
+				KASAN_SHADOW_SCALE_SIZE);
+	redzone_end = round_up((unsigned long)object + cache->object_size,
+				KASAN_SHADOW_SCALE_SIZE);
+
+	kasan_unpoison_shadow(object, size);
+	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+		KASAN_KMALLOC_REDZONE);
+}
+EXPORT_SYMBOL(kasan_kmalloc);
+
+void kasan_kmalloc_large(const void *ptr, size_t size)
+{
+	struct page *page;
+	unsigned long redzone_start;
+	unsigned long redzone_end;
+
+	if (unlikely(ptr == NULL))
+		return;
+
+	page = virt_to_page(ptr);
+	redzone_start = round_up((unsigned long)(ptr + size),
+				KASAN_SHADOW_SCALE_SIZE);
+	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+
+	kasan_unpoison_shadow(ptr, size);
+	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+		KASAN_PAGE_REDZONE);
+}
+
+void kasan_krealloc(const void *object, size_t size)
+{
+	struct page *page;
+
+	if (unlikely(object == ZERO_SIZE_PTR))
+		return;
+
+	page = virt_to_head_page(object);
+
+	if (unlikely(!PageSlab(page)))
+		kasan_kmalloc_large(object, size);
+	else
+		kasan_kmalloc(page->slab_cache, object, size);
+}
+
+void kasan_kfree_large(const void *ptr)
+{
+	struct page *page = virt_to_page(ptr);
+
+	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+			KASAN_FREE_PAGE);
+}
+
 #define DECLARE_ASAN_CHECK(size)				\
 void __asan_load##size(unsigned long addr)			\
 {								\
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 7983ebb..f9bc57a 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -24,6 +24,7 @@
 #include <linux/kasan.h>
 
 #include "kasan.h"
+#include "../slab.h"
 
 /* Shadow layout customization. */
 #define SHADOW_BYTES_PER_BLOCK 1
@@ -55,8 +56,11 @@ static void print_error_description(struct access_info *info)
 
 	switch (shadow_val) {
 	case KASAN_FREE_PAGE:
+	case KASAN_KMALLOC_FREE:
 		bug_type = "use after free";
 		break;
+	case KASAN_PAGE_REDZONE:
+	case KASAN_KMALLOC_REDZONE:
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "out of bounds access";
 		break;
@@ -80,6 +84,24 @@ static void print_address_description(struct access_info *info)
 
 	if ((addr >= PAGE_OFFSET) &&
 		(addr < (unsigned long)high_memory)) {
 		struct page *page = virt_to_head_page((void *)addr);
+
+		if (PageSlab(page)) {
+			void *object;
+			struct kmem_cache *cache = page->slab_cache;
+			void *last_object;
+
+			object = virt_to_obj(cache, page_address(page),
+				(void *)info->access_addr);
+			last_object = page_address(page) +
+				page->objects * cache->size;
+
+			if (unlikely(object > last_object))
+				object = last_object; /* we hit into padding */
+
+			object_err(cache, page, object,
+				"kasan: bad access detected");
+			return;
+		}
 		dump_page(page, "kasan: bad access detected");
 	}
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6a7502d..40cef33 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -853,6 +853,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	page = alloc_kmem_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
 	kmemleak_alloc(ret, size, 1, flags);
+	kasan_kmalloc_large(ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
@@ -1037,8 +1038,10 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 	if (p)
 		ks = ksize(p);
 
-	if (ks >= new_size)
+	if (ks >= new_size) {
+		kasan_krealloc((void *)p, new_size);
 		return (void *)p;
+	}
 
 	ret = kmalloc_track_caller(new_size, flags);
 	if (ret && p)
diff --git a/mm/slub.c b/mm/slub.c
index 0d8eb4a..955155d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -33,6 +33,7 @@
 #include <linux/stacktrace.h>
 #include <linux/prefetch.h>
 #include <linux/memcontrol.h>
+#include <linux/kasan.h>
 
 #include <trace/events/kmem.h>
 
@@ -469,10 +470,12 @@ static int disable_higher_order_debug;
 
 static inline void metadata_access_enable(void)
 {
+	kasan_disable_local();
 }
 
 static inline void metadata_access_disable(void)
 {
+	kasan_enable_local();
 }
 
 /*
@@ -1242,11 +1245,13 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
 	kmemleak_alloc(ptr, size, 1, flags);
+	kasan_kmalloc_large(ptr, size);
 }
 
 static inline void kfree_hook(const void *x)
 {
 	kmemleak_free(x);
+	kasan_kfree_large(x);
 }
 
 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
@@ -1269,6 +1274,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 	memcg_kmem_put_cache(s);
+	kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1292,6 +1298,8 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 #endif
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
+
+	kasan_slab_free(s, x);
 }
 
 /*
@@ -1386,8 +1394,11 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
 	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor))
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
 		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1420,6 +1431,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
+	kasan_poison_slab(page);
+
 	for_each_object_idx(p, idx, s, start, page->objects) {
 		setup_object(s, page, p);
 		if (likely(idx < page->objects))
@@ -2504,6 +2517,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2530,6 +2544,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
+
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -2915,6 +2931,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
+	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
@@ -3287,6 +3304,8 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
+	kasan_kmalloc(s, ret, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -3330,12 +3349,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
+	kasan_kmalloc(s, ret, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
-size_t ksize(const void *object)
+static size_t __ksize(const void *object)
 {
 	struct page *page;
 
@@ -3351,6 +3372,15 @@ size_t ksize(const void *object)
 
 	return slab_ksize(page->slab_cache);
 }
+
+size_t ksize(const void *object)
+{
+	size_t size = __ksize(object);
+	/* We assume that ksize callers could use whole allocated area,
+	   so we need unpoison this area. */
+	kasan_krealloc(object, size);
+	return size;
+}
 EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
-- 
2.0.4