The patch titled
     Subject: kasan, mm: don't save alloc stacks twice
has been added to the -mm tree.  Its filename is
     kasan-mm-dont-save-alloc-stacks-twice.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/kasan-mm-dont-save-alloc-stacks-twice.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/kasan-mm-dont-save-alloc-stacks-twice.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Subject: kasan, mm: don't save alloc stacks twice

Patch series "kasan: optimizations and fixes for HW_TAGS".

This patchset makes the HW_TAGS mode more efficient, mostly by reworking
poisoning approaches and simplifying/inlining some internal helpers.

With this change, the overhead of HW_TAGS annotations, excluding setting
and checking memory tags, is ~3%.  The performance impact caused by tags
will remain unknown until hardware that supports MTE is available.

As a side effect, this patchset speeds up generic KASAN by ~15%.

This patch (of 12):

Currently KASAN saves allocation stacks in both the kasan_slab_alloc() and
kasan_kmalloc() annotations.  This patch changes KASAN to save allocation
stacks for slab objects from kmalloc caches in kasan_kmalloc() only, and
stacks for other slab objects in kasan_slab_alloc() only.

This change requires that ____kasan_kmalloc() know whether the object
belongs to a kmalloc cache.  This is implemented by adding a flag field to
the kasan_info structure.  That flag is only set for kmalloc caches, via a
new kasan_cache_create_kmalloc() annotation.
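For illustration only, here is a minimal user-space sketch of the
mechanism described above: kasan_cache_create_kmalloc() marks a cache,
and the gate added to set_alloc_info() then skips the stack save on the
kasan_slab_alloc() path for marked caches.  The struct layouts and the
should_save_alloc_stack() helper are simplified stand-ins invented for
this sketch, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures touched by the patch. */
struct kasan_cache { bool is_kmalloc; };
struct kmem_cache { struct kasan_cache kasan_info; };

/* Mirrors __kasan_cache_create_kmalloc(): flag a cache as kmalloc-backed. */
static void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
        cache->kasan_info.is_kmalloc = true;
}

/*
 * Hypothetical helper expressing the gate this patch adds to
 * set_alloc_info(): save a stack unless this is a kmalloc cache being
 * handled by the kasan_slab_alloc() path.
 */
static bool should_save_alloc_stack(struct kmem_cache *cache, bool in_kmalloc)
{
        return !cache->kasan_info.is_kmalloc || in_kmalloc;
}

int main(void)
{
        struct kmem_cache cache = { { .is_kmalloc = false } };

        kasan_cache_create_kmalloc(&cache);
        /* Prints 0: the kasan_slab_alloc() path skips the save. */
        printf("slab_alloc saves: %d\n", should_save_alloc_stack(&cache, false));
        /* Prints 1: the kasan_kmalloc() path saves the stack once. */
        printf("kmalloc saves:    %d\n", should_save_alloc_stack(&cache, true));
        return 0;
}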
Link: https://lkml.kernel.org/r/cover.1612208222.git.andreyknvl@xxxxxxxxxx
Link: https://lkml.kernel.org/r/c153f78b173df7537c9be6f2f3a888ddf0b42a3b.1612208222.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Peter Collingbourne <pcc@xxxxxxxxxx>
Cc: Evgenii Stepanov <eugenis@xxxxxxxxxx>
Cc: Branislav Rankov <Branislav.Rankov@xxxxxxx>
Cc: Kevin Brodsky <kevin.brodsky@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/kasan.h |    9 +++++++++
 mm/kasan/common.c     |   18 ++++++++++++++----
 mm/slab_common.c      |    1 +
 3 files changed, 24 insertions(+), 4 deletions(-)

--- a/include/linux/kasan.h~kasan-mm-dont-save-alloc-stacks-twice
+++ a/include/linux/kasan.h
@@ -83,6 +83,7 @@ static inline void kasan_disable_current
 struct kasan_cache {
         int alloc_meta_offset;
         int free_meta_offset;
+        bool is_kmalloc;
 };
 
 #ifdef CONFIG_KASAN_HW_TAGS
@@ -143,6 +144,13 @@ static __always_inline void kasan_cache_
         __kasan_cache_create(cache, size, flags);
 }
 
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
+static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
+{
+        if (kasan_enabled())
+                __kasan_cache_create_kmalloc(cache);
+}
+
 size_t __kasan_metadata_size(struct kmem_cache *cache);
 static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
 {
@@ -278,6 +286,7 @@ static inline void kasan_free_pages(stru
 static inline void kasan_cache_create(struct kmem_cache *cache,
                                       unsigned int *size,
                                       slab_flags_t *flags) {}
+static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
--- a/mm/kasan/common.c~kasan-mm-dont-save-alloc-stacks-twice
+++ a/mm/kasan/common.c
@@ -210,6 +210,11 @@ void __kasan_cache_create(struct kmem_ca
         *size = optimal_size;
 }
 
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
+{
+        cache->kasan_info.is_kmalloc = true;
+}
+
 size_t __kasan_metadata_size(struct kmem_cache *cache)
 {
         if (!kasan_stack_collection_enabled())
@@ -394,17 +399,22 @@ void __kasan_slab_free_mempool(void *ptr
         }
 }
 
-static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
+static void set_alloc_info(struct kmem_cache *cache, void *object,
+                                gfp_t flags, bool kmalloc)
 {
         struct kasan_alloc_meta *alloc_meta;
 
+        /* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
+        if (cache->kasan_info.is_kmalloc && !kmalloc)
+                return;
+
         alloc_meta = kasan_get_alloc_meta(cache, object);
         if (alloc_meta)
                 kasan_set_track(&alloc_meta->alloc_track, flags);
 }
 
 static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
-                                size_t size, gfp_t flags, bool keep_tag)
+                                size_t size, gfp_t flags, bool kmalloc)
 {
         unsigned long redzone_start;
         unsigned long redzone_end;
@@ -423,7 +433,7 @@ static void *____kasan_kmalloc(struct km
                                 KASAN_GRANULE_SIZE);
         redzone_end = round_up((unsigned long)object + cache->object_size,
                                 KASAN_GRANULE_SIZE);
-        tag = assign_tag(cache, object, false, keep_tag);
+        tag = assign_tag(cache, object, false, kmalloc);
 
         /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
         kasan_unpoison(set_tag(object, tag), size);
@@ -431,7 +441,7 @@ static void *____kasan_kmalloc(struct km
                         KASAN_KMALLOC_REDZONE);
 
         if (kasan_stack_collection_enabled())
-                set_alloc_info(cache, (void *)object, flags);
+                set_alloc_info(cache, (void *)object, flags, kmalloc);
 
         return set_tag(object, tag);
 }
--- a/mm/slab_common.c~kasan-mm-dont-save-alloc-stacks-twice
+++ a/mm/slab_common.c
@@ -568,6 +568,7 @@ struct kmem_cache *__init create_kmalloc
                 panic("Out of memory when creating slab %s\n", name);
 
         create_boot_cache(s, name, size, flags, useroffset, usersize);
+        kasan_cache_create_kmalloc(s);
         list_add(&s->list, &slab_caches);
         s->refcount = 1;
         return s;
_
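As a closing usage note (not part of the patch), the hedged model below
simulates both allocation paths after this change and counts saved
stacks.  Every type and function here is a user-space stand-in written
for this message; only the gating condition mirrors the diff above.  A
kmem_cache_alloc()-style allocation runs kasan_slab_alloc() alone, while
a kmalloc()-style allocation runs kasan_slab_alloc() and then
kasan_kmalloc(); either way, exactly one stack is recorded per object:

#include <stdbool.h>
#include <stdio.h>

struct kasan_cache { bool is_kmalloc; };
struct kmem_cache { struct kasan_cache kasan_info; };

static int stacks_saved;

/*
 * Mirrors the patched set_alloc_info() gate; incrementing a counter
 * stands in for kasan_set_track() in this model.
 */
static void set_alloc_info(struct kmem_cache *cache, bool kmalloc)
{
        if (cache->kasan_info.is_kmalloc && !kmalloc)
                return;
        stacks_saved++;
}

/* Stand-ins for the two KASAN allocation annotations. */
static void kasan_slab_alloc(struct kmem_cache *cache)
{
        set_alloc_info(cache, false);
}

static void kasan_kmalloc(struct kmem_cache *cache)
{
        set_alloc_info(cache, true);
}

int main(void)
{
        struct kmem_cache named_cache = { { .is_kmalloc = false } };
        struct kmem_cache kmalloc_cache = { { .is_kmalloc = true } };

        /* kmem_cache_alloc() path: one annotation, one stack. */
        kasan_slab_alloc(&named_cache);

        /* kmalloc() path: two annotations, still one stack. */
        kasan_slab_alloc(&kmalloc_cache);
        kasan_kmalloc(&kmalloc_cache);

        /* Prints "allocations: 2, stacks saved: 2". */
        printf("allocations: 2, stacks saved: %d\n", stacks_saved);
        return 0;
}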
Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are

kasan-prefix-global-functions-with-kasan_.patch
kasan-clarify-hw_tags-impact-on-tbi.patch
kasan-clean-up-comments-in-tests.patch
kasan-add-macros-to-simplify-checking-test-constraints.patch
kasan-add-match-all-tag-tests.patch
kasan-arm64-allow-using-kunit-tests-with-hw_tags-mode.patch
kasan-rename-config_test_kasan_module.patch
kasan-add-compiler-barriers-to-kunit_expect_kasan_fail.patch
kasan-adapt-kmalloc_uaf2-test-to-hw_tags-mode.patch
kasan-fix-memory-corruption-in-kasan_bitops_tags-test.patch
kasan-move-_ret_ip_-to-inline-wrappers.patch
kasan-fix-bug-detection-via-ksize-for-hw_tags-mode.patch
kasan-add-proper-page-allocator-tests.patch
kasan-add-a-test-for-kmem_cache_alloc-free_bulk.patch
kasan-dont-run-tests-when-kasan-is-not-enabled.patch
kasan-mm-dont-save-alloc-stacks-twice.patch
kasan-mm-optimize-kmalloc-poisoning.patch
kasan-optimize-large-kmalloc-poisoning.patch
kasan-clean-up-setting-free-info-in-kasan_slab_free.patch
kasan-unify-large-kfree-checks.patch
kasan-rework-krealloc-tests.patch
kasan-mm-remove-krealloc-side-effect.patch
kasan-mm-optimize-krealloc-poisoning.patch
kasan-ensure-poisoning-size-alignment.patch
arm64-kasan-simplify-and-inline-mte-functions.patch
kasan-always-inline-hw_tags-helper-functions.patch
arm64-kasan-export-mte-symbols-for-kasan-tests.patch