The patch titled
     Subject: kasan: always inline HW_TAGS helper functions
has been added to the -mm tree.  Its filename is
     kasan-always-inline-hw_tags-helper-functions.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/kasan-always-inline-hw_tags-helper-functions.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/kasan-always-inline-hw_tags-helper-functions.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Subject: kasan: always inline HW_TAGS helper functions

Mark all static functions in common.c and kasan.h that are used for
hardware tag-based KASAN as __always_inline to avoid unnecessary function
calls.

Link: https://lkml.kernel.org/r/05a45017b4cb15344395650e880bbab0fe6ba3e4.1612208222.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Branislav Rankov <Branislav.Rankov@xxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Evgenii Stepanov <eugenis@xxxxxxxxxx>
Cc: Kevin Brodsky <kevin.brodsky@xxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Peter Collingbourne <pcc@xxxxxxxxxx>
Cc: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/kasan/common.c |   13 +++++++------
 mm/kasan/kasan.h  |    6 +++---
 2 files changed, 10 insertions(+), 9 deletions(-)

--- a/mm/kasan/common.c~kasan-always-inline-hw_tags-helper-functions
+++ a/mm/kasan/common.c
@@ -279,7 +279,8 @@ void __kasan_poison_object_data(struct k
  * based on objects indexes, so that objects that are next to each other
  * get different tags.
  */
-static u8 assign_tag(struct kmem_cache *cache, const void *object, bool init)
+static __always_inline u8 assign_tag(struct kmem_cache *cache,
+					const void *object, bool init)
 {
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
 		return 0xff;
@@ -321,8 +322,8 @@ void * __must_check __kasan_init_slab_ob
 	return (void *)object;
 }
 
-static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
-				unsigned long ip, bool quarantine)
+static __always_inline bool ____kasan_slab_free(struct kmem_cache *cache,
+				void *object, unsigned long ip, bool quarantine)
 {
 	u8 tag;
 	void *tagged_object;
@@ -366,7 +367,7 @@ bool __kasan_slab_free(struct kmem_cache
 	return ____kasan_slab_free(cache, object, ip, true);
 }
 
-static bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+static __always_inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
 {
 	if (ptr != page_address(virt_to_head_page(ptr))) {
 		kasan_report_invalid_free(ptr, ip);
@@ -461,8 +462,8 @@ void * __must_check __kasan_slab_alloc(s
 	return tagged_object;
 }
 
-static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
-				size_t size, gfp_t flags)
+static __always_inline void *____kasan_kmalloc(struct kmem_cache *cache,
+				const void *object, size_t size, gfp_t flags)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
--- a/mm/kasan/kasan.h~kasan-always-inline-hw_tags-helper-functions
+++ a/mm/kasan/kasan.h
@@ -318,7 +318,7 @@ static inline u8 kasan_random_tag(void)
 
 #ifdef CONFIG_KASAN_HW_TAGS
 
-static inline void kasan_poison(const void *addr, size_t size, u8 value)
+static __always_inline void kasan_poison(const void *addr, size_t size, u8 value)
 {
 	addr = kasan_reset_tag(addr);
 
@@ -334,7 +334,7 @@ static inline void kasan_poison(const vo
 	hw_set_mem_tag_range((void *)addr, size, value);
 }
 
-static inline void kasan_unpoison(const void *addr, size_t size)
+static __always_inline void kasan_unpoison(const void *addr, size_t size)
 {
 	u8 tag = get_tag(addr);
 
@@ -349,7 +349,7 @@ static inline void kasan_unpoison(const
 	hw_set_mem_tag_range((void *)addr, size, tag);
 }
 
-static inline bool kasan_byte_accessible(const void *addr)
+static __always_inline bool kasan_byte_accessible(const void *addr)
 {
 	u8 ptr_tag = get_tag(addr);
 	u8 mem_tag = hw_get_mem_tag((void *)addr);
_

Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are

kasan-prefix-global-functions-with-kasan_.patch
kasan-clarify-hw_tags-impact-on-tbi.patch
kasan-clean-up-comments-in-tests.patch
kasan-add-macros-to-simplify-checking-test-constraints.patch
kasan-add-match-all-tag-tests.patch
kasan-arm64-allow-using-kunit-tests-with-hw_tags-mode.patch
kasan-rename-config_test_kasan_module.patch
kasan-add-compiler-barriers-to-kunit_expect_kasan_fail.patch
kasan-adapt-kmalloc_uaf2-test-to-hw_tags-mode.patch
kasan-fix-memory-corruption-in-kasan_bitops_tags-test.patch
kasan-move-_ret_ip_-to-inline-wrappers.patch
kasan-fix-bug-detection-via-ksize-for-hw_tags-mode.patch
kasan-add-proper-page-allocator-tests.patch
kasan-add-a-test-for-kmem_cache_alloc-free_bulk.patch
kasan-dont-run-tests-when-kasan-is-not-enabled.patch
kasan-mm-dont-save-alloc-stacks-twice.patch
kasan-mm-optimize-kmalloc-poisoning.patch
kasan-optimize-large-kmalloc-poisoning.patch
kasan-clean-up-setting-free-info-in-kasan_slab_free.patch
kasan-unify-large-kfree-checks.patch
kasan-rework-krealloc-tests.patch
kasan-mm-remove-krealloc-side-effect.patch
kasan-mm-optimize-krealloc-poisoning.patch
kasan-ensure-poisoning-size-alignment.patch
arm64-kasan-simplify-and-inline-mte-functions.patch
kasan-always-inline-hw_tags-helper-functions.patch
arm64-kasan-export-mte-symbols-for-kasan-tests.patch
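
Not part of the patch above: a minimal stand-alone C sketch of the difference
between a plain "static inline" and the always_inline attribute that the
kernel's __always_inline macro wraps. It is a hypothetical userspace example
for GCC/Clang; the function names are illustrative and are not taken from the
KASAN code.

/*
 * Illustrative sketch only (hypothetical names, not kernel code).
 * A plain "static inline" is merely a hint: the compiler may still emit an
 * out-of-line copy and call it, e.g. at -O0 or when it judges inlining
 * unprofitable.  The always_inline attribute forces inlining regardless of
 * optimization level, which is the behaviour the patch wants for the
 * HW_TAGS helpers to avoid unnecessary function calls.
 */
#include <stdio.h>

/* May or may not be inlined, at the compiler's discretion. */
static inline int add_maybe_inlined(int a, int b)
{
	return a + b;
}

/* Inlined into every caller, even when built with -O0. */
static inline __attribute__((__always_inline__)) int add_always_inlined(int a, int b)
{
	return a + b;
}

int main(void)
{
	printf("%d %d\n", add_maybe_inlined(1, 2), add_always_inlined(3, 4));
	return 0;
}

With GCC, building this with -O0 -S and inspecting the assembly shows a call
instruction for the first helper but not for the second; that call overhead is
what the patch removes from the hardware tag-based KASAN paths.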