The patch titled
     Subject: arm64: kasan: simplify and inline MTE functions
has been added to the -mm tree.  Its filename is
     arm64-kasan-simplify-and-inline-mte-functions.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/arm64-kasan-simplify-and-inline-mte-functions.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/arm64-kasan-simplify-and-inline-mte-functions.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Subject: arm64: kasan: simplify and inline MTE functions

This change provides a simpler implementation of mte_get_mem_tag(),
mte_get_random_tag(), and mte_set_mem_tag_range().

Simplifications include removing system_supports_mte() checks, as these
functions are only called from the KASAN runtime, which has already
checked system_supports_mte().  Besides that, size and address alignment
checks are removed from mte_set_mem_tag_range(), as KASAN now does those.

This change also moves these functions into the asm/mte-kasan.h header
and implements mte_set_mem_tag_range() via inline assembly to avoid
unnecessary function calls.

Link: https://lkml.kernel.org/r/17d6bef698d193f5fe0d8baee0e232a351e23a32.1612208222.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Co-developed-by: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Branislav Rankov <Branislav.Rankov@xxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Evgenii Stepanov <eugenis@xxxxxxxxxx>
Cc: Kevin Brodsky <kevin.brodsky@xxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Peter Collingbourne <pcc@xxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/arm64/include/asm/cache.h     |    1
 arch/arm64/include/asm/kasan.h     |    1
 arch/arm64/include/asm/mte-def.h   |    2
 arch/arm64/include/asm/mte-kasan.h |   64 ++++++++++++++++++++++++---
 arch/arm64/include/asm/mte.h       |    2
 arch/arm64/kernel/mte.c            |   46 -------------------
 arch/arm64/lib/mte.S               |   16 ------
 7 files changed, 60 insertions(+), 72 deletions(-)

--- a/arch/arm64/include/asm/cache.h~arm64-kasan-simplify-and-inline-mte-functions
+++ a/arch/arm64/include/asm/cache.h
@@ -6,7 +6,6 @@
 #define __ASM_CACHE_H

 #include <asm/cputype.h>
-#include <asm/mte-kasan.h>

 #define CTR_L1IP_SHIFT          14
 #define CTR_L1IP_MASK           3
--- a/arch/arm64/include/asm/kasan.h~arm64-kasan-simplify-and-inline-mte-functions
+++ a/arch/arm64/include/asm/kasan.h
@@ -6,6 +6,7 @@

 #include <linux/linkage.h>
 #include <asm/memory.h>
+#include <asm/mte-kasan.h>
 #include <asm/pgtable-types.h>

 #define arch_kasan_set_tag(addr, tag)   __tag_set(addr, tag)
--- a/arch/arm64/include/asm/mte-def.h~arm64-kasan-simplify-and-inline-mte-functions
+++ a/arch/arm64/include/asm/mte-def.h
@@ -11,4 +11,6 @@
 #define MTE_TAG_SIZE            4
 #define MTE_TAG_MASK            GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)

+#define __MTE_PREAMBLE          ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
+
 #endif /* __ASM_MTE_DEF_H */
--- a/arch/arm64/include/asm/mte.h~arm64-kasan-simplify-and-inline-mte-functions
+++ a/arch/arm64/include/asm/mte.h
@@ -8,8 +8,6 @@
 #include <asm/compiler.h>
 #include <asm/mte-def.h>

-#define __MTE_PREAMBLE          ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
-
 #ifndef __ASSEMBLY__

 #include <linux/bitfield.h>
--- a/arch/arm64/include/asm/mte-kasan.h~arm64-kasan-simplify-and-inline-mte-functions
+++ a/arch/arm64/include/asm/mte-kasan.h
@@ -11,13 +11,16 @@
 #include <linux/types.h>

+#ifdef CONFIG_ARM64_MTE
+
 /*
- * The functions below are meant to be used only for the
- * KASAN_HW_TAGS interface defined in asm/memory.h.
+ * These functions are meant to be only used from KASAN runtime through
+ * the arch_*() interface defined in asm/memory.h.
+ * These functions don't include system_supports_mte() checks,
+ * as KASAN only calls them when MTE is supported and enabled.
  */
-#ifdef CONFIG_ARM64_MTE
-static inline u8 mte_get_ptr_tag(void *ptr)
+static __always_inline u8 mte_get_ptr_tag(void *ptr)
 {
        /* Note: The format of KASAN tags is 0xF<x> */
        u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
@@ -25,9 +28,54 @@ static inline u8 mte_get_ptr_tag(void *p
        return tag;
 }

-u8 mte_get_mem_tag(void *addr);
-u8 mte_get_random_tag(void);
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+/* Get allocation tag for the address. */
+static __always_inline u8 mte_get_mem_tag(void *addr)
+{
+        asm(__MTE_PREAMBLE "ldg %0, [%0]"
+            : "+r" (addr));
+
+        return mte_get_ptr_tag(addr);
+}
+
+/* Generate a random tag. */
+static __always_inline u8 mte_get_random_tag(void)
+{
+        void *addr;
+
+        asm(__MTE_PREAMBLE "irg %0, %0"
+            : "+r" (addr));
+
+        return mte_get_ptr_tag(addr);
+}
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag.
+ * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
+ * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ */
+static __always_inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+        u64 curr, end;
+
+        if (!size)
+                return;
+
+        curr = (u64)__tag_set(addr, tag);
+        end = curr + size;
+
+        do {
+                /*
+                 * 'asm volatile' is required to prevent the compiler from
+                 * moving the statement outside of the loop.
+                 */
+                asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
+                             :
+                             : "r" (curr)
+                             : "memory");
+
+                curr += MTE_GRANULE_SIZE;
+        } while (curr != end);
+}

 void mte_enable_kernel(void);
 void mte_init_tags(u64 max_tag);
@@ -46,10 +94,12 @@ static inline u8 mte_get_mem_tag(void *a
 {
        return 0xFF;
 }
+
 static inline u8 mte_get_random_tag(void)
 {
        return 0xFF;
 }
+
 static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
 {
        return addr;
--- a/arch/arm64/kernel/mte.c~arm64-kasan-simplify-and-inline-mte-functions
+++ a/arch/arm64/kernel/mte.c
@@ -19,7 +19,6 @@
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
-#include <asm/mte-kasan.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>

@@ -88,51 +87,6 @@ int memcmp_pages(struct page *page1, str
        return ret;
 }

-u8 mte_get_mem_tag(void *addr)
-{
-        if (!system_supports_mte())
-                return 0xFF;
-
-        asm(__MTE_PREAMBLE "ldg %0, [%0]"
-            : "+r" (addr));
-
-        return mte_get_ptr_tag(addr);
-}
-
-u8 mte_get_random_tag(void)
-{
-        void *addr;
-
-        if (!system_supports_mte())
-                return 0xFF;
-
-        asm(__MTE_PREAMBLE "irg %0, %0"
-            : "+r" (addr));
-
-        return mte_get_ptr_tag(addr);
-}
-
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
-{
-        void *ptr = addr;
-
-        if ((!system_supports_mte()) || (size == 0))
-                return addr;
-
-        /* Make sure that size is MTE granule aligned. */
-        WARN_ON(size & (MTE_GRANULE_SIZE - 1));
-
-        /* Make sure that the address is MTE granule aligned. */
-        WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
-
-        tag = 0xF0 | tag;
-        ptr = (void *)__tag_set(ptr, tag);
-
-        mte_assign_mem_tag_range(ptr, size);
-
-        return ptr;
-}
-
 void mte_init_tags(u64 max_tag)
 {
        static bool gcr_kernel_excl_initialized;
--- a/arch/arm64/lib/mte.S~arm64-kasan-simplify-and-inline-mte-functions
+++ a/arch/arm64/lib/mte.S
@@ -149,19 +149,3 @@ SYM_FUNC_START(mte_restore_page_tags)
        ret
 SYM_FUNC_END(mte_restore_page_tags)
-
-/*
- * Assign allocation tags for a region of memory based on the pointer tag
- * x0 - source pointer
- * x1 - size
- *
- * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
- * size must be non-zero and MTE_GRANULE_SIZE aligned.
- */
-SYM_FUNC_START(mte_assign_mem_tag_range)
-1:      stg     x0, [x0]
-        add     x0, x0, #MTE_GRANULE_SIZE
-        subs    x1, x1, #MTE_GRANULE_SIZE
-        b.gt    1b
-        ret
-SYM_FUNC_END(mte_assign_mem_tag_range)
_

Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are

kasan-prefix-global-functions-with-kasan_.patch
kasan-clarify-hw_tags-impact-on-tbi.patch
kasan-clean-up-comments-in-tests.patch
kasan-add-macros-to-simplify-checking-test-constraints.patch
kasan-add-match-all-tag-tests.patch
kasan-arm64-allow-using-kunit-tests-with-hw_tags-mode.patch
kasan-rename-config_test_kasan_module.patch
kasan-add-compiler-barriers-to-kunit_expect_kasan_fail.patch
kasan-adapt-kmalloc_uaf2-test-to-hw_tags-mode.patch
kasan-fix-memory-corruption-in-kasan_bitops_tags-test.patch
kasan-move-_ret_ip_-to-inline-wrappers.patch
kasan-fix-bug-detection-via-ksize-for-hw_tags-mode.patch
kasan-add-proper-page-allocator-tests.patch
kasan-add-a-test-for-kmem_cache_alloc-free_bulk.patch
kasan-dont-run-tests-when-kasan-is-not-enabled.patch
kasan-mm-dont-save-alloc-stacks-twice.patch
kasan-mm-optimize-kmalloc-poisoning.patch
kasan-optimize-large-kmalloc-poisoning.patch
kasan-clean-up-setting-free-info-in-kasan_slab_free.patch
kasan-unify-large-kfree-checks.patch
kasan-rework-krealloc-tests.patch
kasan-mm-remove-krealloc-side-effect.patch
kasan-mm-optimize-krealloc-poisoning.patch
kasan-ensure-poisoning-size-alignment.patch
arm64-kasan-simplify-and-inline-mte-functions.patch
kasan-always-inline-hw_tags-helper-functions.patch
arm64-kasan-export-mte-symbols-for-kasan-tests.patch
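For readers following the diff above, here is a minimal userspace sketch (not
kernel code) of the granule loop that the new inline mte_set_mem_tag_range()
uses.  The real "stg" instruction is replaced by a store into a plain
shadow_tags[] array, and the names model_set_mem_tag_range() and shadow_tags
are made up purely for illustration; the point is to show why KASAN's
guarantee of an MTE_GRANULE_SIZE-aligned address and size lets the loop exit
on an exact "curr != end" comparison.

/* Userspace model of the tag-setting loop; illustrative only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MTE_GRANULE_SIZE 16UL

static uint8_t shadow_tags[64];	/* one simulated tag per 16-byte granule */

static void model_set_mem_tag_range(uint64_t addr, size_t size, uint8_t tag)
{
	uint64_t curr, end;

	/* The KASAN caller guarantees granule-aligned address and size. */
	assert(addr % MTE_GRANULE_SIZE == 0);
	assert(size != 0 && size % MTE_GRANULE_SIZE == 0);

	curr = addr;
	end = curr + size;

	do {
		/* Stand-in for "stg %0, [%0]" on the tagged pointer. */
		shadow_tags[curr / MTE_GRANULE_SIZE] = tag;
		curr += MTE_GRANULE_SIZE;
	} while (curr != end);	/* exact match is safe due to the alignment */
}

int main(void)
{
	size_t i;

	memset(shadow_tags, 0xFF, sizeof(shadow_tags));

	/* Tag granules 2..4 with tag 0xA. */
	model_set_mem_tag_range(2 * MTE_GRANULE_SIZE, 3 * MTE_GRANULE_SIZE, 0xA);

	for (i = 0; i < 8; i++)
		printf("granule %zu: tag 0x%X\n", i, (unsigned int)shadow_tags[i]);

	return 0;
}

Because both inputs are multiples of MTE_GRANULE_SIZE, curr lands exactly on
end, which is why the inline kernel version can use "!=" rather than a "<"
comparison.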