The patch titled
     Subject: arm64: mte: add in-kernel MTE helpers
has been added to the -mm tree.  Its filename is
     arm64-mte-add-in-kernel-mte-helpers.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/arm64-mte-add-in-kernel-mte-helpers.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/arm64-mte-add-in-kernel-mte-helpers.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Subject: arm64: mte: add in-kernel MTE helpers

Provide helper functions to manipulate allocation and pointer tags for
kernel addresses.

Low-level helper functions (mte_assign_*, written in assembly) operate on
tag values in the [0x0, 0xF] range.  High-level helper functions
(mte_get/set_*) use the [0xF0, 0xFF] range to preserve compatibility with
normal kernel pointers that have 0xFF in their top byte.

MTE_GRANULE_SIZE and related definitions are moved to the mte-def.h
header, which doesn't have any dependencies and is safe to include in any
low-level header.

Link: https://lkml.kernel.org/r/c31bf759b4411b2d98cdd801eb928e241584fd1f.1606161801.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Co-developed-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Reviewed-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Tested-by: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Branislav Rankov <Branislav.Rankov@xxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Evgenii Stepanov <eugenis@xxxxxxxxxx>
Cc: Kevin Brodsky <kevin.brodsky@xxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/arm64/include/asm/esr.h       |    1 
 arch/arm64/include/asm/mte-def.h   |   15 +++++++
 arch/arm64/include/asm/mte-kasan.h |   56 +++++++++++++++++++++++++++
 arch/arm64/include/asm/mte.h       |   20 ++++++---
 arch/arm64/kernel/mte.c            |   48 +++++++++++++++++++++++
 arch/arm64/lib/mte.S               |   16 +++++++
 6 files changed, 150 insertions(+), 6 deletions(-)

--- a/arch/arm64/include/asm/esr.h~arm64-mte-add-in-kernel-mte-helpers
+++ a/arch/arm64/include/asm/esr.h
@@ -105,6 +105,7 @@
 #define ESR_ELx_FSC		(0x3F)
 #define ESR_ELx_FSC_TYPE	(0x3C)
 #define ESR_ELx_FSC_EXTABT	(0x10)
+#define ESR_ELx_FSC_MTE		(0x11)
 #define ESR_ELx_FSC_SERROR	(0x11)
 #define ESR_ELx_FSC_ACCESS	(0x08)
 #define ESR_ELx_FSC_FAULT	(0x04)
--- /dev/null
+++ a/arch/arm64/include/asm/mte-def.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_DEF_H
+#define __ASM_MTE_DEF_H
+
+#define MTE_GRANULE_SIZE	UL(16)
+#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
+#define MTE_TAG_SHIFT		56
+#define MTE_TAG_SIZE		4
+#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
+#define MTE_TAG_MAX		(MTE_TAG_MASK >> MTE_TAG_SHIFT)
+
+#endif /* __ASM_MTE_DEF_H */
--- a/arch/arm64/include/asm/mte.h~arm64-mte-add-in-kernel-mte-helpers
+++ a/arch/arm64/include/asm/mte.h
@@ -5,14 +5,16 @@
 #ifndef __ASM_MTE_H
 #define __ASM_MTE_H
 
-#define MTE_GRANULE_SIZE	UL(16)
-#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
-#define MTE_TAG_SHIFT		56
-#define MTE_TAG_SIZE		4
+#include <asm/compiler.h>
+#include <asm/mte-def.h>
+
+#define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bitfield.h>
 #include <linux/page-flags.h>
+#include <linux/types.h>
 
 #include <asm/pgtable-types.h>
 
@@ -45,7 +47,9 @@ long get_mte_ctrl(struct task_struct *ta
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
 			 unsigned long addr, unsigned long data);
 
-#else
+void mte_assign_mem_tag_range(void *addr, size_t size);
+
+#else /* CONFIG_ARM64_MTE */
 
 /* unused if !CONFIG_ARM64_MTE, silence the compiler */
 #define PG_mte_tagged	0
@@ -80,7 +84,11 @@ static inline int mte_ptrace_copy_tags(s
 	return -EIO;
 }
 
-#endif
+static inline void mte_assign_mem_tag_range(void *addr, size_t size)
+{
+}
+
+#endif /* CONFIG_ARM64_MTE */
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_MTE_H */
--- /dev/null
+++ a/arch/arm64/include/asm/mte-kasan.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_KASAN_H
+#define __ASM_MTE_KASAN_H
+
+#include <asm/mte-def.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * The functions below are meant to be used only for the
+ * KASAN_HW_TAGS interface defined in asm/memory.h.
+ */
+#ifdef CONFIG_ARM64_MTE
+
+static inline u8 mte_get_ptr_tag(void *ptr)
+{
+	/* Note: The format of KASAN tags is 0xF<x> */
+	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
+
+	return tag;
+}
+
+u8 mte_get_mem_tag(void *addr);
+u8 mte_get_random_tag(void);
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+
+#else /* CONFIG_ARM64_MTE */
+
+static inline u8 mte_get_ptr_tag(void *ptr)
+{
+	return 0xFF;
+}
+
+static inline u8 mte_get_mem_tag(void *addr)
+{
+	return 0xFF;
+}
+static inline u8 mte_get_random_tag(void)
+{
+	return 0xFF;
+}
+static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	return addr;
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_MTE_KASAN_H */
--- a/arch/arm64/kernel/mte.c~arm64-mte-add-in-kernel-mte-helpers
+++ a/arch/arm64/kernel/mte.c
@@ -13,10 +13,13 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/thread_info.h>
+#include <linux/types.h>
 #include <linux/uio.h>
 
+#include <asm/barrier.h>
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
+#include <asm/mte-kasan.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
@@ -72,6 +75,51 @@ int memcmp_pages(struct page *page1, str
 	return ret;
 }
 
+u8 mte_get_mem_tag(void *addr)
+{
+	if (!system_supports_mte())
+		return 0xFF;
+
+	asm(__MTE_PREAMBLE "ldg %0, [%0]"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+u8 mte_get_random_tag(void)
+{
+	void *addr;
+
+	if (!system_supports_mte())
+		return 0xFF;
+
+	asm(__MTE_PREAMBLE "irg %0, %0"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	void *ptr = addr;
+
+	if ((!system_supports_mte()) || (size == 0))
+		return addr;
+
+	/* Make sure that size is MTE granule aligned. */
+	WARN_ON(size & (MTE_GRANULE_SIZE - 1));
+
+	/* Make sure that the address is MTE granule aligned. */
+	WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
+
+	tag = 0xF0 | tag;
+	ptr = (void *)__tag_set(ptr, tag);
+
+	mte_assign_mem_tag_range(ptr, size);
+
+	return ptr;
+}
+
 static void update_sctlr_el1_tcf0(u64 tcf0)
 {
 	/* ISB required for the kernel uaccess routines */
--- a/arch/arm64/lib/mte.S~arm64-mte-add-in-kernel-mte-helpers
+++ a/arch/arm64/lib/mte.S
@@ -149,3 +149,19 @@ SYM_FUNC_START(mte_restore_page_tags)
 
 	ret
 SYM_FUNC_END(mte_restore_page_tags)
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag
+ *   x0 - source pointer
+ *   x1 - size
+ *
+ * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
+ * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ */
+SYM_FUNC_START(mte_assign_mem_tag_range)
+1:	stg	x0, [x0]
+	add	x0, x0, #MTE_GRANULE_SIZE
+	subs	x1, x1, #MTE_GRANULE_SIZE
+	b.gt	1b
+	ret
+SYM_FUNC_END(mte_assign_mem_tag_range)
_

Patches currently in -mm which might be from vincenzo.frascino@xxxxxxx are

mm-vmalloc-fix-kasan-shadow-poisoning-size.patch
arm64-enable-armv85-a-asm-arch-option.patch
arm64-mte-add-in-kernel-mte-helpers.patch
arm64-mte-reset-the-page-tag-in-page-flags.patch
arm64-mte-add-in-kernel-tag-fault-handler.patch
arm64-kasan-allow-enabling-in-kernel-mte.patch
arm64-mte-convert-gcr_user-into-an-exclude-mask.patch
arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit.patch
kasan-mm-untag-page-address-in-free_reserved_area.patch
kselftest-arm64-check-gcr_el1-after-context-switch.patch
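
For reference, the tag arithmetic used by these helpers can be sketched in
plain, standalone C (illustrative only, not part of the patch; the helper
names and the sample pointer value are invented here, but the shifts mirror
mte_get_ptr_tag() and the __tag_set() step in mte_set_mem_tag_range(),
with the [0x0, 0xF] low-level range and the [0xF0, 0xFF] top-byte range):

#include <stdint.h>
#include <stdio.h>

#define MTE_TAG_SHIFT	56

/* Mirrors mte_get_ptr_tag(): KASAN tags have the 0xF<x> format. */
static uint8_t get_ptr_tag(uint64_t ptr)
{
	return 0xF0 | (uint8_t)(ptr >> MTE_TAG_SHIFT);
}

/* Mirrors the __tag_set() step: replace the pointer's top byte. */
static uint64_t tag_set(uint64_t ptr, uint8_t tag)
{
	return (ptr & ~(0xFFULL << MTE_TAG_SHIFT)) |
	       ((uint64_t)tag << MTE_TAG_SHIFT);
}

int main(void)
{
	uint64_t kptr = 0xFF00000012345678ULL;	/* 0xFF top byte: untagged */
	uint8_t low_tag = 0x5;			/* low-level tag, [0x0, 0xF] */

	/* High-level tag = 0xF0 | low-level tag, as in mte_set_mem_tag_range(). */
	uint64_t tagged = tag_set(kptr, 0xF0 | low_tag);

	printf("top byte: 0x%X\n", get_ptr_tag(tagged));	/* prints 0xF5 */
	return 0;
}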
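
Similarly, the stg loop in mte_assign_mem_tag_range() can be read as the C
model below (again an illustrative sketch: real STG instructions write the
allocation tag into dedicated tag storage, modelled here as a byte array,
and the tag travels in bits 59:56 of x0 rather than as a separate argument):

#include <stddef.h>
#include <stdint.h>

#define MTE_GRANULE_SIZE	16UL

/* Model of allocation-tag storage: one 4-bit tag per 16-byte granule. */
static uint8_t tag_mem[1024];

static void assign_mem_tag_range(uintptr_t addr, size_t size, uint8_t tag)
{
	/* Caller guarantees granule-aligned addr and non-zero, aligned size. */
	do {
		/* stg x0, [x0]: tag one granule */
		tag_mem[(addr / MTE_GRANULE_SIZE) % sizeof(tag_mem)] = tag & 0xF;
		addr += MTE_GRANULE_SIZE;	/* add  x0, x0, #MTE_GRANULE_SIZE */
		size -= MTE_GRANULE_SIZE;	/* subs x1, x1, #MTE_GRANULE_SIZE */
	} while (size > 0);			/* b.gt 1b */
}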