The patch titled
     Subject: arm64: mte: switch GCR_EL1 in kernel entry and exit
has been added to the -mm tree.  Its filename is
     arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Subject: arm64: mte: switch GCR_EL1 in kernel entry and exit

When MTE is present, the GCR_EL1 register contains the tag mask that
allows tags to be excluded from random generation via the IRG
instruction.  With the introduction of the new Tag-Based KASAN API,
which provides a mechanism to reserve tags for special purposes, the
MTE implementation has to make sure that the GCR_EL1 setting for the
kernel does not affect userspace processes and vice versa.

Save and restore the kernel/user mask in GCR_EL1 on kernel entry and
exit.

Link: https://lkml.kernel.org/r/578b03294708cc7258fad0dc9c2a2e809e5a8214.1606161801.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Co-developed-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Reviewed-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Tested-by: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Branislav Rankov <Branislav.Rankov@xxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Evgenii Stepanov <eugenis@xxxxxxxxxx>
Cc: Kevin Brodsky <kevin.brodsky@xxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
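[Editor's note: the user-space sketch below is not part of the patch; it
is a minimal model of the GCR_EL1 switching described above.
GCR_EL1.Exclude (bits [15:0]) has one bit per tag, and a set bit excludes
that tag from IRG's random choice; RRND (bit 16) must be preserved, which
is what the bfi in the mte_set_gcr macro further down achieves.  All
names and mask values here are illustrative, not taken from the patch.]

#include <stdint.h>
#include <stdio.h>

#define GCR_EXCL_MASK	0xffffULL	/* GCR_EL1.Exclude, bits [15:0] */
#define GCR_RRND	(1ULL << 16)	/* GCR_EL1.RRND */

static uint64_t gcr_el1;	/* stand-in for the system register */

/* Model of the mte_set_gcr macro: rewrite Exclude, keep RRND intact. */
static void set_gcr(uint64_t excl)
{
	gcr_el1 = (gcr_el1 & ~GCR_EXCL_MASK) | (excl & GCR_EXCL_MASK);
}

int main(void)
{
	uint64_t gcr_kernel_excl = 0x8000;	/* kernel: exclude only tag 0xf */
	uint64_t gcr_user_excl   = 0x0001;	/* this thread: exclude tag 0x0 */

	gcr_el1 = GCR_RRND | gcr_user_excl;	/* running in userspace */

	set_gcr(gcr_kernel_excl);		/* kernel_entry */
	printf("in kernel:    GCR_EL1 = 0x%llx\n", (unsigned long long)gcr_el1);

	set_gcr(gcr_user_excl);			/* kernel_exit */
	printf("back in user: GCR_EL1 = 0x%llx\n", (unsigned long long)gcr_el1);
	return 0;
}

[Prints 0x18000 then 0x10001: the Exclude field is swapped on each
transition while RRND stays set throughout.]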

 arch/arm64/include/asm/mte-def.h   |    1 
 arch/arm64/include/asm/mte-kasan.h |    5 +++
 arch/arm64/include/asm/mte.h       |    2 +
 arch/arm64/kernel/asm-offsets.c    |    3 +
 arch/arm64/kernel/entry.S          |   41 +++++++++++++++++++++++++++
 arch/arm64/kernel/mte.c            |   31 ++++++++++++++++++--
 6 files changed, 79 insertions(+), 4 deletions(-)

--- a/arch/arm64/include/asm/mte-def.h~arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit
+++ a/arch/arm64/include/asm/mte-def.h
@@ -10,6 +10,5 @@
 #define MTE_TAG_SHIFT		56
 #define MTE_TAG_SIZE		4
 #define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
-#define MTE_TAG_MAX		(MTE_TAG_MASK >> MTE_TAG_SHIFT)
 
 #endif /* __ASM_MTE_DEF_H */
--- a/arch/arm64/include/asm/mte.h~arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit
+++ a/arch/arm64/include/asm/mte.h
@@ -18,6 +18,8 @@
 
 #include <asm/pgtable-types.h>
 
+extern u64 gcr_kernel_excl;
+
 void mte_clear_page_tags(void *addr);
 unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
 				      unsigned long n);
--- a/arch/arm64/include/asm/mte-kasan.h~arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit
+++ a/arch/arm64/include/asm/mte-kasan.h
@@ -30,6 +30,7 @@ u8 mte_get_random_tag(void);
 void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
 
 void mte_enable_kernel(void);
+void mte_init_tags(u64 max_tag);
 
 #else /* CONFIG_ARM64_MTE */
 
@@ -55,6 +56,10 @@ static inline void mte_enable_kernel(voi
 {
 }
 
+static inline void mte_init_tags(u64 max_tag)
+{
+}
+
 #endif /* CONFIG_ARM64_MTE */
 
 #endif /* __ASSEMBLY__ */
--- a/arch/arm64/kernel/asm-offsets.c~arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit
+++ a/arch/arm64/kernel/asm-offsets.c
@@ -48,6 +48,9 @@ int main(void)
   DEFINE(THREAD_KEYS_USER,	offsetof(struct task_struct, thread.keys_user));
   DEFINE(THREAD_KEYS_KERNEL,	offsetof(struct task_struct, thread.keys_kernel));
 #endif
+#ifdef CONFIG_ARM64_MTE
+  DEFINE(THREAD_GCR_EL1_USER,	offsetof(struct task_struct, thread.gcr_user_excl));
+#endif
   BLANK();
   DEFINE(S_X0,			offsetof(struct pt_regs, regs[0]));
   DEFINE(S_X2,			offsetof(struct pt_regs, regs[2]));
--- a/arch/arm64/kernel/entry.S~arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit
+++ a/arch/arm64/kernel/entry.S
@@ -173,6 +173,43 @@ alternative_else_nop_endif
 #endif
 	.endm
 
+	.macro mte_set_gcr, tmp, tmp2
+#ifdef CONFIG_ARM64_MTE
+	/*
+	 * Calculate and set the exclude mask preserving
+	 * the RRND (bit[16]) setting.
+	 */
+	mrs_s	\tmp2, SYS_GCR_EL1
+	bfi	\tmp2, \tmp, #0, #16
+	msr_s	SYS_GCR_EL1, \tmp2
+	isb
+#endif
+	.endm
+
+	.macro mte_set_kernel_gcr, tmp, tmp2
+#ifdef CONFIG_KASAN_HW_TAGS
+alternative_if_not ARM64_MTE
+	b	1f
+alternative_else_nop_endif
+	ldr_l	\tmp, gcr_kernel_excl
+
+	mte_set_gcr	\tmp, \tmp2
+1:
+#endif
+	.endm
+
+	.macro mte_set_user_gcr, tsk, tmp, tmp2
+#ifdef CONFIG_ARM64_MTE
+alternative_if_not ARM64_MTE
+	b	1f
+alternative_else_nop_endif
+	ldr	\tmp, [\tsk, #THREAD_GCR_EL1_USER]
+
+	mte_set_gcr	\tmp, \tmp2
+1:
+#endif
+	.endm
+
 	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
@@ -212,6 +249,8 @@ alternative_else_nop_endif
 
 	ptrauth_keys_install_kernel tsk, x20, x22, x23
 
+	mte_set_kernel_gcr x22, x23
+
 	scs_load tsk, x20
 	.else
 	add	x21, sp, #S_FRAME_SIZE
@@ -330,6 +369,8 @@ alternative_else_nop_endif
 	/* No kernel C function calls after this as user keys are set. */
 	ptrauth_keys_install_user tsk, x0, x1, x2
 
+	mte_set_user_gcr tsk, x0, x1
+
 	apply_ssbd 0, x0, x1
 	.endif
 
--- a/arch/arm64/kernel/mte.c~arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit
+++ a/arch/arm64/kernel/mte.c
@@ -23,6 +23,8 @@
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
+u64 gcr_kernel_excl __ro_after_init;
+
 static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
 {
 	pte_t old_pte = READ_ONCE(*ptep);
@@ -129,6 +131,26 @@ void *mte_set_mem_tag_range(void *addr,
 	return ptr;
 }
 
+void mte_init_tags(u64 max_tag)
+{
+	static bool gcr_kernel_excl_initialized;
+
+	if (!gcr_kernel_excl_initialized) {
+		/*
+		 * The format of the tags in KASAN is 0xFF and in MTE is 0xF.
+		 * This conversion extracts an MTE tag from a KASAN tag.
+		 */
+		u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
+					     max_tag), 0);
+
+		gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
+		gcr_kernel_excl_initialized = true;
+	}
+
+	/* Enable the kernel exclude mask for random tag generation. */
+	write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
+}
+
 void mte_enable_kernel(void)
 {
 	/* Enable MTE Sync Mode for EL1. */
@@ -171,7 +193,11 @@ static void update_gcr_el1_excl(u64 excl
 
 static void set_gcr_el1_excl(u64 excl)
 {
 	current->thread.gcr_user_excl = excl;
-	update_gcr_el1_excl(excl);
+
+	/*
+	 * SYS_GCR_EL1 will be set to current->thread.gcr_user_excl value
+	 * by mte_set_user_gcr() in kernel_exit.
+	 */
 }
 
@@ -197,7 +223,6 @@ void mte_thread_switch(struct task_struc
 
 	/* avoid expensive SCTLR_EL1 accesses if no change */
 	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
 		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
-	update_gcr_el1_excl(next->thread.gcr_user_excl);
 }
 
@@ -205,7 +230,7 @@ void mte_suspend_exit(void)
 	if (!system_supports_mte())
 		return;
 
-	update_gcr_el1_excl(current->thread.gcr_user_excl);
+	update_gcr_el1_excl(gcr_kernel_excl);
 }
 
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
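[Editor's note: below is a stand-alone model of the exclude-mask
computation in mte_init_tags() above, with the kernel's GENMASK() and
FIELD_GET() helpers expanded into plain C.  The max_tag value 0xFE is an
assumption for illustration only (KASAN tags occupy 0xF0-0xFF, with one
value typically reserved); it is not taken from this patch.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_tag = 0xfe;	/* assumed maximum KASAN tag */

	/*
	 * FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT, max_tag): the low 4
	 * bits of the KASAN tag are the MTE tag -> 0xe.
	 */
	uint64_t top = max_tag & 0xf;

	/* GENMASK(top, 0): include tags 0x0..0xe -> 0x7fff. */
	uint64_t incl = (1ULL << (top + 1)) - 1;

	/* ~incl & SYS_GCR_EL1_EXCL_MASK: exclude only tag 0xf -> 0x8000. */
	uint64_t excl = ~incl & 0xffffULL;

	printf("incl = 0x%llx, excl = 0x%llx\n",
	       (unsigned long long)incl, (unsigned long long)excl);
	return 0;
}

[Prints incl = 0x7fff, excl = 0x8000: with this assumed max_tag, IRG may
generate any tag except 0xf, leaving 0xf reserved for KASAN.]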
_

Patches currently in -mm which might be from vincenzo.frascino@xxxxxxx are

mm-vmalloc-fix-kasan-shadow-poisoning-size.patch
arm64-enable-armv85-a-asm-arch-option.patch
arm64-mte-add-in-kernel-mte-helpers.patch
arm64-mte-reset-the-page-tag-in-page-flags.patch
arm64-mte-add-in-kernel-tag-fault-handler.patch
arm64-kasan-allow-enabling-in-kernel-mte.patch
arm64-mte-convert-gcr_user-into-an-exclude-mask.patch
arm64-mte-switch-gcr_el1-in-kernel-entry-and-exit.patch
kasan-mm-untag-page-address-in-free_reserved_area.patch
kselftest-arm64-check-gcr_el1-after-context-switch.patch