The patch titled Subject: kasan: add tag related helper functions has been added to the -mm tree. Its filename is kasan-add-tag-related-helper-functions.patch This patch should soon appear at http://ozlabs.org/~akpm/mmots/broken-out/kasan-add-tag-related-helper-functions.patch and later at http://ozlabs.org/~akpm/mmotm/broken-out/kasan-add-tag-related-helper-functions.patch Before you just go and hit "reply", please: a) Consider who else should be cc'ed b) Prefer to cc a suitable mailing list as well c) Ideally: find the original patch on the mailing list and do a reply-to-all to that, adding suitable additional cc's *** Remember to use Documentation/process/submit-checklist.rst when testing your code *** The -mm tree is included into linux-next and is updated there every 3-4 working days ------------------------------------------------------ From: Andrey Konovalov <andreyknvl@xxxxxxxxxx> Subject: kasan: add tag related helper functions This commit adds a few helper functions, that are meant to be used to work with tags embedded in the top byte of kernel pointers: to set, to get or to reset the top byte. Link: http://lkml.kernel.org/r/643b46fbcd6433a4be18b3a19ce9f3e727618a8d.1543337629.git.andreyknvl@xxxxxxxxxx Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx> Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx> Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx> Cc: Mark Rutland <mark.rutland@xxxxxxx> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> --- --- a/arch/arm64/include/asm/kasan.h~kasan-add-tag-related-helper-functions +++ a/arch/arm64/include/asm/kasan.h @@ -4,12 +4,16 @@ #ifndef __ASSEMBLY__ -#ifdef CONFIG_KASAN - #include <linux/linkage.h> #include <asm/memory.h> #include <asm/pgtable-types.h> +#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag) +#define arch_kasan_reset_tag(addr) __tag_reset(addr) +#define arch_kasan_get_tag(addr) __tag_get(addr) + +#ifdef CONFIG_KASAN + /* * KASAN_SHADOW_START: beginning of the kernel virtual addresses. 
* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses, --- a/arch/arm64/include/asm/memory.h~kasan-add-tag-related-helper-functions +++ a/arch/arm64/include/asm/memory.h @@ -219,6 +219,18 @@ static inline unsigned long kaslr_offset #define untagged_addr(addr) \ ((__typeof__(addr))sign_extend64((u64)(addr), 55)) +#ifdef CONFIG_KASAN_SW_TAGS +#define __tag_shifted(tag) ((u64)(tag) << 56) +#define __tag_set(addr, tag) (__typeof__(addr))( \ + ((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag)) +#define __tag_reset(addr) untagged_addr(addr) +#define __tag_get(addr) (__u8)((u64)(addr) >> 56) +#else +#define __tag_set(addr, tag) (addr) +#define __tag_reset(addr) (addr) +#define __tag_get(addr) 0 +#endif + /* * Physical vs virtual RAM address space conversion. These are * private definitions which should NOT be used outside memory.h --- a/arch/arm64/mm/kasan_init.c~kasan-add-tag-related-helper-functions +++ a/arch/arm64/mm/kasan_init.c @@ -253,6 +253,8 @@ void __init kasan_init(void) memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); + kasan_init_tags(); + /* At this point kasan is fully initialized. 
Enable error messages */ init_task.kasan_depth = 0; pr_info("KernelAddressSanitizer initialized\n"); --- a/include/linux/kasan.h~kasan-add-tag-related-helper-functions +++ a/include/linux/kasan.h @@ -169,6 +169,19 @@ static inline void kasan_cache_shutdown( #define KASAN_SHADOW_INIT 0xFF +void kasan_init_tags(void); + +void *kasan_reset_tag(const void *addr); + +#else /* CONFIG_KASAN_SW_TAGS */ + +static inline void kasan_init_tags(void) { } + +static inline void *kasan_reset_tag(const void *addr) +{ + return (void *)addr; +} + #endif /* CONFIG_KASAN_SW_TAGS */ #endif /* LINUX_KASAN_H */ --- a/mm/kasan/kasan.h~kasan-add-tag-related-helper-functions +++ a/mm/kasan/kasan.h @@ -8,6 +8,10 @@ #define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT) #define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1) +#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */ +#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */ +#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */ + #define KASAN_FREE_PAGE 0xFF /* page was freed */ #define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */ #define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */ @@ -126,6 +130,33 @@ static inline void quarantine_reduce(voi static inline void quarantine_remove_cache(struct kmem_cache *cache) { } #endif +#ifdef CONFIG_KASAN_SW_TAGS + +u8 random_tag(void); + +#else + +static inline u8 random_tag(void) +{ + return 0; +} + +#endif + +#ifndef arch_kasan_set_tag +#define arch_kasan_set_tag(addr, tag) ((void *)(addr)) +#endif +#ifndef arch_kasan_reset_tag +#define arch_kasan_reset_tag(addr) ((void *)(addr)) +#endif +#ifndef arch_kasan_get_tag +#define arch_kasan_get_tag(addr) 0 +#endif + +#define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag))) +#define reset_tag(addr) ((void *)arch_kasan_reset_tag(addr)) +#define get_tag(addr) arch_kasan_get_tag(addr) + /* * Exported functions for interfaces called from assembly or from generated * 
code. Declarations here to avoid warning about missing declarations. --- a/mm/kasan/tags.c~kasan-add-tag-related-helper-functions +++ a/mm/kasan/tags.c @@ -38,6 +38,43 @@ #include "kasan.h" #include "../slab.h" +static DEFINE_PER_CPU(u32, prng_state); + +void kasan_init_tags(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + per_cpu(prng_state, cpu) = get_random_u32(); +} + +/* + * If a preemption happens between this_cpu_read and this_cpu_write, the only + * side effect is that we'll give a few allocated in different contexts objects + * the same tag. Since tag-based KASAN is meant to be used as a probabilistic + * bug-detection debug feature, this doesn't have significant negative impact. + * + * Ideally the tags use strong randomness to prevent any attempts to predict + * them during explicit exploit attempts. But strong randomness is expensive, + * and we did an intentional trade-off to use a PRNG. This non-atomic RMW + * sequence has in fact positive effect, since interrupts that randomly skew + * PRNG at unpredictable points do only good. 
+ */ +u8 random_tag(void) +{ + u32 state = this_cpu_read(prng_state); + + state = 1664525 * state + 1013904223; + this_cpu_write(prng_state, state); + + return (u8)(state % (KASAN_TAG_MAX + 1)); +} + +void *kasan_reset_tag(const void *addr) +{ + return reset_tag(addr); +} + void check_memory_region(unsigned long addr, size_t size, bool write, unsigned long ret_ip) { _ Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are kasan-mm-change-hooks-signatures.patch kasan-slub-handle-pointer-tags-in-early_kmem_cache_node_alloc.patch kasan-move-common-generic-and-tag-based-code-to-commonc.patch kasan-rename-source-files-to-reflect-the-new-naming-scheme.patch kasan-add-config_kasan_generic-and-config_kasan_sw_tags.patch kasan-arm64-adjust-shadow-size-for-tag-based-mode.patch kasan-rename-kasan_zero_page-to-kasan_early_shadow_page.patch kasan-initialize-shadow-to-0xff-for-tag-based-mode.patch arm64-move-untagged_addr-macro-from-uaccessh-to-memoryh.patch kasan-add-tag-related-helper-functions.patch kasan-arm64-untag-address-in-_virt_addr_is_linear.patch kasan-preassign-tags-to-objects-with-ctors-or-slab_typesafe_by_rcu.patch kasan-arm64-fix-up-fault-handling-logic.patch kasan-arm64-enable-top-byte-ignore-for-the-kernel.patch kasan-mm-perform-untagged-pointers-comparison-in-krealloc.patch kasan-split-out-generic_reportc-from-reportc.patch kasan-add-bug-reporting-routines-for-tag-based-mode.patch mm-move-obj_to_index-to-include-linux-slab_defh.patch kasan-add-hooks-implementation-for-tag-based-mode.patch kasan-arm64-add-brk-handler-for-inline-instrumentation.patch kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc.patch kasan-add-__must_check-annotations-to-kasan-hooks.patch kasan-arm64-select-have_arch_kasan_sw_tags.patch kasan-update-documentation.patch kasan-add-spdx-license-identifier-mark-to-source-files.patch