The patch titled
     Subject: kasan, vmalloc: add vmalloc tagging for SW_TAGS
has been added to the -mm tree.  Its filename is
     kasan-vmalloc-add-vmalloc-tagging-for-sw_tags.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/kasan-vmalloc-add-vmalloc-tagging-for-sw_tags.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/kasan-vmalloc-add-vmalloc-tagging-for-sw_tags.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Subject: kasan, vmalloc: add vmalloc tagging for SW_TAGS

Add vmalloc tagging support to SW_TAGS KASAN.

- __kasan_unpoison_vmalloc() now assigns a random pointer tag, poisons
  the virtual mapping accordingly, and embeds the tag into the returned
  pointer.

- __get_vm_area_node() (used by vmalloc() and vmap()) and
  pcpu_get_vm_areas() save the tagged pointer into vm_struct->addr
  (note: not into vmap_area->addr).  This requires putting
  kasan_unpoison_vmalloc() after setup_vmalloc_vm[_locked]();
  otherwise the latter will overwrite the tagged pointer.  The tagged
  pointer is then naturally propagated to vmalloc() and vmap().

- vm_map_ram() returns the tagged pointer directly.

As a result of this change, vm_struct->addr is now tagged.

Enabling KASAN_VMALLOC with SW_TAGS is not yet allowed.

Link: https://lkml.kernel.org/r/4a78f3c064ce905e9070c29733aca1dd254a74f1.1643047180.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Acked-by: Marco Elver <elver@xxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Andrey Ryabinin <ryabinin.a.a@xxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Evgenii Stepanov <eugenis@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Peter Collingbourne <pcc@xxxxxxxxxx>
Cc: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/kasan.h |   16 ++++++++++------
 mm/kasan/shadow.c     |    6 ++++--
 mm/vmalloc.c          |   14 ++++++++------
 3 files changed, 22 insertions(+), 14 deletions(-)

--- a/include/linux/kasan.h~kasan-vmalloc-add-vmalloc-tagging-for-sw_tags
+++ a/include/linux/kasan.h
@@ -424,12 +424,13 @@ void kasan_release_vmalloc(unsigned long
 			   unsigned long free_region_start,
 			   unsigned long free_region_end);

-void __kasan_unpoison_vmalloc(const void *start, unsigned long size);
-static __always_inline void kasan_unpoison_vmalloc(const void *start,
-						   unsigned long size)
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size);
+static __always_inline void *kasan_unpoison_vmalloc(const void *start,
+						    unsigned long size)
 {
 	if (kasan_enabled())
-		__kasan_unpoison_vmalloc(start, size);
+		return __kasan_unpoison_vmalloc(start, size);
+	return (void *)start;
 }

 void __kasan_poison_vmalloc(const void *start, unsigned long size);
@@ -454,8 +455,11 @@ static inline void kasan_release_vmalloc
 					 unsigned long free_region_start,
 					 unsigned long free_region_end) { }

-static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
-{ }
+static inline void *kasan_unpoison_vmalloc(const void *start,
+					   unsigned long size)
+{
+	return (void *)start;
+}
 static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
 { }

--- a/mm/kasan/shadow.c~kasan-vmalloc-add-vmalloc-tagging-for-sw_tags
+++ a/mm/kasan/shadow.c
@@ -475,12 +475,14 @@ void kasan_release_vmalloc(unsigned long
 	}
 }

-void __kasan_unpoison_vmalloc(const void *start, unsigned long size)
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size)
 {
 	if (!is_vmalloc_or_module_addr(start))
-		return;
+		return (void *)start;

+	start = set_tag(start, kasan_random_tag());
 	kasan_unpoison(start, size, false);
+	return (void *)start;
 }

 /*
--- a/mm/vmalloc.c~kasan-vmalloc-add-vmalloc-tagging-for-sw_tags
+++ a/mm/vmalloc.c
@@ -2210,7 +2210,7 @@ void *vm_map_ram(struct page **pages, un
 		mem = (void *)addr;
 	}

-	kasan_unpoison_vmalloc(mem, size);
+	mem = kasan_unpoison_vmalloc(mem, size);

 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
 				pages, PAGE_SHIFT) < 0) {
@@ -2443,10 +2443,10 @@ static struct vm_struct *__get_vm_area_n
 		return NULL;
 	}

-	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
-
 	setup_vmalloc_vm(area, va, flags, caller);

+	area->addr = kasan_unpoison_vmalloc(area->addr, requested_size);
+
 	return area;
 }

@@ -3795,9 +3795,6 @@ retry:
 	for (area = 0; area < nr_vms; area++) {
 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
 			goto err_free_shadow;
-
-		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
-				       sizes[area]);
 	}

 	/* insert all vm's */
@@ -3810,6 +3807,11 @@ retry:
 	}
 	spin_unlock(&vmap_area_lock);

+	/* mark allocated areas as accessible */
+	for (area = 0; area < nr_vms; area++)
+		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
+							 vms[area]->size);
+
 	kfree(vas);
 	return vms;
_
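For readers new to SW_TAGS, the sketch below is a plain userspace C
illustration (not part of the patch) of what "embedding the tag into the
returned pointer" means: a random tag is stored in the otherwise unused top
byte of a 64-bit pointer, and callers must keep using the returned, tagged
pointer.  The names ptr_set_tag(), ptr_get_tag() and
sketch_unpoison_vmalloc() are hypothetical stand-ins for the kernel's
set_tag(), get_tag() and kasan_random_tag() helpers seen in the
mm/kasan/shadow.c hunk above.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative sketch only, not part of the patch: a minimal model of how
 * SW_TAGS embeds a software tag in the unused top byte of a 64-bit pointer.
 */
#define TAG_SHIFT	56
#define TAG_MASK	((uint64_t)0xff << TAG_SHIFT)

static void *ptr_set_tag(const void *ptr, uint8_t tag)
{
	uint64_t addr = (uintptr_t)ptr;

	addr = (addr & ~TAG_MASK) | ((uint64_t)tag << TAG_SHIFT);
	return (void *)(uintptr_t)addr;
}

static uint8_t ptr_get_tag(const void *ptr)
{
	return (uint8_t)((uint64_t)(uintptr_t)ptr >> TAG_SHIFT);
}

/*
 * Models the new calling convention: the unpoison step hands back a freshly
 * tagged pointer, and the caller must store and use that return value, just
 * as __get_vm_area_node() now stores it in vm_struct::addr.
 */
static void *sketch_unpoison_vmalloc(const void *start)
{
	uint8_t tag = rand() & 0xff;	/* stand-in for kasan_random_tag() */

	return ptr_set_tag(start, tag);
}

int main(void)
{
	void *untagged = malloc(64);
	void *tagged = sketch_unpoison_vmalloc(untagged);

	/*
	 * The tagged pointer is only printed here; dereferencing it would
	 * require hardware that ignores the top byte, which is what the
	 * kernel relies on under SW_TAGS (arm64 Top Byte Ignore).
	 */
	printf("untagged %p, tagged %p, tag 0x%02x\n",
	       untagged, tagged, ptr_get_tag(tagged));

	free(untagged);
	return 0;
}

In the kernel, kasan_unpoison() additionally writes the same tag into the
shadow memory covering the mapping, so accesses through a pointer with a
mismatching tag are reported.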
Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are

kasan-page_alloc-deduplicate-should_skip_kasan_poison.patch
kasan-page_alloc-move-tag_clear_highpage-out-of-kernel_init_free_pages.patch
kasan-page_alloc-merge-kasan_free_pages-into-free_pages_prepare.patch
kasan-page_alloc-simplify-kasan_poison_pages-call-site.patch
kasan-page_alloc-init-memory-of-skipped-pages-on-free.patch
kasan-drop-skip_kasan_poison-variable-in-free_pages_prepare.patch
mm-clarify-__gfp_zerotags-comment.patch
kasan-only-apply-__gfp_zerotags-when-memory-is-zeroed.patch
kasan-page_alloc-refactor-init-checks-in-post_alloc_hook.patch
kasan-page_alloc-merge-kasan_alloc_pages-into-post_alloc_hook.patch
kasan-page_alloc-combine-tag_clear_highpage-calls-in-post_alloc_hook.patch
kasan-page_alloc-move-setpageskipkasanpoison-in-post_alloc_hook.patch
kasan-page_alloc-move-kernel_init_free_pages-in-post_alloc_hook.patch
kasan-page_alloc-rework-kasan_unpoison_pages-call-site.patch
kasan-clean-up-metadata-byte-definitions.patch
kasan-define-kasan_vmalloc_invalid-for-sw_tags.patch
kasan-x86-arm64-s390-rename-functions-for-modules-shadow.patch
kasan-vmalloc-drop-outdated-vm_kasan-comment.patch
kasan-reorder-vmalloc-hooks.patch
kasan-add-wrappers-for-vmalloc-hooks.patch
kasan-vmalloc-reset-tags-in-vmalloc-functions.patch
kasan-fork-reset-pointer-tags-of-vmapped-stacks.patch
kasan-arm64-reset-pointer-tags-of-vmapped-stacks.patch
kasan-vmalloc-add-vmalloc-tagging-for-sw_tags.patch
kasan-vmalloc-arm64-mark-vmalloc-mappings-as-pgprot_tagged.patch
kasan-vmalloc-unpoison-vm_alloc-pages-after-mapping.patch
kasan-mm-only-define-___gfp_skip_kasan_poison-with-hw_tags.patch
kasan-page_alloc-allow-skipping-unpoisoning-for-hw_tags.patch
kasan-page_alloc-allow-skipping-memory-init-for-hw_tags.patch
kasan-vmalloc-add-vmalloc-tagging-for-hw_tags.patch
kasan-vmalloc-only-tag-normal-vmalloc-allocations.patch
kasan-arm64-dont-tag-executable-vmalloc-allocations.patch
kasan-mark-kasan_arg_stacktrace-as-__initdata.patch
kasan-clean-up-feature-flags-for-hw_tags-mode.patch
kasan-add-kasanvmalloc-command-line-flag.patch
kasan-allow-enabling-kasan_vmalloc-and-sw-hw_tags.patch
arm64-select-kasan_vmalloc-for-sw-hw_tags-modes.patch
kasan-documentation-updates.patch
kasan-improve-vmalloc-tests.patch