On allocation, KASAN colours a page with a random tag and stores that tag
in page->flags so that a subsequent page_to_virt() reconstructs the
correct tagged pointer. However, when such a page is mapped in user space
with PROT_MTE, the kernel's initial tag is overridden. Ensure that such
pages have the tag reset (match-all) at allocation time, since clearing
the tag later would race with concurrent page_to_virt() dereferences.

Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Andrey Ryabinin <ryabinin.a.a@xxxxxxxxx>
Cc: Andrey Konovalov <andreyknvl@xxxxxxxxx>
Cc: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
---
 include/linux/gfp.h | 10 +++++++---
 mm/page_alloc.c     |  9 ++++++---
 2 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3e3d36fc2109..88b1d4fe4dcb 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -58,13 +58,15 @@ struct vm_area_struct;
 #define ___GFP_SKIP_ZERO		0x1000000u
 #define ___GFP_SKIP_KASAN_UNPOISON	0x2000000u
 #define ___GFP_SKIP_KASAN_POISON	0x4000000u
+#define ___GFP_PAGE_KASAN_TAG_RESET	0x8000000u
 #else
 #define ___GFP_SKIP_ZERO		0
 #define ___GFP_SKIP_KASAN_UNPOISON	0
 #define ___GFP_SKIP_KASAN_POISON	0
+#define ___GFP_PAGE_KASAN_TAG_RESET	0
 #endif
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP	0x8000000u
+#define ___GFP_NOLOCKDEP	0x10000000u
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
@@ -259,12 +261,13 @@ struct vm_area_struct;
 #define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
 #define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
 #define __GFP_SKIP_KASAN_POISON   ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
+#define __GFP_PAGE_KASAN_TAG_RESET ((__force gfp_t)___GFP_PAGE_KASAN_TAG_RESET)
 
 /* Disable lockdep for GFP context tracking */
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (28 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
@@ -343,7 +346,8 @@ struct vm_area_struct;
 #define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
 #define GFP_NOIO	(__GFP_RECLAIM)
 #define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
-#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
+			 __GFP_PAGE_KASAN_TAG_RESET)
 #define GFP_DMA		__GFP_DMA
 #define GFP_DMA32	__GFP_DMA32
 #define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0e42038382c1..f9018a84f4e3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2382,6 +2382,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
 			!should_skip_init(gfp_flags);
 	bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
+	int i;
 
 	set_page_private(page, 0);
 	set_page_refcounted(page);
@@ -2407,8 +2408,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	 * should be initialized as well).
 	 */
 	if (init_tags) {
-		int i;
-
 		/* Initialize both memory and tags. */
 		for (i = 0; i != 1 << order; ++i)
 			tag_clear_highpage(page + i);
@@ -2430,7 +2429,11 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	/* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
 	if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
 		SetPageSkipKASanPoison(page);
-
+	/* If a match-all page address is required, reset the tag. */
+	if (gfp_flags & __GFP_PAGE_KASAN_TAG_RESET) {
+		for (i = 0; i != 1 << order; ++i)
+			page_kasan_tag_reset(page + i);
+	}
 	set_page_owner(page, order, gfp_flags);
 	page_table_check_alloc(page, order);
 }
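
Note for reviewers, not part of the patch: since GFP_USER (and therefore
GFP_HIGHUSER / GFP_HIGHUSER_MOVABLE) now carries
__GFP_PAGE_KASAN_TAG_RESET, a driver that hands pages to user space via
its fault handler gets the match-all tag at allocation with no further
changes. The sketch below only illustrates that path; example_fault() and
the surrounding driver are hypothetical:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical ->fault handler of a driver whose buffers user space may
 * mmap(), possibly with PROT_MTE. */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct page *page;

	/*
	 * GFP_HIGHUSER includes GFP_USER, so with this patch the KASAN tag
	 * stored in page->flags is reset to the match-all value in
	 * post_alloc_hook(); kernel accesses through page_to_virt() keep
	 * working even after user space re-tags the memory via MTE.
	 */
	page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;

	vmf->page = page;
	return 0;
}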
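
For context on the "match-all" wording: under CONFIG_KASAN_HW_TAGS the
allocation-time tag lives in a byte of page->flags, and
page_kasan_tag_reset() puts back 0xff, the tag the kernel treats as
unchecked. A rough sketch of what that helper amounts to, reusing the
existing page_kasan_tag_set() accessor (the real definitions are in
include/linux/mm.h and additionally check kasan_enabled()); the
example_* names are illustrative only:

#include <linux/mm.h>

#define EXAMPLE_KASAN_TAG_KERNEL	0xff	/* match-all tag */

static inline void example_page_kasan_tag_reset(struct page *page)
{
	/* Overwrite the random allocation tag stored in page->flags so
	 * that page_to_virt() reconstructs an 0xff-tagged pointer. */
	page_kasan_tag_set(page, EXAMPLE_KASAN_TAG_KERNEL);
}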