+ kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc.patch added to -mm tree

The patch titled
     Subject: kasan, mm, arm64: tag non slab memory allocated via pagealloc
has been added to the -mm tree.  Its filename is
     kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Subject: kasan, mm, arm64: tag non slab memory allocated via pagealloc

Tag-based KASAN doesn't check memory accesses through pointers tagged with
0xff.  When page_address is used to get a pointer to the memory that
corresponds to some page, the tag of the resulting pointer gets set to 0xff,
even though the allocated memory might have been tagged differently.
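
For context, the tag lives in the top byte of the kernel pointer (arm64 Top
Byte Ignore, enabled for the kernel by an earlier patch in this series), and
0xff is the neutral tag that the tag-based mode never reports on.  A rough
sketch of the tag arithmetic, assuming the 56-bit tag shift used by the arm64
helpers (the real __tag_set lives in arch/arm64/include/asm/memory.h and is
added by kasan-add-tag-related-helper-functions.patch; the names below are
sketch-only):

	/* Sketch only -- not the exact kernel macros. */
	#define KASAN_PTR_TAG_SHIFT	56	/* tag occupies bits 63:56 */
	#define KASAN_TAG_KERNEL	0xffu	/* "match-all" tag, never checked */

	static inline u8 ptr_tag(const void *ptr)
	{
		return (u8)((u64)ptr >> KASAN_PTR_TAG_SHIFT);
	}

	static inline void *ptr_set_tag(const void *ptr, u8 tag)
	{
		u64 addr = (u64)ptr & ~((u64)0xff << KASAN_PTR_TAG_SHIFT);

		return (void *)(addr | ((u64)tag << KASAN_PTR_TAG_SHIFT));
	}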

For slab pages it's impossible to recover the correct tag to return from
page_address, since the page might contain multiple slab objects tagged with
different values, and we can't know in advance which one of them is going to
get accessed.  For non-slab pages, however, we can recover the tag in
page_address, since the whole page was marked with the same tag.
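
As a hypothetical illustration of the slab case (the tag values below are
made up):

	void *p1, *p2;

	/* Two objects carved out of the same slab page get independent tags. */
	p1 = kmalloc(128, GFP_KERNEL);	/* e.g. top byte 0x3a */
	p2 = kmalloc(128, GFP_KERNEL);	/* e.g. top byte 0x71, same struct page */
	/*
	 * page_address(virt_to_page(p1)) can carry only one tag, so slab pages
	 * keep the neutral 0xff tag and the per-object tags stay in the object
	 * pointers handed out by the slab allocator.
	 */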

This patch adds tagging to non-slab memory allocated with pagealloc.  To set
the tag of the pointer returned from page_address, the tag gets stored in
page->flags when the memory gets allocated.
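
Condensed from the hunks below (not a literal copy), the two halves of that
mechanism line up as follows:

	/*
	 * Allocation side, mm/kasan/common.c: every page of the order-N block
	 * records the same random tag in page->flags.
	 */
	u8 tag = random_tag();

	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);

	/*
	 * Address side, arm64 page_to_virt(): the recorded tag is put back into
	 * the top byte, so the pointer matches the tags in shadow memory.
	 */
	addr = __tag_set(addr, page_kasan_tag(page));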

Link: http://lkml.kernel.org/r/d758ddcef46a5abc9970182b9137e2fbee202a2c.1544099024.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Reviewed-by: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Reviewed-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/arm64/include/asm/memory.h   |    8 ++++++-
 include/linux/mm.h                |   29 ++++++++++++++++++++++++++++
 include/linux/page-flags-layout.h |   10 +++++++++
 mm/cma.c                          |   11 ++++++++++
 mm/kasan/common.c                 |   15 ++++++++++++--
 mm/page_alloc.c                   |    1 
 mm/slab.c                         |    2 -
 7 files changed, 72 insertions(+), 4 deletions(-)

--- a/arch/arm64/include/asm/memory.h~kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc
+++ a/arch/arm64/include/asm/memory.h
@@ -314,7 +314,13 @@ static inline void *phys_to_virt(phys_ad
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
 #define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
-#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define page_to_virt(page)	({					\
+	unsigned long __addr =						\
+		((__page_to_voff(page)) | PAGE_OFFSET);			\
+	__addr = __tag_set(__addr, page_kasan_tag(page));		\
+	((void *)__addr);						\
+})
+
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
 
 #define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
--- a/include/linux/mm.h~kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc
+++ a/include/linux/mm.h
@@ -804,6 +804,7 @@ vm_fault_t finish_mkwrite_fault(struct v
 #define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
 #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
 #define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
 
 /*
  * Define the bit shifts to access each section.  For non-existent
@@ -814,6 +815,7 @@ vm_fault_t finish_mkwrite_fault(struct v
 #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
 #define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
 
 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 #ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -836,6 +838,7 @@ vm_fault_t finish_mkwrite_fault(struct v
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(const struct page *page)
@@ -1101,6 +1104,32 @@ static inline bool cpupid_match_pid(stru
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
+#ifdef CONFIG_KASAN_SW_TAGS
+static inline u8 page_kasan_tag(const struct page *page)
+{
+	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag)
+{
+	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+}
+
+static inline void page_kasan_tag_reset(struct page *page)
+{
+	page_kasan_tag_set(page, 0xff);
+}
+#else
+static inline u8 page_kasan_tag(const struct page *page)
+{
+	return 0xff;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
+static inline void page_kasan_tag_reset(struct page *page) { }
+#endif
+
 static inline struct zone *page_zone(const struct page *page)
 {
 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
--- a/include/linux/page-flags-layout.h~kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc
+++ a/include/linux/page-flags-layout.h
@@ -82,6 +82,16 @@
 #define LAST_CPUPID_WIDTH 0
 #endif
 
+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_WIDTH 8
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
+	> BITS_PER_LONG - NR_PAGEFLAGS
+#error "KASAN: not enough bits in page flags for tag"
+#endif
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
 /*
  * We are going to use the flags for the page to node mapping if its in
  * there.  This includes the case where there is no node, so it is implicit.
--- a/mm/cma.c~kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc
+++ a/mm/cma.c
@@ -407,6 +407,7 @@ struct page *cma_alloc(struct cma *cma,
 	unsigned long pfn = -1;
 	unsigned long start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+	size_t i;
 	struct page *page = NULL;
 	int ret = -ENOMEM;
 
@@ -466,6 +467,16 @@ struct page *cma_alloc(struct cma *cma,
 
 	trace_cma_alloc(pfn, page, count, align);
 
+	/*
+	 * CMA can allocate multiple page blocks, which results in different
+	 * blocks being marked with different tags. Reset the tags to ignore
+	 * those page blocks.
+	 */
+	if (page) {
+		for (i = 0; i < count; i++)
+			page_kasan_tag_reset(page + i);
+	}
+
 	if (ret && !no_warn) {
 		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
 			__func__, count, ret);
--- a/mm/kasan/common.c~kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc
+++ a/mm/kasan/common.c
@@ -220,8 +220,15 @@ void kasan_unpoison_stack_above_sp_to(co
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
+	u8 tag;
+	unsigned long i;
+
 	if (unlikely(PageHighMem(page)))
 		return;
+
+	tag = random_tag();
+	for (i = 0; i < (1 << order); i++)
+		page_kasan_tag_set(page + i, tag);
 	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 }
 
@@ -319,6 +326,10 @@ struct kasan_free_meta *get_free_info(st
 
 void kasan_poison_slab(struct page *page)
 {
+	unsigned long i;
+
+	for (i = 0; i < (1 << compound_order(page)); i++)
+		page_kasan_tag_reset(page + i);
 	kasan_poison_shadow(page_address(page),
 			PAGE_SIZE << compound_order(page),
 			KASAN_KMALLOC_REDZONE);
@@ -517,7 +528,7 @@ void kasan_poison_kfree(void *ptr, unsig
 	page = virt_to_head_page(ptr);
 
 	if (unlikely(!PageSlab(page))) {
-		if (reset_tag(ptr) != page_address(page)) {
+		if (ptr != page_address(page)) {
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
@@ -530,7 +541,7 @@ void kasan_poison_kfree(void *ptr, unsig
 
 void kasan_kfree_large(void *ptr, unsigned long ip)
 {
-	if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
+	if (ptr != page_address(virt_to_head_page(ptr)))
 		kasan_report_invalid_free(ptr, ip);
 	/* The object will be poisoned by page_alloc. */
 }
--- a/mm/page_alloc.c~kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc
+++ a/mm/page_alloc.c
@@ -1183,6 +1183,7 @@ static void __meminit __init_single_page
 	init_page_count(page);
 	page_mapcount_reset(page);
 	page_cpupid_reset_last(page);
+	page_kasan_tag_reset(page);
 
 	INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL
--- a/mm/slab.c~kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc
+++ a/mm/slab.c
@@ -2357,7 +2357,7 @@ static void *alloc_slabmgmt(struct kmem_
 	void *freelist;
 	void *addr = page_address(page);
 
-	page->s_mem = addr + colour_off;
+	page->s_mem = kasan_reset_tag(addr) + colour_off;
 	page->active = 0;
 
 	if (OBJFREELIST_SLAB(cachep))
_

Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are

kasan-mm-change-hooks-signatures.patch
kasan-slub-handle-pointer-tags-in-early_kmem_cache_node_alloc.patch
kasan-move-common-generic-and-tag-based-code-to-commonc.patch
kasan-rename-source-files-to-reflect-the-new-naming-scheme.patch
kasan-add-config_kasan_generic-and-config_kasan_sw_tags.patch
kasan-arm64-adjust-shadow-size-for-tag-based-mode.patch
kasan-rename-kasan_zero_page-to-kasan_early_shadow_page.patch
kasan-initialize-shadow-to-0xff-for-tag-based-mode.patch
arm64-move-untagged_addr-macro-from-uaccessh-to-memoryh.patch
kasan-add-tag-related-helper-functions.patch
kasan-arm64-untag-address-in-_virt_addr_is_linear.patch
kasan-preassign-tags-to-objects-with-ctors-or-slab_typesafe_by_rcu.patch
kasan-arm64-fix-up-fault-handling-logic.patch
kasan-arm64-enable-top-byte-ignore-for-the-kernel.patch
kasan-mm-perform-untagged-pointers-comparison-in-krealloc.patch
kasan-split-out-generic_reportc-from-reportc.patch
kasan-add-bug-reporting-routines-for-tag-based-mode.patch
mm-move-obj_to_index-to-include-linux-slab_defh.patch
kasan-add-hooks-implementation-for-tag-based-mode.patch
kasan-arm64-add-brk-handler-for-inline-instrumentation.patch
kasan-mm-arm64-tag-non-slab-memory-allocated-via-pagealloc.patch
kasan-add-__must_check-annotations-to-kasan-hooks.patch
kasan-arm64-select-have_arch_kasan_sw_tags.patch
kasan-update-documentation.patch
kasan-add-spdx-license-identifier-mark-to-source-files.patch



