+ mm-slab-refactor-common-ksize-kasan-logic-into-slab_commonc.patch added to -mm tree

The patch titled
     Subject: mm/slab: refactor common ksize KASAN logic into slab_common.c
has been added to the -mm tree.  Its filename is
     mm-slab-refactor-common-ksize-kasan-logic-into-slab_commonc.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-slab-refactor-common-ksize-kasan-logic-into-slab_commonc.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-slab-refactor-common-ksize-kasan-logic-into-slab_commonc.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days.

------------------------------------------------------
From: Marco Elver <elver@xxxxxxxxxx>
Subject: mm/slab: refactor common ksize KASAN logic into slab_common.c

This refactors the ksize() code common to the various allocators into
slab_common.c: __ksize() is the allocator-specific implementation without
instrumentation, whereas ksize() includes the required KASAN logic.
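
As a caller-side illustration of this split (the sketch below is not part
of the patch, and the helper name grow_into_slack() is hypothetical):
ksize() remains the instrumented entry point and, with KASAN enabled,
unpoisons the whole reported area, so callers may use the rounded-up size
it returns.

#include <linux/slab.h>

/* Hypothetical caller: allocate @requested bytes, report the usable size. */
static void *grow_into_slack(size_t requested, size_t *usable)
{
	void *buf = kmalloc(requested, GFP_KERNEL);

	if (!buf)
		return NULL;

	/*
	 * kmalloc() may round the allocation up; ksize() reports the actual
	 * size and (under KASAN) unpoisons the whole area, so the caller may
	 * legitimately use the extra bytes.
	 */
	*usable = ksize(buf);
	return buf;
}

Code that must not trigger the KASAN unpoisoning side effect would instead
call the new uninstrumented __ksize().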

Link: http://lkml.kernel.org/r/20190626142014.141844-5-elver@xxxxxxxxxx
Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/slab.h |    1 +
 mm/slab.c            |   24 ++++++------------------
 mm/slab_common.c     |   26 ++++++++++++++++++++++++++
 mm/slob.c            |    4 ++--
 mm/slub.c            |   14 ++------------
 5 files changed, 37 insertions(+), 32 deletions(-)

--- a/include/linux/slab.h~mm-slab-refactor-common-ksize-kasan-logic-into-slab_commonc
+++ a/include/linux/slab.h
@@ -184,6 +184,7 @@ void * __must_check __krealloc(const voi
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
 void kzfree(const void *);
+size_t __ksize(const void *);
 size_t ksize(const void *);
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
--- a/mm/slab.c~mm-slab-refactor-common-ksize-kasan-logic-into-slab_commonc
+++ a/mm/slab.c
@@ -4204,23 +4204,15 @@ void __check_heap_object(const void *ptr
 #endif /* CONFIG_HARDENED_USERCOPY */
 
 /**
- * ksize - get the actual amount of memory allocated for a given object
- * @objp: Pointer to the object
+ * __ksize -- Uninstrumented ksize.
  *
- * kmalloc may internally round up allocations and return more memory
- * than requested. ksize() can be used to determine the actual amount of
- * memory allocated. The caller may use this additional memory, even though
- * a smaller amount of memory was initially specified with the kmalloc call.
- * The caller must guarantee that objp points to a valid object previously
- * allocated with either kmalloc() or kmem_cache_alloc(). The object
- * must not be freed during the duration of the call.
- *
- * Return: size of the actual memory used by @objp in bytes
+ * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
+ * safety checks as ksize() with KASAN instrumentation enabled.
  */
-size_t ksize(const void *objp)
+size_t __ksize(const void *objp)
 {
-	struct kmem_cache *c;
 	size_t size;
+	struct kmem_cache *c;
 
 	BUG_ON(!objp);
 	if (unlikely(objp == ZERO_SIZE_PTR))
@@ -4228,11 +4220,7 @@ size_t ksize(const void *objp)
 
 	c = virt_to_cache(objp);
 	size = c ? c->object_size : 0;
-	/* We assume that ksize callers could use the whole allocated area,
-	 * so we need to unpoison this area.
-	 */
-	kasan_unpoison_shadow(objp, size);
 
 	return size;
 }
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
--- a/mm/slab_common.c~mm-slab-refactor-common-ksize-kasan-logic-into-slab_commonc
+++ a/mm/slab_common.c
@@ -1597,6 +1597,32 @@ void kzfree(const void *p)
 }
 EXPORT_SYMBOL(kzfree);
 
+/**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
+ *
+ * kmalloc may internally round up allocations and return more memory
+ * than requested. ksize() can be used to determine the actual amount of
+ * memory allocated. The caller may use this additional memory, even though
+ * a smaller amount of memory was initially specified with the kmalloc call.
+ * The caller must guarantee that objp points to a valid object previously
+ * allocated with either kmalloc() or kmem_cache_alloc(). The object
+ * must not be freed during the duration of the call.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
+size_t ksize(const void *objp)
+{
+	size_t size = __ksize(objp);
+	/*
+	 * We assume that ksize callers could use whole allocated area,
+	 * so we need to unpoison this area.
+	 */
+	kasan_unpoison_shadow(objp, size);
+	return size;
+}
+EXPORT_SYMBOL(ksize);
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
--- a/mm/slob.c~mm-slab-refactor-common-ksize-kasan-logic-into-slab_commonc
+++ a/mm/slob.c
@@ -527,7 +527,7 @@ void kfree(const void *block)
 EXPORT_SYMBOL(kfree);
 
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
-size_t ksize(const void *block)
+size_t __ksize(const void *block)
 {
 	struct page *sp;
 	int align;
@@ -545,7 +545,7 @@ size_t ksize(const void *block)
 	m = (unsigned int *)(block - align);
 	return SLOB_UNITS(*m) * SLOB_UNIT;
 }
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
 
 int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
 {
--- a/mm/slub.c~mm-slab-refactor-common-ksize-kasan-logic-into-slab_commonc
+++ a/mm/slub.c
@@ -3895,7 +3895,7 @@ void __check_heap_object(const void *ptr
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
 
-static size_t __ksize(const void *object)
+size_t __ksize(const void *object)
 {
 	struct page *page;
 
@@ -3911,17 +3911,7 @@ static size_t __ksize(const void *object
 
 	return slab_ksize(page->slab_cache);
 }
-
-size_t ksize(const void *object)
-{
-	size_t size = __ksize(object);
-	/* We assume that ksize callers could use whole allocated area,
-	 * so we need to unpoison this area.
-	 */
-	kasan_unpoison_shadow(object, size);
-	return size;
-}
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
 
 void kfree(const void *x)
 {
_

Patches currently in -mm which might be from elver@xxxxxxxxxx are

mm-kasan-print-frame-description-for-stack-bugs.patch
lib-test_kasan-add-bitops-tests.patch
x86-use-static_cpu_has-in-uaccess-region-to-avoid-instrumentation.patch
asm-generic-x86-add-bitops-instrumentation-for-kasan.patch
mm-kasan-introduce-__kasan_check_readwrite.patch
mm-kasan-change-kasan_check_readwrite-to-return-boolean.patch
lib-test_kasan-add-test-for-double-kzfree-detection.patch
mm-slab-refactor-common-ksize-kasan-logic-into-slab_commonc.patch
mm-kasan-add-object-validation-in-ksize.patch



