+ mm-slab-sanity-check-page-type-when-looking-up-cache.patch added to -mm tree

The patch titled
     Subject: mm/slab: sanity-check page type when looking up cache
has been added to the -mm tree.  Its filename is
     mm-slab-sanity-check-page-type-when-looking-up-cache.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-slab-sanity-check-page-type-when-looking-up-cache.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-slab-sanity-check-page-type-when-looking-up-cache.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Kees Cook <keescook@xxxxxxxxxxxx>
Subject: mm/slab: sanity-check page type when looking up cache

This avoids any possible type confusion when looking up an object.  For
example, if a pointer to a non-slab page were passed to kfree(), the
invalid slab_cache pointer (i.e. a value overlapping some other member of
the struct page union) would be used for subsequent slab manipulations,
which could lead to further memory corruption.
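
A minimal sketch of the failure mode this catches (illustration only, not
part of the patch; assumes <linux/slab.h> and a SLAB kernel with this
change applied; the identifiers below are hypothetical):

	/* A valid kernel virtual address that is not a slab object. */
	static char not_a_slab_object[64];

	static void example_bad_free(void)
	{
		/*
		 * virt_to_head_page() resolves to a non-slab page here, so
		 * the new PageSlab() check in virt_to_cache() fires a
		 * WARN_ONCE() and kfree() bails out instead of dereferencing
		 * a bogus page->slab_cache value.
		 */
		kfree(not_a_slab_object);
	}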

Since the page is already in cache, adding the PageSlab() check will have
nearly zero cost, so add a check and WARN() to virt_to_cache().  This also
replaces an open-coded virt_to_cache() in cache_from_obj().  To support the
failure mode, all callers of virt_to_cache() and cache_from_obj() are
updated to handle a NULL cache pointer return value (though note that
several already handle this case gracefully).
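
The caller-side pattern is just a NULL check before touching the cache,
mirroring the hunks below (sketch only; variable names are illustrative):

	struct kmem_cache *c;

	c = virt_to_cache(objp);
	if (!c)
		return;		/* virt_to_cache() already warned */
	/* ... otherwise proceed with c->object_size, freeing, etc. ... */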

Link: http://lkml.kernel.org/r/20190530045017.15252-3-keescook@xxxxxxxxxxxx
Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
Cc: Alexander Popov <alex.popov@xxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/slab.c |   16 +++++++++-------
 mm/slab.h |   17 +++++++++++++----
 2 files changed, 22 insertions(+), 11 deletions(-)

--- a/mm/slab.c~mm-slab-sanity-check-page-type-when-looking-up-cache
+++ a/mm/slab.c
@@ -371,12 +371,6 @@ static void **dbg_userword(struct kmem_c
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline struct kmem_cache *virt_to_cache(const void *obj)
-{
-	struct page *page = virt_to_head_page(obj);
-	return page->slab_cache;
-}
-
 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 				 unsigned int idx)
 {
@@ -3715,6 +3709,8 @@ void kmem_cache_free_bulk(struct kmem_ca
 			s = virt_to_cache(objp);
 		else
 			s = cache_from_obj(orig_s, objp);
+		if (!s)
+			continue;
 
 		debug_check_no_locks_freed(objp, s->object_size);
 		if (!(s->flags & SLAB_DEBUG_OBJECTS))
@@ -3749,6 +3745,10 @@ void kfree(const void *objp)
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
+	if (!c) {
+		local_irq_restore(flags);
+		return;
+	}
 	debug_check_no_locks_freed(objp, c->object_size);
 
 	debug_check_no_obj_freed(objp, c->object_size);
@@ -4219,13 +4219,15 @@ void __check_heap_object(const void *ptr
  */
 size_t ksize(const void *objp)
 {
+	struct kmem_cache *c;
 	size_t size;
 
 	BUG_ON(!objp);
 	if (unlikely(objp == ZERO_SIZE_PTR))
 		return 0;
 
-	size = virt_to_cache(objp)->object_size;
+	c = virt_to_cache(objp);
+	size = c ? c->object_size : 0;
 	/* We assume that ksize callers could use the whole allocated area,
 	 * so we need to unpoison this area.
 	 */
--- a/mm/slab.h~mm-slab-sanity-check-page-type-when-looking-up-cache
+++ a/mm/slab.h
@@ -350,10 +350,20 @@ static inline void memcg_link_cache(stru
 
 #endif /* CONFIG_MEMCG_KMEM */
 
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+	struct page *page;
+
+	page = virt_to_head_page(obj);
+	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
+					__func__))
+		return NULL;
+	return page->slab_cache;
+}
+
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
 	struct kmem_cache *cachep;
-	struct page *page;
 
 	/*
 	 * When kmemcg is not being used, both assignments should return the
@@ -367,9 +377,8 @@ static inline struct kmem_cache *cache_f
 	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
 		return s;
 
-	page = virt_to_head_page(x);
-	cachep = page->slab_cache;
-	WARN_ONCE(!slab_equal_or_root(cachep, s),
+	cachep = virt_to_cache(x);
+	WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
 		  "%s: Wrong slab cache. %s but object is from %s\n",
 		  __func__, s->name, cachep->name);
 	return cachep;
_

Patches currently in -mm which might be from keescook@xxxxxxxxxxxx are

mm-slab-validate-cache-membership-under-freelist-hardening.patch
mm-slab-sanity-check-page-type-when-looking-up-cache.patch
lkdtm-heap-add-tests-for-freelist-hardening.patch
lib-test_overflow-avoid-tainting-the-kernel-and-fix-wrap-size.patch
mm-kconfig-fix-neighboring-typos.patch



