[patch 051/119] kasan: make kasan_cache_create() work with 32-bit slab cache sizes

From: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Subject: kasan: make kasan_cache_create() work with 32-bit slab cache sizes

SLAB doesn't support 4GB+ kmem caches (it never did), so KASAN should not
support them either.
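
The subtle part of the conversion is the min() -> min_t() switch in
kasan_cache_create(): the kernel's min() macro type-checks its operands,
and KMALLOC_MAX_SIZE is an unsigned long constant while *size is now
unsigned int, so plain min() would no longer compile cleanly.  Below is a
minimal user-space sketch of the pattern; the simplified min_t() stand-in
and the sample limit value are illustrative only, not the kernel
definitions:

	#include <stdio.h>

	/* simplified stand-in for the kernel's min_t(): cast both sides */
	#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

	/* illustrative value only; the real one is (1UL << KMALLOC_SHIFT_MAX) */
	#define KMALLOC_MAX_SIZE (1UL << 22)

	int main(void)
	{
		unsigned int size = 5u << 20;	/* pretend cache size after redzones */

		/* clamp to the allocator's limit, as kasan_cache_create() does */
		size = min_t(unsigned int, KMALLOC_MAX_SIZE, size);
		printf("clamped size: %u\n", size);
		return 0;
	}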

Link: http://lkml.kernel.org/r/20180305200730.15812-20-adobriyan@xxxxxxxxx
Signed-off-by: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/kasan.h |    4 ++--
 mm/kasan/kasan.c      |   12 ++++++------
 mm/slab.c             |    2 +-
 mm/slub.c             |    2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff -puN include/linux/kasan.h~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes include/linux/kasan.h
--- a/include/linux/kasan.h~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/include/linux/kasan.h
@@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(co
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(str
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
 static inline void kasan_cache_create(struct kmem_cache *cache,
-				      size_t *size,
+				      unsigned int *size,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
diff -puN mm/kasan/kasan.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes mm/kasan/kasan.c
--- a/mm/kasan/kasan.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/mm/kasan/kasan.c
@@ -323,9 +323,9 @@ void kasan_free_pages(struct page *page,
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
  */
-static size_t optimal_redzone(size_t object_size)
+static unsigned int optimal_redzone(unsigned int object_size)
 {
-	int rz =
+	return
 		object_size <= 64        - 16   ? 16 :
 		object_size <= 128       - 32   ? 32 :
 		object_size <= 512       - 64   ? 64 :
@@ -333,14 +333,13 @@ static size_t optimal_redzone(size_t obj
 		object_size <= (1 << 14) - 256  ? 256 :
 		object_size <= (1 << 15) - 512  ? 512 :
 		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
-	return rz;
 }
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags)
 {
+	unsigned int orig_size = *size;
 	int redzone_adjust;
-	int orig_size = *size;
 
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
@@ -358,7 +357,8 @@ void kasan_cache_create(struct kmem_cach
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
 
-	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
+			max(*size, cache->object_size +
 					optimal_redzone(cache->object_size)));
 
 	/*
diff -puN mm/slab.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes mm/slab.c
--- a/mm/slab.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/mm/slab.c
@@ -1994,7 +1994,7 @@ int __kmem_cache_create(struct kmem_cach
 	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;
 	int err;
-	size_t size = cachep->size;
+	unsigned int size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
diff -puN mm/slub.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes mm/slub.c
--- a/mm/slub.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/mm/slub.c
@@ -3458,7 +3458,7 @@ static void set_cpu_partial(struct kmem_
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	slab_flags_t flags = s->flags;
-	size_t size = s->object_size;
+	unsigned int size = s->object_size;
 	int order;
 
 	/*
_


