+ slab-make-kmem_cache_create-work-with-32-bit-sizes.patch added to -mm tree

The patch titled
     Subject: slab: make kmem_cache_create() work with 32-bit sizes
has been added to the -mm tree.  Its filename is
     slab-make-kmem_cache_create-work-with-32-bit-sizes.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/slab-make-kmem_cache_create-work-with-32-bit-sizes.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/slab-make-kmem_cache_create-work-with-32-bit-sizes.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days.

------------------------------------------------------
From: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Subject: slab: make kmem_cache_create() work with 32-bit sizes

struct kmem_cache::size and ::align have always been 32-bit.

Out of curiosity I created a 4GB kmem_cache; it oopsed with a division by
0.  kmem_cache_create(1UL<<32+1) created a 1-byte cache as expected: the
requested size is silently truncated to 32 bits internally.

size_t doesn't work and never did.
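
To make the truncation concrete, here is a minimal userspace sketch
(assuming a 64-bit build; "fake_kmem_cache" is an illustrative stand-in
for struct kmem_cache, not kernel code) of how a size_t request loses its
upper bits once it is stored into a 32-bit field:

#include <stdio.h>
#include <stddef.h>

struct fake_kmem_cache {
	unsigned int size;	/* struct kmem_cache::size is 32-bit */
};

int main(void)
{
	struct fake_kmem_cache c;
	size_t requested = (1UL << 32) + 1;	/* a "4GB plus 1 byte" request */

	c.size = requested;	/* silently truncated modulo 2^32 */
	printf("stored object size: %u\n", c.size);	/* prints 1 */
	return 0;
}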

Link: http://lkml.kernel.org/r/20180305200730.15812-6-adobriyan@xxxxxxxxx
Signed-off-by: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/slab.h |    7 ++++---
 mm/slab.c            |    2 +-
 mm/slab.h            |    6 +++---
 mm/slab_common.c     |   19 ++++++++++---------
 mm/slub.c            |    2 +-
 5 files changed, 19 insertions(+), 17 deletions(-)

diff -puN include/linux/slab.h~slab-make-kmem_cache_create-work-with-32-bit-sizes include/linux/slab.h
--- a/include/linux/slab.h~slab-make-kmem_cache_create-work-with-32-bit-sizes
+++ a/include/linux/slab.h
@@ -137,11 +137,12 @@ bool slab_is_available(void);
 
 extern bool usercopy_fallback;
 
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
-			size_t align, slab_flags_t flags,
+struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
+			unsigned int align, slab_flags_t flags,
 			void (*ctor)(void *));
 struct kmem_cache *kmem_cache_create_usercopy(const char *name,
-			size_t size, size_t align, slab_flags_t flags,
+			unsigned int size, unsigned int align,
+			slab_flags_t flags,
 			size_t useroffset, size_t usersize,
 			void (*ctor)(void *));
 void kmem_cache_destroy(struct kmem_cache *);
diff -puN mm/slab.c~slab-make-kmem_cache_create-work-with-32-bit-sizes mm/slab.c
--- a/mm/slab.c~slab-make-kmem_cache_create-work-with-32-bit-sizes
+++ a/mm/slab.c
@@ -1876,7 +1876,7 @@ slab_flags_t kmem_cache_flags(unsigned l
 }
 
 struct kmem_cache *
-__kmem_cache_alias(const char *name, size_t size, size_t align,
+__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 		   slab_flags_t flags, void (*ctor)(void *))
 {
 	struct kmem_cache *cachep;
diff -puN mm/slab_common.c~slab-make-kmem_cache_create-work-with-32-bit-sizes mm/slab_common.c
--- a/mm/slab_common.c~slab-make-kmem_cache_create-work-with-32-bit-sizes
+++ a/mm/slab_common.c
@@ -82,7 +82,7 @@ unsigned int kmem_cache_size(struct kmem
 EXPORT_SYMBOL(kmem_cache_size);
 
 #ifdef CONFIG_DEBUG_VM
-static int kmem_cache_sanity_check(const char *name, size_t size)
+static int kmem_cache_sanity_check(const char *name, unsigned int size)
 {
 	struct kmem_cache *s = NULL;
 
@@ -113,7 +113,7 @@ static int kmem_cache_sanity_check(const
 	return 0;
 }
 #else
-static inline int kmem_cache_sanity_check(const char *name, size_t size)
+static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
 {
 	return 0;
 }
@@ -280,8 +280,8 @@ static inline void memcg_unlink_cache(st
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
  */
-static unsigned long calculate_alignment(slab_flags_t flags,
-		unsigned long align, unsigned long size)
+static unsigned int calculate_alignment(slab_flags_t flags,
+		unsigned int align, unsigned int size)
 {
 	/*
 	 * If the user wants hardware cache aligned objects then follow that
@@ -291,7 +291,7 @@ static unsigned long calculate_alignment
 	 * alignment though. If that is greater then use it.
 	 */
 	if (flags & SLAB_HWCACHE_ALIGN) {
-		unsigned long ralign;
+		unsigned int ralign;
 
 		ralign = cache_line_size();
 		while (size <= ralign / 2)
@@ -331,7 +331,7 @@ int slab_unmergeable(struct kmem_cache *
 	return 0;
 }
 
-struct kmem_cache *find_mergeable(size_t size, size_t align,
+struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 		slab_flags_t flags, const char *name, void (*ctor)(void *))
 {
 	struct kmem_cache *s;
@@ -379,7 +379,7 @@ struct kmem_cache *find_mergeable(size_t
 }
 
 static struct kmem_cache *create_cache(const char *name,
-		size_t object_size, size_t size, size_t align,
+		unsigned int object_size, unsigned int size, unsigned int align,
 		slab_flags_t flags, size_t useroffset,
 		size_t usersize, void (*ctor)(void *),
 		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
@@ -452,7 +452,8 @@ out_free_cache:
  * as davem.
  */
 struct kmem_cache *
-kmem_cache_create_usercopy(const char *name, size_t size, size_t align,
+kmem_cache_create_usercopy(const char *name,
+		  unsigned int size, unsigned int align,
 		  slab_flags_t flags, size_t useroffset, size_t usersize,
 		  void (*ctor)(void *))
 {
@@ -532,7 +533,7 @@ out_unlock:
 EXPORT_SYMBOL(kmem_cache_create_usercopy);
 
 struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t align,
+kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 		slab_flags_t flags, void (*ctor)(void *))
 {
 	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
diff -puN mm/slab.h~slab-make-kmem_cache_create-work-with-32-bit-sizes mm/slab.h
--- a/mm/slab.h~slab-make-kmem_cache_create-work-with-32-bit-sizes
+++ a/mm/slab.h
@@ -101,11 +101,11 @@ extern void create_boot_cache(struct kme
 			unsigned int useroffset, unsigned int usersize);
 
 int slab_unmergeable(struct kmem_cache *s);
-struct kmem_cache *find_mergeable(size_t size, size_t align,
+struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 		slab_flags_t flags, const char *name, void (*ctor)(void *));
 #ifndef CONFIG_SLOB
 struct kmem_cache *
-__kmem_cache_alias(const char *name, size_t size, size_t align,
+__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 		   slab_flags_t flags, void (*ctor)(void *));
 
 slab_flags_t kmem_cache_flags(unsigned long object_size,
@@ -113,7 +113,7 @@ slab_flags_t kmem_cache_flags(unsigned l
 	void (*ctor)(void *));
 #else
 static inline struct kmem_cache *
-__kmem_cache_alias(const char *name, size_t size, size_t align,
+__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 		   slab_flags_t flags, void (*ctor)(void *))
 { return NULL; }
 
diff -puN mm/slub.c~slab-make-kmem_cache_create-work-with-32-bit-sizes mm/slub.c
--- a/mm/slub.c~slab-make-kmem_cache_create-work-with-32-bit-sizes
+++ a/mm/slub.c
@@ -4241,7 +4241,7 @@ void __init kmem_cache_init_late(void)
 }
 
 struct kmem_cache *
-__kmem_cache_alias(const char *name, size_t size, size_t align,
+__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 		   slab_flags_t flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s, *c;
_
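
For reference, a hedged sketch (illustrative struct and cache name, not
part of the patch) of a typical caller after this change, with size and
align now passed as unsigned int:

#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int a;
	long b;
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	/* size and align are now unsigned int rather than size_t */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

kmem_cache_create_usercopy() takes the same 32-bit size/align pair; its
useroffset/usersize arguments remain size_t in this patch.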

Patches currently in -mm which might be from adobriyan@xxxxxxxxx are

slab-mark-kmalloc-machinery-as-__ro_after_init.patch
slab-fixup-calculate_alignment-argument-type.patch
slab-make-kmalloc_index-return-unsigned-int.patch
slab-make-kmalloc_size-return-unsigned-int.patch
slab-make-create_kmalloc_cache-work-with-32-bit-sizes.patch
slab-make-create_boot_cache-work-with-32-bit-sizes.patch
slab-make-kmem_cache_create-work-with-32-bit-sizes.patch
slab-make-size_index-array-u8.patch
slab-make-size_index_elem-unsigned-int.patch
slub-make-remote_node_defrag_ratio-unsigned-int.patch
slub-make-max_attr_size-unsigned-int.patch
slub-make-red_left_pad-unsigned-int.patch
slub-make-reserved-unsigned-int.patch
slub-make-align-unsigned-int.patch
slub-make-inuse-unsigned-int.patch
slub-make-cpu_partial-unsigned-int.patch
slub-make-offset-unsigned-int.patch
slub-make-object_size-unsigned-int.patch
slub-make-size-unsigned-int.patch
slab-make-kmem_cache_flags-accept-32-bit-object-size.patch
kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes.patch
slab-make-usercopy-region-32-bit.patch
slub-make-slab_index-return-unsigned-int.patch
slub-make-struct-kmem_cache_order_objects-x-unsigned-int.patch
slub-make-size_from_object-return-unsigned-int.patch
slab-use-32-bit-arithmetic-in-freelist_randomize.patch
proc-do-less-stuff-under-pde_unload_lock.patch
proc-move-proc-sysvipc-creation-to-where-it-belongs.patch
proc-faster-open-close-of-files-without-release-hook.patch
proc-randomize-struct-pde_opener.patch
proc-move-struct-pde_opener-to-kmem-cache.patch
proc-account-struct-pde_opener.patch
proc-check-permissions-earlier-for-proc-wchan.patch
proc-use-set_puts-at-proc-wchan.patch
proc-test-proc-self-wchan.patch
proc-test-proc-self-syscall.patch
uts-create-struct-uts_namespace-from-kmem_cache.patch
seq_file-delete-small-value-optimization.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


