+ slub-make-struct-kmem_cache_order_objects-x-unsigned-int.patch added to -mm tree

The patch titled
     Subject: slub: make struct kmem_cache_order_objects::x unsigned int
has been added to the -mm tree.  Its filename is
     slub-make-struct-kmem_cache_order_objects-x-unsigned-int.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/slub-make-struct-kmem_cache_order_objects-x-unsigned-int.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/slub-make-struct-kmem_cache_order_objects-x-unsigned-int.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Subject: slub: make struct kmem_cache_order_objects::x unsigned int

struct kmem_cache_order_objects is for mixing the allocation order and the
number of objects, and orders aren't big enough to warrant 64-bit width.

Propagate unsignedness down so that everything fits.

!!! Patch assumes that "PAGE_SIZE << order" doesn't overflow. !!!
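
For context, here is a minimal standalone sketch (not part of the patch) of
the packing scheme the changelog refers to: the order is kept in the high
bits and the object count in the low bits of a single word, with OO_SHIFT
and OO_MASK written here to match their definitions in mm/slub.c.  On a
typical configuration PAGE_SIZE is 4096 and the maximum order is around 10,
so "PAGE_SIZE << order" tops out near 4MB, well within unsigned int range,
which is the assumption called out above.

/*
 * Standalone illustration of SLUB's order/objects packing.
 * OO_SHIFT/OO_MASK mirror mm/slub.c; both fields fit in 32 bits.
 */
#include <stdio.h>

#define OO_SHIFT	16
#define OO_MASK		((1U << OO_SHIFT) - 1)

struct kmem_cache_order_objects {
	unsigned int x;
};

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
						       unsigned int objects)
{
	struct kmem_cache_order_objects oo = {
		(order << OO_SHIFT) + objects
	};
	return oo;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

int main(void)
{
	/* e.g. an order-3 slab (eight 4K pages) holding 128 objects */
	struct kmem_cache_order_objects oo = oo_make(3, 128);

	printf("order=%u objects=%u\n", oo_order(oo), oo_objects(oo));
	return 0;
}

Compiled and run on its own, the example prints "order=3 objects=128",
showing that both fields round-trip through a single unsigned int.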

Link: http://lkml.kernel.org/r/20180305200730.15812-23-adobriyan@xxxxxxxxx
Signed-off-by: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/slub_def.h |    2 -
 mm/slub.c                |   74 +++++++++++++++++++------------------
 2 files changed, 40 insertions(+), 36 deletions(-)

diff -puN include/linux/slub_def.h~slub-make-struct-kmem_cache_order_objects-x-unsigned-int include/linux/slub_def.h
--- a/include/linux/slub_def.h~slub-make-struct-kmem_cache_order_objects-x-unsigned-int
+++ a/include/linux/slub_def.h
@@ -73,7 +73,7 @@ struct kmem_cache_cpu {
  * given order would contain.
  */
 struct kmem_cache_order_objects {
-	unsigned long x;
+	unsigned int x;
 };
 
 /*
diff -puN mm/slub.c~slub-make-struct-kmem_cache_order_objects-x-unsigned-int mm/slub.c
--- a/mm/slub.c~slub-make-struct-kmem_cache_order_objects-x-unsigned-int
+++ a/mm/slub.c
@@ -316,13 +316,13 @@ static inline unsigned int slab_index(vo
 	return (p - addr) / s->size;
 }
 
-static inline int order_objects(int order, unsigned long size, int reserved)
+static inline unsigned int order_objects(unsigned int order, unsigned int size, unsigned int reserved)
 {
-	return ((PAGE_SIZE << order) - reserved) / size;
+	return (((unsigned int)PAGE_SIZE << order) - reserved) / size;
 }
 
-static inline struct kmem_cache_order_objects oo_make(int order,
-		unsigned long size, int reserved)
+static inline struct kmem_cache_order_objects oo_make(unsigned int order,
+		unsigned int size, unsigned int reserved)
 {
 	struct kmem_cache_order_objects x = {
 		(order << OO_SHIFT) + order_objects(order, size, reserved)
@@ -331,12 +331,12 @@ static inline struct kmem_cache_order_ob
 	return x;
 }
 
-static inline int oo_order(struct kmem_cache_order_objects x)
+static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 {
 	return x.x >> OO_SHIFT;
 }
 
-static inline int oo_objects(struct kmem_cache_order_objects x)
+static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 {
 	return x.x & OO_MASK;
 }
@@ -1435,7 +1435,7 @@ static inline struct page *alloc_slab_pa
 		gfp_t flags, int node, struct kmem_cache_order_objects oo)
 {
 	struct page *page;
-	int order = oo_order(oo);
+	unsigned int order = oo_order(oo);
 
 	if (node == NUMA_NO_NODE)
 		page = alloc_pages(flags, order);
@@ -1454,8 +1454,8 @@ static inline struct page *alloc_slab_pa
 /* Pre-initialize the random sequence cache */
 static int init_cache_random_seq(struct kmem_cache *s)
 {
+	unsigned int count = oo_objects(s->oo);
 	int err;
-	unsigned long i, count = oo_objects(s->oo);
 
 	/* Bailout if already initialised */
 	if (s->random_seq)
@@ -1470,6 +1470,8 @@ static int init_cache_random_seq(struct
 
 	/* Transform to an offset on the set of pages */
 	if (s->random_seq) {
+		unsigned int i;
+
 		for (i = 0; i < count; i++)
 			s->random_seq[i] *= s->size;
 	}
@@ -2398,7 +2400,7 @@ slab_out_of_memory(struct kmem_cache *s,
 
 	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
 		nid, gfpflags, &gfpflags);
-	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %d, min order: %d\n",
+	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
 		s->name, s->object_size, s->size, oo_order(s->oo),
 		oo_order(s->min));
 
@@ -3181,9 +3183,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
  * and increases the number of allocations possible without having to
  * take the list_lock.
  */
-static int slub_min_order;
-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
-static int slub_min_objects;
+static unsigned int slub_min_order;
+static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+static unsigned int slub_min_objects;
 
 /*
  * Calculate the order of allocation given an slab object size.
@@ -3210,20 +3212,21 @@ static int slub_min_objects;
  * requested a higher mininum order then we start with that one instead of
  * the smallest order which will fit the object.
  */
-static inline int slab_order(int size, int min_objects,
-				int max_order, int fract_leftover, int reserved)
+static inline unsigned int slab_order(unsigned int size,
+		unsigned int min_objects, unsigned int max_order,
+		unsigned int fract_leftover, unsigned int reserved)
 {
-	int order;
-	int rem;
-	int min_order = slub_min_order;
+	unsigned int min_order = slub_min_order;
+	unsigned int order;
 
 	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
-	for (order = max(min_order, get_order(min_objects * size + reserved));
+	for (order = max(min_order, (unsigned int)get_order(min_objects * size + reserved));
 			order <= max_order; order++) {
 
-		unsigned long slab_size = PAGE_SIZE << order;
+		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
+		unsigned int rem;
 
 		rem = (slab_size - reserved) % size;
 
@@ -3234,12 +3237,11 @@ static inline int slab_order(int size, i
 	return order;
 }
 
-static inline int calculate_order(int size, int reserved)
+static inline int calculate_order(unsigned int size, unsigned int reserved)
 {
-	int order;
-	int min_objects;
-	int fraction;
-	int max_objects;
+	unsigned int order;
+	unsigned int min_objects;
+	unsigned int max_objects;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -3256,6 +3258,8 @@ static inline int calculate_order(int si
 	min_objects = min(min_objects, max_objects);
 
 	while (min_objects > 1) {
+		unsigned int fraction;
+
 		fraction = 16;
 		while (fraction >= 4) {
 			order = slab_order(size, min_objects,
@@ -3458,7 +3462,7 @@ static int calculate_sizes(struct kmem_c
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
-	int order;
+	unsigned int order;
 
 	/*
 	 * Round up object size to the next word boundary. We can only
@@ -3548,7 +3552,7 @@ static int calculate_sizes(struct kmem_c
 	else
 		order = calculate_order(size, s->reserved);
 
-	if (order < 0)
+	if ((int)order < 0)
 		return 0;
 
 	s->allocflags = 0;
@@ -3716,7 +3720,7 @@ int __kmem_cache_shutdown(struct kmem_ca
 
 static int __init setup_slub_min_order(char *str)
 {
-	get_option(&str, &slub_min_order);
+	get_option(&str, (int *)&slub_min_order);
 
 	return 1;
 }
@@ -3725,8 +3729,8 @@ __setup("slub_min_order=", setup_slub_mi
 
 static int __init setup_slub_max_order(char *str)
 {
-	get_option(&str, &slub_max_order);
-	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
+	get_option(&str, (int *)&slub_max_order);
+	slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
 
 	return 1;
 }
@@ -3735,7 +3739,7 @@ __setup("slub_max_order=", setup_slub_ma
 
 static int __init setup_slub_min_objects(char *str)
 {
-	get_option(&str, &slub_min_objects);
+	get_option(&str, (int *)&slub_min_objects);
 
 	return 1;
 }
@@ -4230,7 +4234,7 @@ void __init kmem_cache_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
 				  slub_cpu_dead);
 
-	pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%u, Nodes=%d\n",
+	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
 		cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
@@ -4906,17 +4910,17 @@ SLAB_ATTR_RO(object_size);
 
 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", oo_objects(s->oo));
+	return sprintf(buf, "%u\n", oo_objects(s->oo));
 }
 SLAB_ATTR_RO(objs_per_slab);
 
 static ssize_t order_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	unsigned long order;
+	unsigned int order;
 	int err;
 
-	err = kstrtoul(buf, 10, &order);
+	err = kstrtouint(buf, 10, &order);
 	if (err)
 		return err;
 
@@ -4929,7 +4933,7 @@ static ssize_t order_store(struct kmem_c
 
 static ssize_t order_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", oo_order(s->oo));
+	return sprintf(buf, "%u\n", oo_order(s->oo));
 }
 SLAB_ATTR(order);
 
_

Patches currently in -mm which might be from adobriyan@xxxxxxxxx are

slab-mark-kmalloc-machinery-as-__ro_after_init.patch
slab-fixup-calculate_alignment-argument-type.patch
slab-make-kmalloc_index-return-unsigned-int.patch
slab-make-kmalloc_size-return-unsigned-int.patch
slab-make-create_kmalloc_cache-work-with-32-bit-sizes.patch
slab-make-create_boot_cache-work-with-32-bit-sizes.patch
slab-make-kmem_cache_create-work-with-32-bit-sizes.patch
slab-make-size_index-array-u8.patch
slab-make-size_index_elem-unsigned-int.patch
slub-make-remote_node_defrag_ratio-unsigned-int.patch
slub-make-max_attr_size-unsigned-int.patch
slub-make-red_left_pad-unsigned-int.patch
slub-make-reserved-unsigned-int.patch
slub-make-align-unsigned-int.patch
slub-make-inuse-unsigned-int.patch
slub-make-cpu_partial-unsigned-int.patch
slub-make-offset-unsigned-int.patch
slub-make-object_size-unsigned-int.patch
slub-make-size-unsigned-int.patch
slab-make-kmem_cache_flags-accept-32-bit-object-size.patch
kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes.patch
slab-make-usercopy-region-32-bit.patch
slub-make-slab_index-return-unsigned-int.patch
slub-make-struct-kmem_cache_order_objects-x-unsigned-int.patch
slub-make-size_from_object-return-unsigned-int.patch
slab-use-32-bit-arithmetic-in-freelist_randomize.patch
proc-do-less-stuff-under-pde_unload_lock.patch
proc-move-proc-sysvipc-creation-to-where-it-belongs.patch
proc-faster-open-close-of-files-without-release-hook.patch
proc-randomize-struct-pde_opener.patch
proc-move-struct-pde_opener-to-kmem-cache.patch
proc-account-struct-pde_opener.patch
proc-check-permissions-earlier-for-proc-wchan.patch
proc-use-set_puts-at-proc-wchan.patch
proc-test-proc-self-wchan.patch
proc-test-proc-self-syscall.patch
uts-create-struct-uts_namespace-from-kmem_cache.patch
seq_file-delete-small-value-optimization.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


