+ mm-kmemleak-use-mempool-allocations-for-kmemleak-objects.patch added to -mm tree

The patch titled
     Subject: mm: kmemleak: use mempool allocations for kmemleak objects
has been added to the -mm tree.  Its filename is
     mm-kmemleak-use-mempool-allocations-for-kmemleak-objects.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-kmemleak-use-mempool-allocations-for-kmemleak-objects.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-kmemleak-use-mempool-allocations-for-kmemleak-objects.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days.

------------------------------------------------------
From: Catalin Marinas <catalin.marinas@xxxxxxx>
Subject: mm: kmemleak: use mempool allocations for kmemleak objects

Add mempool allocations for struct kmemleak_object and kmemleak_scan_area,
as they are slightly more resilient than kmem_cache_alloc() under memory
pressure.  Additionally, mask out all the gfp flags passed to kmemleak
other than GFP_KERNEL|GFP_ATOMIC.
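
As an aside for readers unfamiliar with the pattern, a minimal sketch of
backing a slab cache with a mempool follows; the names (example_object,
example_cache, example_pool) and the pool size are illustrative only and
are not part of this patch:

	/* Sketch only: a slab cache backed by a small emergency reserve. */
	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct example_object {
		unsigned long pointer;
		size_t size;
	};

	static struct kmem_cache *example_cache;
	static mempool_t *example_pool;

	static int __init example_pool_init(void)
	{
		example_cache = KMEM_CACHE(example_object, SLAB_NOLEAKTRACE);
		if (!example_cache)
			return -ENOMEM;

		/* Pre-allocate 16 objects that survive memory pressure. */
		example_pool = mempool_create_slab_pool(16, example_cache);
		if (!example_pool) {
			kmem_cache_destroy(example_cache);
			return -ENOMEM;
		}
		return 0;
	}

	static struct example_object *example_alloc(gfp_t gfp)
	{
		/* Honour only GFP_KERNEL/GFP_ATOMIC, mirroring the masking above. */
		return mempool_alloc(example_pool, gfp & (GFP_KERNEL | GFP_ATOMIC));
	}

	static void example_free(struct example_object *object)
	{
		mempool_free(object, example_pool);
	}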

A boot-time tuning parameter (kmemleak.mempool) is added to allow a
different minimum pool size (defaulting to NR_CPUS * 4).
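
For example (value purely illustrative), a kernel command line entry such as:

	kmemleak.mempool=1024

would, per the kmemleak_mempool_config() parsing in the patch below, set the
minimum object pool to 1024 and the minimum scan area pool to 1024 / 4 = 256.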

Link: http://lkml.kernel.org/r/20190727132334.9184-1-catalin.marinas@xxxxxxx
Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Suggested-by: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Qian Cai <cai@xxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 Documentation/admin-guide/kernel-parameters.txt |    6 +
 mm/kmemleak.c                                   |   58 +++++++++++---
 2 files changed, 54 insertions(+), 10 deletions(-)

--- a/Documentation/admin-guide/kernel-parameters.txt~mm-kmemleak-use-mempool-allocations-for-kmemleak-objects
+++ a/Documentation/admin-guide/kernel-parameters.txt
@@ -2011,6 +2011,12 @@
 			Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
 			the default is off.
 
+	kmemleak.mempool=
+			[KNL] Boot-time tuning of the minimum kmemleak
+			metadata pool size.
+			Format: <int>
+			Default: NR_CPUS * 4
+
 	kprobe_event=[probe-list]
 			[FTRACE] Add kprobe events and enable at boot time.
 			The probe-list is a semicolon delimited list of probe
--- a/mm/kmemleak.c~mm-kmemleak-use-mempool-allocations-for-kmemleak-objects
+++ a/mm/kmemleak.c
@@ -69,6 +69,7 @@
 #include <linux/kthread.h>
 #include <linux/rbtree.h>
 #include <linux/fs.h>
+#include <linux/mempool.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/cpumask.h>
@@ -112,9 +113,7 @@
 #define BYTES_PER_POINTER	sizeof(void *)
 
 /* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
-				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-				 __GFP_NOWARN)
+#define gfp_kmemleak_mask(gfp)	((gfp) & (GFP_KERNEL | GFP_ATOMIC))
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
@@ -190,7 +189,13 @@ static DEFINE_RWLOCK(kmemleak_lock);
 
 /* allocation caches for kmemleak internal data */
 static struct kmem_cache *object_cache;
+static mempool_t *object_mempool;
 static struct kmem_cache *scan_area_cache;
+static mempool_t *scan_area_mempool;
+
+/* default minimum memory pool sizes */
+static int min_object_pool = NR_CPUS * 4;
+static int min_scan_area_pool = NR_CPUS * 1;
 
 /* set if tracing memory operations is enabled */
 static int kmemleak_enabled;
@@ -465,9 +470,9 @@ static void free_object_rcu(struct rcu_h
 	 */
 	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
 		hlist_del(&area->node);
-		kmem_cache_free(scan_area_cache, area);
+		mempool_free(area, scan_area_mempool);
 	}
-	kmem_cache_free(object_cache, object);
+	mempool_free(object, object_mempool);
 }
 
 /*
@@ -550,7 +555,7 @@ static struct kmemleak_object *create_ob
 	struct rb_node **link, *rb_parent;
 	unsigned long untagged_ptr;
 
-	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+	object = mempool_alloc(object_mempool, gfp_kmemleak_mask(gfp));
 	if (!object) {
 		pr_warn("Cannot allocate a kmemleak_object structure\n");
 		kmemleak_disable();
@@ -614,7 +619,7 @@ static struct kmemleak_object *create_ob
 			 * be freed while the kmemleak_lock is held.
 			 */
 			dump_object_info(parent);
-			kmem_cache_free(object_cache, object);
+			mempool_free(object, object_mempool);
 			object = NULL;
 			goto out;
 		}
@@ -772,7 +777,7 @@ static void add_scan_area(unsigned long
 		return;
 	}
 
-	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
+	area = mempool_alloc(scan_area_mempool, gfp_kmemleak_mask(gfp));
 	if (!area) {
 		pr_warn("Cannot allocate a scan area\n");
 		goto out;
@@ -784,7 +789,7 @@ static void add_scan_area(unsigned long
 	} else if (ptr + size > object->pointer + object->size) {
 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 		dump_object_info(object);
-		kmem_cache_free(scan_area_cache, area);
+		mempool_free(area, scan_area_mempool);
 		goto out_unlock;
 	}
 
@@ -1993,6 +1998,27 @@ static int __init kmemleak_boot_config(c
 }
 early_param("kmemleak", kmemleak_boot_config);
 
+/*
+ * Allow boot-time tuning of the kmemleak mempool size.
+ */
+static int __init kmemleak_mempool_config(char *str)
+{
+	int size, ret;
+
+	if (!str)
+		return -EINVAL;
+
+	ret = kstrtoint(str, 0, &size);
+	if (ret)
+		return ret;
+
+	min_object_pool = size;
+	min_scan_area_pool = size / 4;
+
+	return 0;
+}
+early_param("kmemleak.mempool", kmemleak_mempool_config);
+
 static void __init print_log_trace(struct early_log *log)
 {
 	pr_notice("Early log backtrace:\n");
@@ -2020,6 +2046,18 @@ void __init kmemleak_init(void)
 
 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
+	if (!object_cache || !scan_area_cache) {
+		kmemleak_disable();
+		return;
+	}
+	object_mempool = mempool_create_slab_pool(min_object_pool,
+						  object_cache);
+	scan_area_mempool = mempool_create_slab_pool(min_scan_area_pool,
+						     scan_area_cache);
+	if (!object_mempool || !scan_area_mempool) {
+		kmemleak_disable();
+		return;
+	}
 
 	if (crt_early_log > ARRAY_SIZE(early_log))
 		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
@@ -2126,7 +2164,7 @@ static int __init kmemleak_late_init(voi
 		mutex_unlock(&scan_mutex);
 	}
 
-	pr_info("Kernel memory leak detector initialized\n");
+	pr_info("Kernel memory leak detector initialized (mempool size: %d)\n", min_object_pool);
 
 	return 0;
 }
_

Patches currently in -mm which might be from catalin.marinas@xxxxxxx are

mm-kmemleak-use-mempool-allocations-for-kmemleak-objects.patch
