From: Zhaoyang Huang <zhaoyang.huang@xxxxxxxxxx>

Use stack_depot to record kmemleak's backtrace, as slub already does,
in order to reduce redundant information.

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@xxxxxxxxxx>
Acked-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: ke.wang <ke.wang@xxxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Zhaoyang Huang <huangzhaoyang@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
changes of v2: fix bugs related to stack_depot_init
changes of v3: have DEBUG_KMEMLEAK select STACKDEPOT by default;
               remove unused functions
---
---
 lib/Kconfig.debug |  1 +
 mm/kmemleak.c     | 48 +++++++++++++++++++++++++++++-------------------
 2 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index bcbe60d..0def8e0 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -717,6 +717,7 @@ config DEBUG_KMEMLEAK
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
 	select CRC32
+	select STACKDEPOT
 	help
 	  Say Y here if you want to enable the memory leak
 	  detector. The memory allocation/freeing is traced in a way
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 1eddc01..cedc6f3 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -79,6 +79,7 @@
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <linux/memblock.h>
@@ -159,8 +160,7 @@ struct kmemleak_object {
 	u32 checksum;
 	/* memory ranges to be scanned inside an object (empty for all) */
 	struct hlist_head area_list;
-	unsigned long trace[MAX_TRACE];
-	unsigned int trace_len;
+	depot_stack_handle_t trace_handle;
 	unsigned long jiffies;		/* creation timestamp */
 	pid_t pid;			/* pid of the current task */
 	char comm[TASK_COMM_LEN];	/* executable name */
@@ -343,21 +343,24 @@ static bool unreferenced_object(struct kmemleak_object *object)
  * print_unreferenced function must be called with the object->lock held.
  */
 static void print_unreferenced(struct seq_file *seq,
-			       struct kmemleak_object *object)
+				struct kmemleak_object *object)
 {
 	int i;
+	unsigned long *entries;
+	unsigned int nr_entries;
 	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 
+	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
 	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
-		   object->pointer, object->size);
+			object->pointer, object->size);
 	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
-		   object->comm, object->pid, object->jiffies,
-		   msecs_age / 1000, msecs_age % 1000);
+			object->comm, object->pid, object->jiffies,
+			msecs_age / 1000, msecs_age % 1000);
 	hex_dump_object(seq, object);
 	warn_or_seq_printf(seq, "  backtrace:\n");
 
-	for (i = 0; i < object->trace_len; i++) {
-		void *ptr = (void *)object->trace[i];
+	for (i = 0; i < nr_entries; i++) {
+		void *ptr = (void *)entries[i];
 		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
 	}
 }
@@ -370,15 +373,16 @@ static void print_unreferenced(struct seq_file *seq,
 static void dump_object_info(struct kmemleak_object *object)
 {
 	pr_notice("Object 0x%08lx (size %zu):\n",
-		  object->pointer, object->size);
+			object->pointer, object->size);
 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
-		  object->comm, object->pid, object->jiffies);
+			object->comm, object->pid, object->jiffies);
 	pr_notice("  min_count = %d\n", object->min_count);
 	pr_notice("  count = %d\n", object->count);
 	pr_notice("  flags = 0x%x\n", object->flags);
 	pr_notice("  checksum = %u\n", object->checksum);
 	pr_notice("  backtrace:\n");
-	stack_trace_print(object->trace, object->trace_len, 4);
+	if (object->trace_handle)
+		stack_depot_print(object->trace_handle);
 }
 
 /*
@@ -591,12 +595,18 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
 	return object;
 }
 
-/*
- * Save stack trace to the given array of MAX_TRACE size.
- */
-static int __save_stack_trace(unsigned long *trace)
+static noinline depot_stack_handle_t set_track_prepare(void)
 {
-	return stack_trace_save(trace, MAX_TRACE, 2);
+	depot_stack_handle_t trace_handle;
+	unsigned long entries[MAX_TRACE];
+	unsigned int nr_entries;
+
+	if (!kmemleak_initialized)
+		return 0;
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
+	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+
+	return trace_handle;
 }
 
 /*
@@ -654,7 +664,7 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
 	}
 
 	/* kernel backtrace */
-	object->trace_len = __save_stack_trace(object->trace);
+	object->trace_handle = set_track_prepare();
 
 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 
@@ -694,7 +704,6 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
 	rb_link_node(&object->rb_node, rb_parent, link);
 	rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
 			&object_tree_root);
-
 	list_add_tail_rcu(&object->object_list, &object_list);
 out:
 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
@@ -1094,7 +1103,7 @@ void __ref kmemleak_update_trace(const void *ptr)
 	}
 
 	raw_spin_lock_irqsave(&object->lock, flags);
-	object->trace_len = __save_stack_trace(object->trace);
+	object->trace_handle = set_track_prepare();
 	raw_spin_unlock_irqrestore(&object->lock, flags);
 
 	put_object(object);
@@ -2064,6 +2073,7 @@ void __init kmemleak_init(void)
 	if (kmemleak_error)
 		return;
 
+	stack_depot_init();
 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
 
-- 
1.9.1
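
For readers unfamiliar with the stackdepot API the patch switches to, the
pattern is: capture a raw trace with stack_trace_save(), deduplicate it into
a compact handle with stack_depot_save(), and later turn the handle back into
entries with stack_depot_fetch() (or print it directly via
stack_depot_print()). The sketch below is illustrative only and not part of
the patch; record_backtrace()/report_backtrace() and SKETCH_MAX_TRACE are
hypothetical names, not symbols from mm/kmemleak.c.

/*
 * Minimal sketch of the stackdepot usage pattern adopted by this patch.
 * Illustrative only; helper names are made up for the example.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/gfp.h>
#include <linux/printk.h>

#define SKETCH_MAX_TRACE 16	/* analogous to kmemleak's MAX_TRACE */

/* Capture the current stack and store it deduplicated in the depot. */
static noinline depot_stack_handle_t record_backtrace(void)
{
	unsigned long entries[SKETCH_MAX_TRACE];
	unsigned int nr_entries;

	/* skip 2 frames: this helper and its immediate caller */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	/*
	 * GFP_NOWAIT: the depot may need to allocate a new pool and the
	 * caller may not be allowed to sleep. Returns 0 on failure, which
	 * callers must tolerate (kmemleak treats 0 as "no trace").
	 */
	return stack_depot_save(entries, nr_entries, GFP_NOWAIT);
}

/* Retrieve and print a previously recorded backtrace. */
static void report_backtrace(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries, i;

	if (!handle)		/* 0 means no trace was recorded */
		return;

	nr_entries = stack_depot_fetch(handle, &entries);
	for (i = 0; i < nr_entries; i++)
		pr_info("    [<%p>] %pS\n", (void *)entries[i],
			(void *)entries[i]);

	/* Or let the depot do the formatting itself: */
	stack_depot_print(handle);
}

Note that the depot must be initialized before handles can be saved, which is
why the patch calls stack_depot_init() from kmemleak_init() and has
set_track_prepare() bail out with 0 while kmemleak_initialized is not yet set.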