We might only be interested in the stack <-> count relationship, so
instead of having to fiddle with the page_owner output and sift
through pfns, let us add a new file called 'page_owner_stacks' that
does just that. Reading that file yields all the stack traces, each
followed by its counter (times allocated - times freed), so we get a
more specific overview.

Signed-off-by: Oscar Salvador <osalvador@xxxxxxx>
---
 include/linux/stackdepot.h |  2 ++
 lib/stackdepot.c           | 40 ++++++++++++++++++++++++++++++++++++++
 mm/page_owner.c            | 30 ++++++++++++++++++++++++++++
 3 files changed, 72 insertions(+)

diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 5ee0cf5be88f..20f62039f23a 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -25,6 +25,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 					gfp_t gfp_flags, bool can_alloc,
 					stack_action_t action);
 void stack_depot_dec_count(depot_stack_handle_t handle);
+int stack_depot_print_stacks_threshold(char *buf, size_t size, loff_t *pos,
+				       unsigned long *last_stack);
 
 /*
  * Every user of stack depot has to call stack_depot_init() during its own init
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index aeb59d3557e2..3090ae0f3958 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -526,3 +526,43 @@ depot_stack_handle_t stack_depot_save_action(unsigned long *entries,
 	return __stack_depot_save(entries, nr_entries, alloc_flags, true, action);
 }
 EXPORT_SYMBOL_GPL(stack_depot_save_action);
+
+int stack_depot_print_stacks_threshold(char *buf, size_t size, loff_t *pos,
+				       unsigned long *last_stack)
+{
+	struct stack_record *stack = NULL, *last;
+	struct stack_record **stacks;
+	int i = *pos, ret = 0;
+
+	/* Continue from the last stack if we have one */
+	if (*last_stack) {
+		last = (struct stack_record *)*last_stack;
+		stack = last->next;
+	} else {
+new_table:
+		stacks = &stack_table[i];
+		stack = (struct stack_record *)stacks;
+	}
+
+	for (; stack; stack = stack->next) {
+		if (!stack->size || stack->size < 0 ||
+		    stack->size > size || stack->handle.valid != 1 ||
+		    refcount_read(&stack->count) < 1)
+			continue;
+
+		ret += stack_trace_snprint(buf, size, stack->entries, stack->size, 0);
+		ret += scnprintf(buf + ret, size - ret, "stack count: %d\n\n",
+				 refcount_read(&stack->count));
+		*last_stack = (unsigned long)stack;
+		return ret;
+	}
+
+	i++;
+	*pos = i;
+
+	/* Keep looking through all the tables for valid stacks */
+	if (i < STACK_HASH_SIZE)
+		goto new_table;
+
+	return 0;
+}
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 794f346d7520..8c67c7eb2451 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -43,6 +43,8 @@ static depot_stack_handle_t early_handle;
 
 static void init_early_allocated_pages(void);
 
+static unsigned long last_stack = 0;
+
 static int __init early_page_owner_param(char *buf)
 {
 	int ret = kstrtobool(buf, &page_owner_enabled);
@@ -663,6 +665,32 @@ static void init_early_allocated_pages(void)
 		init_zones_in_node(pgdat);
 }
 
+static ssize_t read_page_owner_stacks(struct file *file, char __user *buf,
+				      size_t count, loff_t *pos)
+{
+	char *kbuf;
+	int ret = 0;
+
+	count = min_t(size_t, count, PAGE_SIZE);
+	kbuf = kmalloc(count, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret += stack_depot_print_stacks_threshold(kbuf, count, pos, &last_stack);
+	if (copy_to_user(buf, kbuf, ret))
+		ret = -EFAULT;
+
+	if (!ret)
+		last_stack = 0;
+
+	kfree(kbuf);
+	return ret;
+}
+
+static const struct file_operations proc_page_owner_stacks = {
+	.read = read_page_owner_stacks,
+};
+
 static const struct file_operations proc_page_owner_operations = {
 	.read = read_page_owner,
 };
@@ -676,6 +704,8 @@ static int __init pageowner_init(void)
 
 	debugfs_create_file("page_owner", 0400, NULL, NULL,
 			    &proc_page_owner_operations);
+	debugfs_create_file("page_owner_stacks", 0400, NULL, NULL,
+			    &proc_page_owner_stacks);
 
 	return 0;
 }
-- 
2.35.3
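
[Editor's illustrative sketch, not part of the series: the new file is
created with a NULL parent, so with debugfs mounted in the usual place
it would show up as /sys/kernel/debug/page_owner_stacks; each read()
hands back one stack trace (the frames printed by stack_trace_snprint())
followed by its "stack count: N" line, and a return of 0 means every
bucket of the hash table has been walked. The path and buffer size
below are assumptions.]

/* Hypothetical user-space reader for the new debugfs file. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/page_owner_stacks", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * With this patch, each read() returns one stack trace plus its
	 * counter line; 0 signals that all hash buckets have been walked.
	 */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	if (n < 0)
		perror("read");

	close(fd);
	return n < 0 ? 1 : 0;
}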