On Mon, Sep 05, 2022 at 05:10AM +0200, Oscar Salvador wrote:
[...]
> +int stack_depot_print_stacks_threshold(char *buf, size_t size, loff_t *pos)

Can you add a kernel-doc comment describing what this does (and also
update it accordingly in 3/3 when you add 'threshold')?

From what I see it prints *all* stacks that have a non-zero count.
Correct? If so, should this be called stack_depot_print_all_count()
(having "stack(s)" in the name twice doesn't make it more obvious what
it does)? Then in the follow-up patch you add the 'threshold' arg.

> +{
> +	int i = *pos, ret = 0;
> +	struct stack_record **stacks, *stack;
> +	static struct stack_record *last = NULL;
> +	unsigned long stack_table_entries = stack_hash_mask + 1;
> +
> +	/* Continue from the last stack if we have one */
> +	if (last) {
> +		stack = last->next;

This is dead code?

> +	} else {
> +new_table:
> +		stacks = &stack_table[i];
> +		stack = (struct stack_record *)stacks;
> +	}
> +
> +	for (; stack; stack = stack->next) {
> +		if (!stack->size || stack->size < 0 ||
> +		    stack->size > size || stack->handle.valid != 1 ||
> +		    refcount_read(&stack->count) < 1)
> +			continue;
> +
> +		ret += stack_trace_snprint(buf, size, stack->entries, stack->size, 0);
> +		ret += scnprintf(buf + ret, size - ret, "stack count: %d\n\n",
> +				 refcount_read(&stack->count));
> +		last = stack;
> +		return ret;
> +	}
> +
> +	i++;
> +	*pos = i;
> +	last = NULL;
> +
> +	/* Keep looking all tables for valid stacks */
> +	if (i < stack_table_entries)
> +		goto new_table;
> +
> +	return 0;
> +}

Either I'm missing something really obvious, or the above function can
be simplified to just this (untested!):

	int stack_depot_print_stacks_threshold(char *buf, size_t size, loff_t *pos)
	{
		const unsigned long stack_table_entries = stack_hash_mask + 1;

		/* Iterate over all tables for valid stacks. */
		for (; *pos < stack_table_entries; (*pos)++) {
			for (struct stack_record *stack = stack_table[*pos]; stack;
			     stack = stack->next) {
				int ret;

				if (!stack->size || stack->size < 0 ||
				    stack->size > size || stack->handle.valid != 1 ||
				    refcount_read(&stack->count) < 1)
					continue;

				ret = stack_trace_snprint(buf, size, stack->entries,
							  stack->size, 0);
				ret += scnprintf(buf + ret, size - ret,
						 "stack count: %d\n\n",
						 refcount_read(&stack->count));
				return ret;
			}
		}

		return 0;
	}

> diff --git a/mm/page_owner.c b/mm/page_owner.c
> index 8730f377fa91..d88e6b4aefa0 100644
> --- a/mm/page_owner.c
> +++ b/mm/page_owner.c
> @@ -664,6 +664,29 @@ static void init_early_allocated_pages(void)
>  	init_zones_in_node(pgdat);
>  }
> 
> +static ssize_t read_page_owner_stacks(struct file *file, char __user *buf,
> +				      size_t count, loff_t *pos)
> +{
> +	char *kbuf;
> +	int ret = 0;
> +
> +	count = min_t(size_t, count, PAGE_SIZE);
> +	kbuf = kmalloc(count, GFP_KERNEL);
> +	if (!kbuf)
> +		return -ENOMEM;
> +
> +	ret += stack_depot_print_stacks_threshold(kbuf, count, pos);

If I understood right, this will print *all* stacks that have a
non-zero count, and this isn't related to page_owner per se. Correct?

This might not be a problem right now, but once there are more users
that want to count stack usage, you'll end up with page_owner + other
stacks here.
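
Regarding the kernel-doc comment above: something along these lines
would be a starting point (only a rough sketch, assuming the
stack_depot_print_all_count() rename suggested earlier; adjust the @pos
description to whatever iteration scheme you settle on):

	/**
	 * stack_depot_print_all_count - Print stacks that have a non-zero count
	 * @buf:	Buffer to print into.
	 * @size:	Size of @buf in bytes.
	 * @pos:	Position in the stack depot hash table to resume from;
	 *		updated so that repeated calls eventually visit every
	 *		bucket.
	 *
	 * Prints the next stack trace with a non-zero usage count, followed by
	 * the count itself, into @buf.
	 *
	 * Return: Number of bytes written to @buf, or 0 once all stacks have
	 *	   been printed.
	 */

Writing that down also forces the *pos contract to be made explicit,
which is where most of the confusion above comes from.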