On Sun, 27 Feb 2022 at 04:08, Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx> wrote:
>
> After commit 2dba5eb1c73b ("lib/stackdepot: allow optional init and
> stack_table allocation by kvmalloc()"), stack_depot_init() is called
> later if CONFIG_STACKDEPOT_ALWAYS_INIT=n to remove unnecessary memory
> usage. It allocates stack_table using memblock_alloc() or kvmalloc(),
> depending on the availability of the slab allocator.
>
> But when stack_depot_init() is called while creating boot slab caches,
> neither the slab allocator nor memblock is available, so the kernel
> crashes. Allocate stack_table from the page allocator when both the
> slab allocator and memblock are unavailable.

This is odd - who is calling stack_depot_init() while neither slab nor
memblock are available? Do you have a stacktrace?

> Limit the size of stack_table when using the page allocator, because
> vmalloc() is also unavailable in kmem_cache_init(): it must not be
> larger than (PAGE_SIZE << (MAX_ORDER - 1)).
>
> This patch was tested with both CONFIG_STACKDEPOT_ALWAYS_INIT=y and =n.
>
> Fixes: 2dba5eb1c73b ("lib/stackdepot: allow optional init and stack_table allocation by kvmalloc()")
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
> ---
>  lib/stackdepot.c | 28 +++++++++++++++++++++-------
>  1 file changed, 21 insertions(+), 7 deletions(-)
>
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index bf5ba9af0500..606f80ae2bf7 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -73,6 +73,14 @@ static int next_slab_inited;
>  static size_t depot_offset;
>  static DEFINE_RAW_SPINLOCK(depot_lock);
>
> +static unsigned int stack_hash_size = (1 << CONFIG_STACK_HASH_ORDER);
> +static inline unsigned int stack_hash_mask(void)
> +{
> +	return stack_hash_size - 1;
> +}
> +
> +#define STACK_HASH_SEED 0x9747b28c
> +
>  static bool init_stack_slab(void **prealloc)
>  {
>  	if (!*prealloc)
> @@ -142,10 +150,6 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
>  	return stack;
>  }
>
> -#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
> -#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
> -#define STACK_HASH_SEED 0x9747b28c
> -
>  static bool stack_depot_disable;
>  static struct stack_record **stack_table;
>
> @@ -172,18 +176,28 @@ __ref int stack_depot_init(void)
>
>  	mutex_lock(&stack_depot_init_mutex);
>  	if (!stack_depot_disable && !stack_table) {
> -		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
> +		size_t size = (stack_hash_size * sizeof(struct stack_record *));
>  		int i;
>
>  		if (slab_is_available()) {
>  			pr_info("Stack Depot allocating hash table with kvmalloc\n");
>  			stack_table = kvmalloc(size, GFP_KERNEL);
> +		} else if (totalram_pages() > 0) {
> +			/* Reduce size because vmalloc may be unavailable */
> +			size = min(size, PAGE_SIZE << (MAX_ORDER - 1));
> +			stack_hash_size = size / sizeof(struct stack_record *);
> +
> +			pr_info("Stack Depot allocating hash table with __get_free_pages\n");
> +			stack_table = (struct stack_record **)
> +				__get_free_pages(GFP_KERNEL, get_order(size));
>  		} else {
>  			pr_info("Stack Depot allocating hash table with memblock_alloc\n");
>  			stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
>  		}
> +
>  		if (stack_table) {
> -			for (i = 0; i < STACK_HASH_SIZE; i++)
> +			pr_info("Stack Depot hash table size=%u\n", stack_hash_size);
> +			for (i = 0; i < stack_hash_size; i++)
>  				stack_table[i] = NULL;
>  		} else {
>  			pr_err("Stack Depot hash table allocation failed, disabling\n");
> @@ -363,7 +377,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
>  		goto fast_exit;
>
>  	hash = hash_stack(entries, nr_entries);
> -	bucket = &stack_table[hash & STACK_HASH_MASK];
> +	bucket = &stack_table[hash & stack_hash_mask()];
>
>  	/*
>  	 * Fast path: look the stack trace up without locking.
> --
> 2.33.1
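
For reference, a back-of-the-envelope check of the size cap applied in
the new totalram_pages() branch. This is a sketch assuming the common
defaults of CONFIG_STACK_HASH_ORDER = 20, 4 KiB pages, MAX_ORDER = 11
and 64-bit (8-byte) pointers; the actual numbers are config-dependent:

	/*
	 * Illustration only, not part of the patch: with 2^20 buckets
	 * and 8-byte pointers the table is 8 MiB, but the buddy
	 * allocator serves at most PAGE_SIZE << (MAX_ORDER - 1)
	 * = 4 KiB << 10 = 4 MiB in one contiguous chunk, so the cap
	 * halves the table.
	 */
	size_t size = (1UL << 20) * sizeof(struct stack_record *); /* 8 MiB */
	size = min(size, PAGE_SIZE << (MAX_ORDER - 1));            /* 4 MiB */
	/* stack_hash_size then becomes 4 MiB / 8 B = 2^19 buckets */

So on such a config the fallback path silently halves the hash table
relative to kvmalloc(), which is what the added
pr_info("Stack Depot hash table size=%u\n", ...) makes visible.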