On Tue, Mar 01, 2022 at 03:36:38AM +0000, Hyeonggon Yoo wrote:
> commit ba10d4b46655 ("mm/slub: use stackdepot to save stack trace in
> objects") initializes stack depot in cache creation if the SLAB_STORE_USER
> flag is set.
>
> This can make the kernel crash because a cache can be created in various
> contexts. For example, if a user sets slub_debug=U, the kernel crashes
> because create_boot_cache() calls stack_depot_init(), which tries to
> allocate the hash table using memblock_alloc() if slab is not available.
> But memblock is also not available at that time.
>
> This patch solves the problem by initializing the stack depot early in
> the boot process if the SLAB_STORE_USER debug flag is set globally or
> is set for at least one cache.
>

Hello Vlastimil, would you pick this up into slub-stackdepot-v1,
or fold it into the original patch (2/5)?
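
For reference, the crash path with slub_debug=U looks roughly like this
(a simplified sketch of the call chain, not an exact backtrace):

	start_kernel()
	  mm_init()
	    kmem_cache_init()              /* boot caches, slab not up yet */
	      create_boot_cache()
	        __kmem_cache_create()
	          kmem_cache_open()
	            stack_depot_init()     /* SLAB_STORE_USER is set */
	              memblock_alloc()     /* memblock is no longer usable
	                                      at this point -> crash */

With this patch the stack depot is instead set up from
stack_depot_early_init(), based on the slub_debug flags parsed in
setup_slub_debug().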
Thanks!

-- 
Thank you, You are awesome!
Hyeonggon :-)

> [ elver@xxxxxxxxxx: initialize stack depot depending on slub_debug
>   parameter instead of allowing stack_depot_init() to be called
>   during kmem_cache_init() for simplicity. ]
>
> [ vbabka@xxxxxxx: parse slub_debug parameter in setup_slub_debug()
>   and initialize stack depot in stack_depot_early_init(). ]
>
> Link: https://lore.kernel.org/all/YhyeaP8lrzKgKm5A@xxxxxxxxxxxxxxxxxxx-northeast-1.compute.internal/
> Fixes: ba10d4b46655 ("mm/slub: use stackdepot to save stack trace in objects")
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
> ---
>  include/linux/slab.h       |  6 ++++++
>  include/linux/stackdepot.h |  3 ++-
>  mm/slub.c                  | 18 +++++++++++++++---
>  3 files changed, 23 insertions(+), 4 deletions(-)
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index 37bde99b74af..062128e0db10 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -763,6 +763,12 @@ extern void kvfree_sensitive(const void *addr, size_t len);
>  unsigned int kmem_cache_size(struct kmem_cache *s);
>  void __init kmem_cache_init_late(void);
>  
> +#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_STACKDEPOT)
> +int slab_stack_depot_init(void);
> +#else
> +int slab_stack_depot_init(void) { return 0; }
> +#endif
> +
>  #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
>  int slab_prepare_cpu(unsigned int cpu);
>  int slab_dead_cpu(unsigned int cpu);
> diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
> index 17f992fe6355..a813a2673c48 100644
> --- a/include/linux/stackdepot.h
> +++ b/include/linux/stackdepot.h
> @@ -12,6 +12,7 @@
>  #define _LINUX_STACKDEPOT_H
>  
>  #include <linux/gfp.h>
> +#include <linux/slab.h>
>  
>  typedef u32 depot_stack_handle_t;
>  
> @@ -32,7 +33,7 @@ int stack_depot_init(void);
>  #ifdef CONFIG_STACKDEPOT_ALWAYS_INIT
>  static inline int stack_depot_early_init(void) { return stack_depot_init(); }
>  #else
> -static inline int stack_depot_early_init(void) { return 0; }
> +static inline int stack_depot_early_init(void) { return slab_stack_depot_init(); }
>  #endif
>  
>  depot_stack_handle_t stack_depot_save(unsigned long *entries,
> diff --git a/mm/slub.c b/mm/slub.c
> index a74afe59a403..2419fc3cc9f3 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -646,6 +646,14 @@ static slab_flags_t slub_debug;
>  
>  static char *slub_debug_string;
>  static int disable_higher_order_debug;
> +static bool init_stack_depot;
> +
> +int slab_stack_depot_init(void)
> +{
> +	if (init_stack_depot)
> +		stack_depot_init();
> +	return 0;
> +}
>  
>  /*
>   * slub is about to manipulate internal object metadata.  This memory lies
> @@ -1531,6 +1539,8 @@ static int __init setup_slub_debug(char *str)
>  			global_slub_debug_changed = true;
>  		} else {
>  			slab_list_specified = true;
> +			if (flags & SLAB_STORE_USER)
> +				init_stack_depot = true;
>  		}
>  	}
>  
> @@ -1546,6 +1556,10 @@ static int __init setup_slub_debug(char *str)
>  		global_flags = slub_debug;
>  		slub_debug_string = saved_str;
>  	}
> +
> +	if (global_flags & SLAB_STORE_USER)
> +		init_stack_depot = true;
> +
>  out:
>  	slub_debug = global_flags;
>  	if (slub_debug != 0 || slub_debug_string)
> @@ -1556,6 +1570,7 @@ static int __init setup_slub_debug(char *str)
>  	    static_branch_unlikely(&init_on_free)) &&
>  	    (slub_debug & SLAB_POISON))
>  		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
> +
>  	return 1;
>  }
>  
> @@ -4221,9 +4236,6 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
>  	s->remote_node_defrag_ratio = 1000;
>  #endif
>  
> -	if (s->flags & SLAB_STORE_USER && IS_ENABLED(CONFIG_STACKDEPOT))
> -		stack_depot_init();
> -
>  	/* Initialize the pre-computed randomized freelist if slab is up */
>  	if (slab_state >= UP) {
>  		if (init_cache_random_seq(s))
> -- 
> 2.33.1
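
(A usage note, in case it helps when wording the changelog: both ways of
requesting SLAB_STORE_USER on the command line should now trigger the
early init, e.g. something like

	slub_debug=U              # globally, for every cache
	slub_debug=U,kmalloc-64   # for one cache only; the name is just
	                          # an illustrative example

since setup_slub_debug() sets init_stack_depot both for the global flags
and when at least one cache list entry carries SLAB_STORE_USER.)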