The patch titled
     Subject: lib/stackdepot: annotate init and early init functions
has been added to the -mm mm-nonmm-unstable branch.  Its filename is
     lib-stackdepot-annotate-init-and-early-init-functions.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/lib-stackdepot-annotate-init-and-early-init-functions.patch

This patch will later appear in the mm-nonmm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Subject: lib/stackdepot: annotate init and early init functions
Date: Mon, 30 Jan 2023 21:49:30 +0100

Add comments to stack_depot_early_init and stack_depot_init to explain
certain parts of their implementation.

Also add a pr_info message to stack_depot_early_init similar to the one
in stack_depot_init.

Also move the scale variable in stack_depot_init to the scope where it is
being used.

Link: https://lkml.kernel.org/r/be09b64fb196ffe0c19ce7afc4130efba5425df9.1675111415.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Evgenii Stepanov <eugenis@xxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

--- a/lib/stackdepot.c~lib-stackdepot-annotate-init-and-early-init-functions
+++ a/lib/stackdepot.c
@@ -115,24 +115,34 @@ void __init stack_depot_request_early_in
 	__stack_depot_early_init_requested = true;
 }
 
+/* Allocates a hash table via memblock. Can only be used during early boot. */
 int __init stack_depot_early_init(void)
 {
 	unsigned long entries = 0;
 
-	/* This is supposed to be called only once, from mm_init() */
+	/* This function must be called only once, from mm_init(). */
 	if (WARN_ON(__stack_depot_early_init_passed))
 		return 0;
-
 	__stack_depot_early_init_passed = true;
 
+	/*
+	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
+	 * in fuzzing scenarios, which leads to a large number of different
+	 * stack traces being stored in stack depot.
+	 */
 	if (kasan_enabled() && !stack_hash_order)
 		stack_hash_order = STACK_HASH_ORDER_MAX;
 
 	if (!__stack_depot_early_init_requested || stack_depot_disabled)
 		return 0;
 
+	/*
+	 * If stack_hash_order is not set, leave entries as 0 to rely on the
+	 * automatic calculations performed by alloc_large_system_hash.
+	 */
 	if (stack_hash_order)
-		entries = 1UL << stack_hash_order;
+		entries = 1UL << stack_hash_order;
+	pr_info("allocating hash table via alloc_large_system_hash\n");
 	stack_table = alloc_large_system_hash("stackdepot",
 						sizeof(struct stack_record *),
 						entries,
@@ -142,7 +152,6 @@ int __init stack_depot_early_init(void)
 						&stack_hash_mask,
 						1UL << STACK_HASH_ORDER_MIN,
 						1UL << STACK_HASH_ORDER_MAX);
-
 	if (!stack_table) {
 		pr_err("hash table allocation failed, disabling\n");
 		stack_depot_disabled = true;
@@ -152,6 +161,7 @@ int __init stack_depot_early_init(void)
 	return 0;
 }
 
+/* Allocates a hash table via kvmalloc. Can be used after boot. */
 int stack_depot_init(void)
 {
 	static DEFINE_MUTEX(stack_depot_init_mutex);
@@ -160,11 +170,16 @@ int stack_depot_init(void)
 	mutex_lock(&stack_depot_init_mutex);
 	if (!stack_depot_disabled && !stack_table) {
 		unsigned long entries;
-		int scale = STACK_HASH_SCALE;
 
+		/*
+		 * Similarly to stack_depot_early_init, use stack_hash_order
+		 * if assigned, and rely on automatic scaling otherwise.
+		 */
 		if (stack_hash_order) {
 			entries = 1UL << stack_hash_order;
 		} else {
+			int scale = STACK_HASH_SCALE;
+
 			entries = nr_free_buffer_pages();
 			entries = roundup_pow_of_two(entries);
 
@@ -179,7 +194,7 @@ int stack_depot_init(void)
 		if (entries > 1UL << STACK_HASH_ORDER_MAX)
 			entries = 1UL << STACK_HASH_ORDER_MAX;
 
-		pr_info("allocating hash table of %lu entries with kvcalloc\n",
+		pr_info("allocating hash table of %lu entries via kvcalloc\n",
 			entries);
 		stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
 		if (!stack_table) {
_

Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are

kasan-reset-page-tags-properly-with-sampling.patch
kasan-reset-page-tags-properly-with-sampling-v2.patch
lib-stackdepot-fix-setting-next_slab_inited-in-init_stack_slab.patch
lib-stackdepot-put-functions-in-logical-order.patch
lib-stackdepot-use-pr_fmt-to-define-message-format.patch
lib-stackdepot-mm-rename-stack_depot_want_early_init.patch
lib-stackdepot-rename-stack_depot_disable.patch
lib-stackdepot-annotate-init-and-early-init-functions.patch
lib-stackdepot-lower-the-indentation-in-stack_depot_init.patch
lib-stackdepot-reorder-and-annotate-global-variables.patch
lib-stackdepot-rename-hash-table-constants-and-variables.patch
lib-stackdepot-rename-init_stack_slab.patch
lib-stackdepot-rename-slab-variables.patch
lib-stackdepot-rename-handle-and-slab-constants.patch
lib-stacktrace-drop-impossible-warn_on-for-depot_init_slab.patch
lib-stackdepot-annotate-depot_init_slab-and-depot_alloc_stack.patch
lib-stacktrace-kasan-kmsan-rework-extra_bits-interface.patch
lib-stackdepot-annotate-racy-slab_index-accesses.patch
lib-stackdepot-various-comments-clean-ups.patch
lib-stackdepot-move-documentation-comments-to-stackdepoth.patch
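
For reference, a minimal usage sketch of the two init paths annotated by this
patch.  The foo_* wrappers and the 16-entry buffer are hypothetical and only
illustrate usage; stack_depot_request_early_init, stack_depot_init,
stack_trace_save, and stack_depot_save are the existing lib/stackdepot and
lib/stacktrace interfaces this series touches or relies on:

/*
 * Usage sketch only (not part of the patch): a hypothetical "foo"
 * subsystem exercising the two init paths annotated above.
 */
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/*
 * Early path: opt in before mm_init() runs stack_depot_early_init(),
 * so the hash table is allocated from memblock.
 */
void __init foo_request_stack_depot(void)
{
	stack_depot_request_early_init();
}

/*
 * Late path: stack_depot_init() allocates the hash table via kvcalloc
 * and is a no-op if the table already exists or stack depot is disabled.
 */
static int foo_setup(void)
{
	return stack_depot_init();
}

/* Once either path has run, stack traces can be saved into the depot. */
static depot_stack_handle_t foo_record_stack(void)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr, GFP_KERNEL);
}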