The patch titled
     Subject: lib/stackdepot: fix and clean-up atomic annotations
has been added to the -mm mm-unstable branch.  Its filename is
     lib-stackdepot-fix-and-clean-up-atomic-annotations.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/lib-stackdepot-fix-and-clean-up-atomic-annotations.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Subject: lib/stackdepot: fix and clean-up atomic annotations
Date: Mon, 20 Nov 2023 18:47:05 +0100

Drop smp_load_acquire from next_pool_required in depot_init_pool, as both
depot_init_pool and all the smp_store_release's to this variable are
executed under the stack depot lock.

Also simplify and clean up comments accompanying the use of atomic
accesses in the stack depot code.

Link: https://lkml.kernel.org/r/c118ef044d8db80248d9e1f14592c72e8429e9d9.1700502145.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Reviewed-by: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Evgenii Stepanov <eugenis@xxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 lib/stackdepot.c |   29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)

--- a/lib/stackdepot.c~lib-stackdepot-fix-and-clean-up-atomic-annotations
+++ a/lib/stackdepot.c
@@ -231,10 +231,10 @@ static void depot_init_pool(void **preal
 	/*
 	 * If the next pool is already initialized or the maximum number of
 	 * pools is reached, do not use the preallocated memory.
-	 * smp_load_acquire() here pairs with smp_store_release() below and
-	 * in depot_alloc_stack().
+	 * Access next_pool_required non-atomically, as there are no concurrent
+	 * write accesses to this variable.
 	 */
-	if (!smp_load_acquire(&next_pool_required))
+	if (!next_pool_required)
 		return;
 
 	/* Check if the current pool is not yet allocated. */
@@ -255,8 +255,8 @@ static void depot_init_pool(void **preal
 		 * At this point, either the next pool is initialized or the
 		 * maximum number of pools is reached. In either case, take
 		 * note that initializing another pool is not required.
-		 * This smp_store_release pairs with smp_load_acquire() above
-		 * and in stack_depot_save().
+		 * smp_store_release() pairs with smp_load_acquire() in
+		 * stack_depot_save().
 		 */
 		smp_store_release(&next_pool_required, 0);
 	}
@@ -279,7 +279,7 @@ depot_alloc_stack(unsigned long *entries
 
 		/*
 		 * Move on to the next pool.
-		 * WRITE_ONCE pairs with potential concurrent read in
+		 * WRITE_ONCE() pairs with potential concurrent read in
 		 * stack_depot_fetch().
 		 */
 		WRITE_ONCE(pool_index, pool_index + 1);
@@ -287,8 +287,8 @@ depot_alloc_stack(unsigned long *entries
 		/*
 		 * If the maximum number of pools is not reached, take note
 		 * that the next pool needs to initialized.
-		 * smp_store_release() here pairs with smp_load_acquire() in
-		 * stack_depot_save() and depot_init_pool().
+		 * smp_store_release() pairs with smp_load_acquire() in
+		 * stack_depot_save().
 		 */
 		if (pool_index + 1 < DEPOT_MAX_POOLS)
 			smp_store_release(&next_pool_required, 1);
@@ -329,7 +329,7 @@ static struct stack_record *depot_fetch_
 {
 	union handle_parts parts = { .handle = handle };
 	/*
-	 * READ_ONCE pairs with potential concurrent write in
+	 * READ_ONCE() pairs with potential concurrent write in
 	 * depot_alloc_stack().
 	 */
 	int pool_index_cached = READ_ONCE(pool_index);
@@ -419,8 +419,7 @@ depot_stack_handle_t __stack_depot_save(
 
 	/*
 	 * Fast path: look the stack trace up without locking.
-	 * The smp_load_acquire() here pairs with smp_store_release() to
-	 * |bucket| below.
+	 * smp_load_acquire() pairs with smp_store_release() to |bucket| below.
 	 */
 	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
 	if (found)
@@ -430,8 +429,8 @@ depot_stack_handle_t __stack_depot_save(
 	 * Check if another stack pool needs to be initialized. If so, allocate
 	 * the memory now - we won't be able to do that under the lock.
 	 *
-	 * The smp_load_acquire() here pairs with smp_store_release() to
-	 * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
+	 * smp_load_acquire() pairs with smp_store_release() in
+	 * depot_alloc_stack() and depot_init_pool().
 	 */
 	if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
 		/*
@@ -457,8 +456,8 @@ depot_stack_handle_t __stack_depot_save(
 	if (new) {
 		new->next = *bucket;
 		/*
-		 * This smp_store_release() pairs with
-		 * smp_load_acquire() from |bucket| above.
+		 * smp_store_release() pairs with smp_load_acquire()
+		 * from |bucket| above.
 		 */
 		smp_store_release(bucket, new);
 		found = new;
_

Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are

lib-stackdepot-print-disabled-message-only-if-truly-disabled.patch
lib-stackdepot-check-disabled-flag-when-fetching.patch
lib-stackdepot-simplify-__stack_depot_save.patch
lib-stackdepot-drop-valid-bit-from-handles.patch
lib-stackdepot-add-depot_fetch_stack-helper.patch
lib-stackdepot-use-fixed-sized-slots-for-stack-records.patch
lib-stackdepot-fix-and-clean-up-atomic-annotations.patch
lib-stackdepot-rework-helpers-for-depot_alloc_stack.patch
lib-stackdepot-rename-next_pool_required-to-new_pool_required.patch
lib-stackdepot-store-next-pool-pointer-in-new_pool.patch
lib-stackdepot-store-free-stack-records-in-a-freelist.patch
lib-stackdepot-use-read-write-lock.patch
lib-stackdepot-use-list_head-for-stack-record-links.patch
kmsan-use-stack_depot_save-instead-of-__stack_depot_save.patch
lib-stackdepot-kasan-add-flags-to-__stack_depot_save-and-rename.patch
lib-stackdepot-add-refcount-for-records.patch
lib-stackdepot-allow-users-to-evict-stack-traces.patch
kasan-remove-atomic-accesses-to-stack-ring-entries.patch
kasan-check-object_size-in-kasan_complete_mode_report_info.patch
kasan-use-stack_depot_put-for-tag-based-modes.patch
kasan-use-stack_depot_put-for-generic-mode.patch
lib-stackdepot-adjust-depot_pools_cap-for-kmsan.patch
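
For readers following the ordering argument in the changelog, below is a
minimal userspace sketch of the same pattern (illustration only, not part
of the patch): C11 atomics and a pthread mutex stand in for
smp_load_acquire()/smp_store_release() and the stack depot lock, and all
names in it are invented rather than taken from lib/stackdepot.c.

/*
 * Illustration only; not kernel code. When every writer of a flag and a
 * given reader all run under the same lock, the lock already orders them,
 * so that reader needs no acquire. Acquire/release is only needed to pair
 * with the lockless fast-path reader.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t depot_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int need_new_pool = 1;	/* plays the role of next_pool_required */

/* Lockless fast path (cf. stack_depot_save()): the acquire here pairs
 * with the release in clear_need_new_pool() below. */
bool fast_path_check(void)
{
	return atomic_load_explicit(&need_new_pool, memory_order_acquire);
}

/* Called with depot_lock held (cf. depot_init_pool()): every write to
 * need_new_pool also happens under depot_lock, so a plain/relaxed read
 * is sufficient and no acquire is required. */
bool locked_check(void)
{
	return atomic_load_explicit(&need_new_pool, memory_order_relaxed);
}

/* Called with depot_lock held: the release pairs only with the acquire
 * in the lockless fast path above. */
void clear_need_new_pool(void)
{
	atomic_store_explicit(&need_new_pool, 0, memory_order_release);
}

This mirrors why depot_init_pool() can test next_pool_required plainly
while smp_store_release() is kept for the benefit of the lockless check in
stack_depot_save().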