The patch titled
     Subject: lib/stackdepot: fix and clean-up atomic annotations
has been added to the -mm mm-unstable branch.  Its filename is
     lib-stackdepot-fix-and-clean-up-atomic-annotations.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/lib-stackdepot-fix-and-clean-up-atomic-annotations.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Subject: lib/stackdepot: fix and clean-up atomic annotations
Date: Wed, 13 Sep 2023 19:14:31 +0200

Simplify comments accompanying the use of atomic accesses in the stack
depot code.

Also drop smp_load_acquire from next_pool_required in depot_init_pool, as
both depot_init_pool and all the smp_store_release's to this variable are
executed under the stack depot lock.

Link: https://lkml.kernel.org/r/e78360a883edac7bc3c6a351c99a6019beacf264.1694625260.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Andrey Konovalov <andreyknvl@xxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Evgenii Stepanov <eugenis@xxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 lib/stackdepot.c |   27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

--- a/lib/stackdepot.c~lib-stackdepot-fix-and-clean-up-atomic-annotations
+++ a/lib/stackdepot.c
@@ -225,10 +225,8 @@ static void depot_init_pool(void **preal
 	/*
 	 * If the next pool is already initialized or the maximum number of
 	 * pools is reached, do not use the preallocated memory.
-	 * smp_load_acquire() here pairs with smp_store_release() below and
-	 * in depot_alloc_stack().
 	 */
-	if (!smp_load_acquire(&next_pool_required))
+	if (!next_pool_required)
 		return;
 
 	/* Check if the current pool is not yet allocated. */
@@ -249,8 +247,8 @@ static void depot_init_pool(void **preal
 		 * At this point, either the next pool is initialized or the
 		 * maximum number of pools is reached. In either case, take
 		 * note that initializing another pool is not required.
-		 * This smp_store_release pairs with smp_load_acquire() above
-		 * and in stack_depot_save().
+		 * smp_store_release pairs with smp_load_acquire in
+		 * stack_depot_save.
 		 */
 		smp_store_release(&next_pool_required, 0);
 	}
@@ -274,15 +272,15 @@ depot_alloc_stack(unsigned long *entries
 		/*
 		 * Move on to the next pool.
 		 * WRITE_ONCE pairs with potential concurrent read in
-		 * stack_depot_fetch().
+		 * stack_depot_fetch.
 		 */
 		WRITE_ONCE(pool_index, pool_index + 1);
 		pool_offset = 0;
 		/*
 		 * If the maximum number of pools is not reached, take note
 		 * that the next pool needs to initialized.
-		 * smp_store_release() here pairs with smp_load_acquire() in
-		 * stack_depot_save() and depot_init_pool().
+		 * smp_store_release pairs with smp_load_acquire in
+		 * stack_depot_save.
 		 */
 		if (pool_index + 1 < DEPOT_MAX_POOLS)
 			smp_store_release(&next_pool_required, 1);
@@ -324,7 +322,7 @@ static struct stack_record *depot_fetch_
 	union handle_parts parts = { .handle = handle };
 	/*
 	 * READ_ONCE pairs with potential concurrent write in
-	 * depot_alloc_stack().
+	 * depot_alloc_stack.
 	 */
 	int pool_index_cached = READ_ONCE(pool_index);
 	void *pool;
@@ -413,8 +411,7 @@ depot_stack_handle_t __stack_depot_save(
 	/*
 	 * Fast path: look the stack trace up without locking.
-	 * The smp_load_acquire() here pairs with smp_store_release() to
-	 * |bucket| below.
+	 * smp_load_acquire pairs with smp_store_release to |bucket| below.
 	 */
 	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
 	if (found)
 		goto exit;
@@ -424,8 +421,8 @@ depot_stack_handle_t __stack_depot_save(
 	 * Check if another stack pool needs to be initialized. If so, allocate
 	 * the memory now - we won't be able to do that under the lock.
 	 *
-	 * The smp_load_acquire() here pairs with smp_store_release() to
-	 * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
+	 * smp_load_acquire pairs with smp_store_release in depot_alloc_stack
+	 * and depot_init_pool.
 	 */
 	if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
 		/*
@@ -451,8 +448,8 @@ depot_stack_handle_t __stack_depot_save(
 	if (new) {
 		new->next = *bucket;
 		/*
-		 * This smp_store_release() pairs with
-		 * smp_load_acquire() from |bucket| above.
+		 * smp_store_release pairs with smp_load_acquire
+		 * from |bucket| above.
 		 */
 		smp_store_release(bucket, new);
 		found = new;
_

Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are

lib-stackdepot-check-disabled-flag-when-fetching.patch
lib-stackdepot-simplify-__stack_depot_save.patch
lib-stackdepot-drop-valid-bit-from-handles.patch
lib-stackdepot-add-depot_fetch_stack-helper.patch
lib-stackdepot-use-fixed-sized-slots-for-stack-records.patch
lib-stackdepot-fix-and-clean-up-atomic-annotations.patch
lib-stackdepot-rework-helpers-for-depot_alloc_stack.patch
lib-stackdepot-rename-next_pool_required-to-new_pool_required.patch
lib-stackdepot-store-next-pool-pointer-in-new_pool.patch
lib-stackdepot-store-free-stack-records-in-a-freelist.patch
lib-stackdepot-use-read-write-lock.patch
lib-stackdepot-use-list_head-for-stack-record-links.patch
kmsan-use-stack_depot_save-instead-of-__stack_depot_save.patch
lib-stackdepot-kasan-add-flags-to-__stack_depot_save-and-rename.patch
lib-stackdepot-add-refcount-for-records.patch
lib-stackdepot-allow-users-to-evict-stack-traces.patch
kasan-remove-atomic-accesses-to-stack-ring-entries.patch
kasan-check-object_size-in-kasan_complete_mode_report_info.patch
kasan-use-stack_depot_put-for-tag-based-modes.patch
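For readers less familiar with the acquire/release pairing argued in the
changelog above, here is a minimal userspace C sketch of the ordering
argument, not the kernel implementation: the names pool_lock,
next_pool_required, fast_path_check() and init_pool_locked() are
illustrative stand-ins, and C11 atomics stand in for the kernel's
smp_load_acquire()/smp_store_release(). The point it mirrors is that a
reader holding the same lock as every writer can use a plain read, while
the lockless fast path still needs an acquire load to pair with the
release store.

/*
 * ordering_sketch.c - userspace analogue of the ordering argument above.
 *
 * NOT the kernel code: pool_lock, next_pool_required, fast_path_check()
 * and init_pool_locked() are illustrative stand-ins, and C11 atomics
 * stand in for smp_load_acquire()/smp_store_release().
 *
 * Build: cc -std=c11 -pthread ordering_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int next_pool_required = 1;

/*
 * Lockless fast path (the stack_depot_save() side): the acquire load
 * pairs with the release store in init_pool_locked() below, so a reader
 * that sees the flag cleared also sees the pool set-up done before the
 * store.
 */
static int fast_path_check(void)
{
	return atomic_load_explicit(&next_pool_required, memory_order_acquire);
}

/*
 * The depot_init_pool() side: always called with pool_lock held, and
 * every store to next_pool_required is also made under pool_lock.  The
 * lock already orders this read against those stores, so a plain
 * (relaxed) read is enough -- this is the smp_load_acquire() the patch
 * drops.
 */
static void init_pool_locked(void)
{
	if (!atomic_load_explicit(&next_pool_required, memory_order_relaxed))
		return;

	/* ... set up the next pool here ... */

	/* The release store still pairs with the acquire load above. */
	atomic_store_explicit(&next_pool_required, 0, memory_order_release);
}

int main(void)
{
	pthread_mutex_lock(&pool_lock);
	init_pool_locked();
	pthread_mutex_unlock(&pool_lock);

	printf("fast path sees next_pool_required = %d\n", fast_path_check());
	return 0;
}

In this sketch, dropping the acquire inside init_pool_locked() is safe
only because of the lock; the release store stays because the lockless
fast path still needs something to pair with, which is the same reasoning
the patch applies to depot_init_pool() and stack_depot_save().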