From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

Add a helper depot_fetch_stack function that fetches the pointer to a
stack record. With this change, all static depot_* functions now
operate on stack pools and the exported stack_depot_* functions operate
on the hash table.

Reviewed-by: Alexander Potapenko <glider@xxxxxxxxxx>
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
---

Changes v1->v2:
- Minor comment fix as suggested by Alexander.
---
 lib/stackdepot.c | 45 ++++++++++++++++++++++++++++-----------------
 1 file changed, 28 insertions(+), 17 deletions(-)

diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 46a422d31c1f..e41713983cac 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -310,6 +310,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 	stack->handle.extra = 0;
 	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
 	pool_offset += required_size;
+
 	/*
 	 * Let KMSAN know the stored stack record is initialized. This shall
 	 * prevent false positive reports if instrumented code accesses it.
@@ -319,6 +320,32 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 	return stack;
 }
 
+static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+{
+	union handle_parts parts = { .handle = handle };
+	/*
+	 * READ_ONCE pairs with potential concurrent write in
+	 * depot_alloc_stack().
+	 */
+	int pool_index_cached = READ_ONCE(pool_index);
+	void *pool;
+	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
+	struct stack_record *stack;
+
+	if (parts.pool_index > pool_index_cached) {
+		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+		     parts.pool_index, pool_index_cached, handle);
+		return NULL;
+	}
+
+	pool = stack_pools[parts.pool_index];
+	if (!pool)
+		return NULL;
+
+	stack = pool + offset;
+	return stack;
+}
+
 /* Calculates the hash for a stack. */
 static inline u32 hash_stack(unsigned long *entries, unsigned int size)
 {
@@ -462,14 +489,6 @@ EXPORT_SYMBOL_GPL(stack_depot_save);
 unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 			       unsigned long **entries)
 {
-	union handle_parts parts = { .handle = handle };
-	/*
-	 * READ_ONCE pairs with potential concurrent write in
-	 * depot_alloc_stack.
-	 */
-	int pool_index_cached = READ_ONCE(pool_index);
-	void *pool;
-	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
 	struct stack_record *stack;
 
 	*entries = NULL;
@@ -482,15 +501,7 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 	if (!handle || stack_depot_disabled)
 		return 0;
 
-	if (parts.pool_index > pool_index_cached) {
-		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
-		     parts.pool_index, pool_index_cached, handle);
-		return 0;
-	}
-	pool = stack_pools[parts.pool_index];
-	if (!pool)
-		return 0;
-	stack = pool + offset;
+	stack = depot_fetch_stack(handle);
 
 	*entries = stack->entries;
 	return stack->size;
-- 
2.25.1
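
[Editor's sketch, not part of the patch: a minimal caller-side example,
assuming the usual <linux/stackdepot.h> and <linux/stacktrace.h>
interfaces, of the exported stack_depot_* entry points that after this
change are the only functions touching the hash table, while pool
access goes through static helpers such as depot_fetch_stack(). The
function names record_current_stack() and print_recorded_stack() are
hypothetical and exist only for illustration.]

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Illustrative only: capture the current stack and deduplicate it. */
static depot_stack_handle_t record_current_stack(gfp_t alloc_flags)
{
	unsigned long entries[64];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, alloc_flags);
}

/* Illustrative only: print a previously saved stack trace by handle. */
static void print_recorded_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	/* stack_depot_fetch() now resolves the handle via depot_fetch_stack(). */
	nr_entries = stack_depot_fetch(handle, &entries);
	if (nr_entries)
		stack_trace_print(entries, nr_entries, 0);
}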