The patch titled
     Subject: kfence: move saving stack trace of allocations into __kfence_alloc()
has been added to the -mm tree.  Its filename is
     kfence-move-saving-stack-trace-of-allocations-into-__kfence_alloc.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/kfence-move-saving-stack-trace-of-allocations-into-__kfence_alloc.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/kfence-move-saving-stack-trace-of-allocations-into-__kfence_alloc.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Marco Elver <elver@xxxxxxxxxx>
Subject: kfence: move saving stack trace of allocations into __kfence_alloc()

Move the saving of the stack trace of allocations into __kfence_alloc(),
so that the stack entries array can be used outside of
kfence_guarded_alloc() and we avoid potentially unwinding the stack
multiple times.

Link: https://lkml.kernel.org/r/20210923104803.2620285-3-elver@xxxxxxxxxx
Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
Reviewed-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Acked-by: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Aleksandr Nogikh <nogikh@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Taras Madan <tarasmadan@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/kfence/core.c |   35 ++++++++++++++++++++++++-----------
 1 file changed, 24 insertions(+), 11 deletions(-)

--- a/mm/kfence/core.c~kfence-move-saving-stack-trace-of-allocations-into-__kfence_alloc
+++ a/mm/kfence/core.c
@@ -187,19 +187,26 @@ static inline unsigned long metadata_to_
  * Update the object's metadata state, including updating the alloc/free stacks
  * depending on the state transition.
  */
-static noinline void metadata_update_state(struct kfence_metadata *meta,
-					   enum kfence_object_state next)
+static noinline void
+metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
+		      unsigned long *stack_entries, size_t num_stack_entries)
 {
 	struct kfence_track *track =
 		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
 
 	lockdep_assert_held(&meta->lock);
 
-	/*
-	 * Skip over 1 (this) functions; noinline ensures we do not accidentally
-	 * skip over the caller by never inlining.
-	 */
-	track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
+	if (stack_entries) {
+		memcpy(track->stack_entries, stack_entries,
+		       num_stack_entries * sizeof(stack_entries[0]));
+	} else {
+		/*
+		 * Skip over 1 (this) functions; noinline ensures we do not
+		 * accidentally skip over the caller by never inlining.
+		 */
+		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
+	}
+	track->num_stack_entries = num_stack_entries;
 	track->pid = task_pid_nr(current);
 	track->cpu = raw_smp_processor_id();
 	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
@@ -261,7 +268,8 @@ static __always_inline void for_each_can
 	}
 }
 
-static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
+static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
+				  unsigned long *stack_entries, size_t num_stack_entries)
 {
 	struct kfence_metadata *meta = NULL;
 	unsigned long flags;
@@ -320,7 +328,7 @@ static void *kfence_guarded_alloc(struct
 	addr = (void *)meta->addr;
 
 	/* Update remaining metadata. */
-	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
+	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
 	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
 	WRITE_ONCE(meta->cache, cache);
 	meta->size = size;
@@ -400,7 +408,7 @@ static void kfence_guarded_free(void *ad
 	memzero_explicit(addr, meta->size);
 
 	/* Mark the object as freed. */
-	metadata_update_state(meta, KFENCE_OBJECT_FREED);
+	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
 
 	raw_spin_unlock_irqrestore(&meta->lock, flags);
 
@@ -742,6 +750,9 @@ void kfence_shutdown_cache(struct kmem_c
 
 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 {
+	unsigned long stack_entries[KFENCE_STACK_DEPTH];
+	size_t num_stack_entries;
+
 	/*
 	 * Perform size check before switching kfence_allocation_gate, so that
 	 * we don't disable KFENCE without making an allocation.
@@ -786,7 +797,9 @@ void *__kfence_alloc(struct kmem_cache *
 	if (!READ_ONCE(kfence_enabled))
 		return NULL;
 
-	return kfence_guarded_alloc(s, size, flags);
+	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
+
+	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries);
 }
 
 size_t kfence_ksize(const void *addr)
_

Patches currently in -mm which might be from elver@xxxxxxxxxx are

kasan-fix-kconfig-check-of-cc_has_working_nosanitize_address.patch
lib-stackdepot-include-gfph.patch
lib-stackdepot-remove-unused-function-argument.patch
lib-stackdepot-introduce-__stack_depot_save.patch
kasan-common-provide-can_alloc-in-kasan_save_stack.patch
kasan-generic-introduce-kasan_record_aux_stack_noalloc.patch
workqueue-kasan-avoid-alloc_pages-when-recording-stack.patch
mm-fix-data-race-in-pagepoisoned.patch
stacktrace-move-filter_irq_stacks-to-kernel-stacktracec.patch
kfence-count-unexpectedly-skipped-allocations.patch
kfence-move-saving-stack-trace-of-allocations-into-__kfence_alloc.patch
kfence-limit-currently-covered-allocations-when-pool-nearly-full.patch
kfence-add-note-to-documentation-about-skipping-covered-allocations.patch
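
For readers new to the change, the control flow it introduces can be
sketched standalone as below.  This is a minimal illustrative sketch, not
the kernel code: save_stack(), struct track, update_track(), alloc_path()
and free_path() are hypothetical stand-ins for stack_trace_save(),
struct kfence_track, metadata_update_state(), __kfence_alloc() and
kfence_guarded_free().  The outer entry point unwinds the stack exactly
once and hands the buffer down; the inner helper copies a pre-saved trace
when given one and unwinds itself only as a fallback (as the free path
still does by passing NULL).

#include <stddef.h>
#include <string.h>

#define STACK_DEPTH 64

/* Stub standing in for stack_trace_save(); here it records one dummy entry. */
static size_t save_stack(unsigned long *buf, size_t max)
{
	if (!max)
		return 0;
	buf[0] = (unsigned long)__builtin_return_address(0);
	return 1;
}

/* Stand-in for struct kfence_track. */
struct track {
	unsigned long stack_entries[STACK_DEPTH];
	size_t num_stack_entries;
};

/* Inner helper: copy a pre-saved trace if given one, unwind otherwise. */
static void update_track(struct track *t, unsigned long *stack_entries,
			 size_t num_stack_entries)
{
	if (stack_entries)
		memcpy(t->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	else
		num_stack_entries = save_stack(t->stack_entries, STACK_DEPTH);
	t->num_stack_entries = num_stack_entries;
}

/* Outer entry point: the one and only unwind happens here, so the saved
 * entries stay available for other uses before being recorded. */
void alloc_path(struct track *t)
{
	unsigned long stack_entries[STACK_DEPTH];
	size_t num = save_stack(stack_entries, STACK_DEPTH);

	update_track(t, stack_entries, num);
}

/* Free path: no pre-saved trace; the helper unwinds as a fallback. */
void free_path(struct track *t)
{
	update_track(t, NULL, 0);
}

One helper thus serves both state transitions, while only the allocation
path pays for the early unwind and gets to reuse the saved entries; the
kfence-limit-currently-covered-allocations patch later in this series
builds on exactly that reuse.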