On Fri, 3 Nov 2023 at 22:27, <andrey.konovalov@xxxxxxxxx> wrote: > > From: Andrey Konovalov <andreyknvl@xxxxxxxxxx> > > Evict alloc/free stack traces from the stack depot for Generic KASAN > once they are evicted from the quarantine. > > For auxiliary stack traces, evict the oldest stack trace once a new one > is saved (KASAN only keeps references to the last two). > > Also evict all saved stack traces on krealloc. > > To avoid double-evicting and mis-evicting stack traces (in case KASAN's > metadata was corrupted), reset KASAN's per-object metadata that stores > stack depot handles when the object is initialized and when it's evicted > from the quarantine. > > Note that stack_depot_put is a no-op if the handle is 0. > > Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx> Reviewed-by: Marco Elver <elver@xxxxxxxxxx> Maybe mention the space and performance difference from your experiments. As-is, it's a bit cryptic what the benefit is. I assume this patch goes along with the other series. > --- > > This goes on top of the "stackdepot: allow evicting stack traces" series. > I'll mail the patches all together after the merge window. 
> --- > mm/kasan/common.c | 3 ++- > mm/kasan/generic.c | 22 ++++++++++++++++++---- > mm/kasan/quarantine.c | 26 ++++++++++++++++++++------ > 3 files changed, 40 insertions(+), 11 deletions(-) > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c > index 825a0240ec02..b5d8bd26fced 100644 > --- a/mm/kasan/common.c > +++ b/mm/kasan/common.c > @@ -50,7 +50,8 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags) > void kasan_set_track(struct kasan_track *track, gfp_t flags) > { > track->pid = current->pid; > - track->stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC); > + track->stack = kasan_save_stack(flags, > + STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET); > } > > #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c > index 5d168c9afb32..50cc519e23f4 100644 > --- a/mm/kasan/generic.c > +++ b/mm/kasan/generic.c > @@ -449,10 +449,14 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache, > void kasan_init_object_meta(struct kmem_cache *cache, const void *object) > { > struct kasan_alloc_meta *alloc_meta; > + struct kasan_free_meta *free_meta; > > alloc_meta = kasan_get_alloc_meta(cache, object); > if (alloc_meta) > __memset(alloc_meta, 0, sizeof(*alloc_meta)); > + free_meta = kasan_get_free_meta(cache, object); > + if (free_meta) > + __memset(free_meta, 0, sizeof(*free_meta)); > } > > size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object) > @@ -489,18 +493,20 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags) > if (!alloc_meta) > return; > > + stack_depot_put(alloc_meta->aux_stack[1]); > alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0]; > alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags); > } > > void kasan_record_aux_stack(void *addr) > { > - return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC); > + return __kasan_record_aux_stack(addr, > + STACK_DEPOT_FLAG_CAN_ALLOC | 
STACK_DEPOT_FLAG_GET); > } > > void kasan_record_aux_stack_noalloc(void *addr) > { > - return __kasan_record_aux_stack(addr, 0); > + return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET); > } > > void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) > @@ -508,8 +514,16 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) > struct kasan_alloc_meta *alloc_meta; > > alloc_meta = kasan_get_alloc_meta(cache, object); > - if (alloc_meta) > - kasan_set_track(&alloc_meta->alloc_track, flags); > + if (!alloc_meta) > + return; > + > + /* Evict previous stack traces (might exist for krealloc). */ > + stack_depot_put(alloc_meta->alloc_track.stack); > + stack_depot_put(alloc_meta->aux_stack[0]); > + stack_depot_put(alloc_meta->aux_stack[1]); > + __memset(alloc_meta, 0, sizeof(*alloc_meta)); > + > + kasan_set_track(&alloc_meta->alloc_track, flags); > } > > void kasan_save_free_info(struct kmem_cache *cache, void *object) > diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c > index 152dca73f398..37fb0e3f5876 100644 > --- a/mm/kasan/quarantine.c > +++ b/mm/kasan/quarantine.c > @@ -141,11 +141,22 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache) > static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) > { > void *object = qlink_to_object(qlink, cache); > - struct kasan_free_meta *meta = kasan_get_free_meta(cache, object); > + struct kasan_alloc_meta *alloc_meta = kasan_get_alloc_meta(cache, object); > + struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object); > unsigned long flags; > > - if (IS_ENABLED(CONFIG_SLAB)) > - local_irq_save(flags); > + if (alloc_meta) { > + stack_depot_put(alloc_meta->alloc_track.stack); > + stack_depot_put(alloc_meta->aux_stack[0]); > + stack_depot_put(alloc_meta->aux_stack[1]); > + __memset(alloc_meta, 0, sizeof(*alloc_meta)); > + } > + > + if (free_meta && > + *(u8 *)kasan_mem_to_shadow(object) == 
KASAN_SLAB_FREETRACK) { > + stack_depot_put(free_meta->free_track.stack); > + free_meta->free_track.stack = 0; > + } > > /* > * If init_on_free is enabled and KASAN's free metadata is stored in > @@ -155,14 +166,17 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) > */ > if (slab_want_init_on_free(cache) && > cache->kasan_info.free_meta_offset == 0) > - memzero_explicit(meta, sizeof(*meta)); > + memzero_explicit(free_meta, sizeof(*free_meta)); > > /* > - * As the object now gets freed from the quarantine, assume that its > - * free track is no longer valid. > + * As the object now gets freed from the quarantine, > + * take note that its free track is no longer exists. > */ > *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE; > > + if (IS_ENABLED(CONFIG_SLAB)) > + local_irq_save(flags); > + > ___cache_free(cache, object, _THIS_IP_); > > if (IS_ENABLED(CONFIG_SLAB)) > -- > 2.25.1 >