Re: [PATCH 1/2] stackdepot: use variable size records for non-evictable entries

On Fri, Jan 26, 2024 at 3:08 PM Marco Elver <elver@xxxxxxxxxx> wrote:
>
> On Thu, Jan 25, 2024 at 11:35PM +0100, Andrey Konovalov wrote:
> [...]
> > I wonder if we should separate the stat counters for
> > evictable/non-evictable cases. For non-evictable, we could count the
> > amount of consumed memory.
> [...]
> >
> > We can also now drop the special case for DEPOT_POOLS_CAP for KMSAN.
> >
> > Otherwise, looks good to me.
> >
> > Reviewed-by: Andrey Konovalov <andreyknvl@xxxxxxxxx>
> >
> > Thank you for cleaning this up!
>
> Thanks - I'll probably add this change for v2:
>
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index 1b0d948a053c..8f3b2c84ec2d 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -44,17 +44,7 @@
>  #define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
>  #define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
>                                STACK_DEPOT_EXTRA_BITS)
> -#if IS_ENABLED(CONFIG_KMSAN) && CONFIG_STACKDEPOT_MAX_FRAMES >= 32
> -/*
> - * KMSAN is frequently used in fuzzing scenarios and thus saves a lot of stack
> - * traces. As KMSAN does not support evicting stack traces from the stack
> - * depot, the stack depot capacity might be reached quickly with large stack
> - * records. Adjust the maximum number of stack depot pools for this case.
> - */
> -#define DEPOT_POOLS_CAP (8192 * (CONFIG_STACKDEPOT_MAX_FRAMES / 16))
> -#else
>  #define DEPOT_POOLS_CAP 8192
> -#endif
>  #define DEPOT_MAX_POOLS \
>         (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
>          (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
> @@ -128,18 +118,22 @@ static DEFINE_RAW_SPINLOCK(pool_lock);
>
>  /* Statistics counters for debugfs. */
>  enum depot_counter_id {
> -       DEPOT_COUNTER_ALLOCS,
> -       DEPOT_COUNTER_FREES,
> -       DEPOT_COUNTER_INUSE,
> +       DEPOT_COUNTER_REFD_ALLOCS,
> +       DEPOT_COUNTER_REFD_FREES,
> +       DEPOT_COUNTER_REFD_INUSE,
>         DEPOT_COUNTER_FREELIST_SIZE,
> +       DEPOT_COUNTER_PERSIST_COUNT,
> +       DEPOT_COUNTER_PERSIST_BYTES,
>         DEPOT_COUNTER_COUNT,
>  };
>  static long counters[DEPOT_COUNTER_COUNT];
>  static const char *const counter_names[] = {
> -       [DEPOT_COUNTER_ALLOCS]          = "allocations",
> -       [DEPOT_COUNTER_FREES]           = "frees",
> -       [DEPOT_COUNTER_INUSE]           = "in_use",
> +       [DEPOT_COUNTER_REFD_ALLOCS]     = "refcounted_allocations",
> +       [DEPOT_COUNTER_REFD_FREES]      = "refcounted_frees",
> +       [DEPOT_COUNTER_REFD_INUSE]      = "refcounted_in_use",
>         [DEPOT_COUNTER_FREELIST_SIZE]   = "freelist_size",
> +       [DEPOT_COUNTER_PERSIST_COUNT]   = "persistent_count",
> +       [DEPOT_COUNTER_PERSIST_BYTES]   = "persistent_bytes",
>  };
>  static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
>
> @@ -388,7 +382,7 @@ static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
>         return stack;
>  }
>
> -/* Try to find next free usable entry. */
> +/* Try to find next free usable entry from the freelist. */
>  static struct stack_record *depot_pop_free(void)
>  {
>         struct stack_record *stack;
> @@ -466,9 +460,13 @@ depot_alloc_stack(unsigned long *entries, int nr_entries, u32 hash, depot_flags_
>
>         if (flags & STACK_DEPOT_FLAG_GET) {
>                 refcount_set(&stack->count, 1);
> +               counters[DEPOT_COUNTER_REFD_ALLOCS]++;
> +               counters[DEPOT_COUNTER_REFD_INUSE]++;
>         } else {
>                 /* Warn on attempts to switch to refcounting this entry. */
>                 refcount_set(&stack->count, REFCOUNT_SATURATED);
> +               counters[DEPOT_COUNTER_PERSIST_COUNT]++;
> +               counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;
>         }
>
>         /*
> @@ -477,8 +475,6 @@ depot_alloc_stack(unsigned long *entries, int nr_entries, u32 hash, depot_flags_
>          */
>         kmsan_unpoison_memory(stack, record_size);
>
> -       counters[DEPOT_COUNTER_ALLOCS]++;
> -       counters[DEPOT_COUNTER_INUSE]++;
>         return stack;
>  }
>
> @@ -546,8 +542,8 @@ static void depot_free_stack(struct stack_record *stack)
>         list_add_tail(&stack->free_list, &free_stacks);
>
>         counters[DEPOT_COUNTER_FREELIST_SIZE]++;
> -       counters[DEPOT_COUNTER_FREES]++;
> -       counters[DEPOT_COUNTER_INUSE]--;
> +       counters[DEPOT_COUNTER_REFD_FREES]++;
> +       counters[DEPOT_COUNTER_REFD_INUSE]--;
>
>         printk_deferred_exit();
>         raw_spin_unlock_irqrestore(&pool_lock, flags);

Looks good to me, thanks!
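
For readers skimming the archive: the rename above pairs an enum of counter
IDs with a designated-initializer name table and a static_assert, which helps
catch mismatches between the two at compile time (e.g. appending a counter ID
without adding its name shrinks ARRAY_SIZE below the expected count). A
minimal standalone sketch of that pattern — identifiers here are illustrative,
not the kernel's:

	#include <stdio.h>

	/* Counter IDs; the final entry doubles as the array length. */
	enum counter_id {
		COUNTER_REFD_ALLOCS,
		COUNTER_REFD_FREES,
		COUNTER_PERSIST_BYTES,
		COUNTER_COUNT,		/* must stay last */
	};

	static long counters[COUNTER_COUNT];

	/* Human-readable names, indexed by counter_id. */
	static const char *const counter_names[] = {
		[COUNTER_REFD_ALLOCS]	= "refcounted_allocations",
		[COUNTER_REFD_FREES]	= "refcounted_frees",
		[COUNTER_PERSIST_BYTES]	= "persistent_bytes",
	};

	/* Build-time check that every ID has a name. */
	_Static_assert(sizeof(counter_names) / sizeof(counter_names[0]) ==
		       COUNTER_COUNT, "counter_names out of sync");

	int main(void)
	{
		counters[COUNTER_PERSIST_BYTES] += 128;	/* e.g. one 128-byte record */
		for (int i = 0; i < COUNTER_COUNT; i++)
			printf("%s: %ld\n", counter_names[i], counters[i]);
		return 0;
	}

With the patch applied, the renamed counters should show up in the stats file
the depot exports via debugfs (added earlier in this series; on kernels with
debugfs mounted, that is typically /sys/kernel/debug/stackdepot/stats).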