On Thu, Sep 23, 2021 at 12:48 PM Marco Elver <elver@xxxxxxxxxx> wrote:
>
> Maintain a counter to count allocations that are skipped due to being
> incompatible (oversized, incompatible gfp flags) or no capacity.
>
> This is to compute the fraction of allocations that could not be
> serviced by KFENCE, which we expect to be rare.
>
> Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
> Reviewed-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx>

Acked-by: Alexander Potapenko <glider@xxxxxxxxxx>

> ---
> v2:
> * Do not count deadlock-avoidance skips.
> ---
>  mm/kfence/core.c | 16 +++++++++++++---
>  1 file changed, 13 insertions(+), 3 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 7a97db8bc8e7..249d75b7e5ee 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -112,6 +112,8 @@ enum kfence_counter_id {
>         KFENCE_COUNTER_FREES,
>         KFENCE_COUNTER_ZOMBIES,
>         KFENCE_COUNTER_BUGS,
> +       KFENCE_COUNTER_SKIP_INCOMPAT,
> +       KFENCE_COUNTER_SKIP_CAPACITY,
>         KFENCE_COUNTER_COUNT,
>  };
>  static atomic_long_t counters[KFENCE_COUNTER_COUNT];
> @@ -121,6 +123,8 @@ static const char *const counter_names[] = {
>         [KFENCE_COUNTER_FREES]          = "total frees",
>         [KFENCE_COUNTER_ZOMBIES]        = "zombie allocations",
>         [KFENCE_COUNTER_BUGS]           = "total bugs",
> +       [KFENCE_COUNTER_SKIP_INCOMPAT]  = "skipped allocations (incompatible)",
> +       [KFENCE_COUNTER_SKIP_CAPACITY]  = "skipped allocations (capacity)",
>  };
>  static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
>
> @@ -271,8 +275,10 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>                 list_del_init(&meta->list);
>         }
>         raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
> -       if (!meta)
> +       if (!meta) {
> +               atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
>                 return NULL;
> +       }
>
>         if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
>                 /*
> @@ -740,8 +746,10 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
>          * Perform size check before switching kfence_allocation_gate, so that
>          * we don't disable KFENCE without making an allocation.
>          */
> -       if (size > PAGE_SIZE)
> +       if (size > PAGE_SIZE) {
> +               atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
>                 return NULL;
> +       }
>
>         /*
>          * Skip allocations from non-default zones, including DMA. We cannot
> @@ -749,8 +757,10 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
>          * properties (e.g. reside in DMAable memory).
>          */
>         if ((flags & GFP_ZONEMASK) ||
> -           (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
> +           (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
> +               atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
>                 return NULL;
> +       }
>
>         /*
>          * allocation_gate only needs to become non-zero, so it doesn't make
> --
> 2.33.0.464.g1972c5931b-goog
>

--
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Paul Manicle, Halimah DeLaine Prado
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg
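
[Editorial aside, not part of the thread.] The changelog's stated goal is computing the fraction of allocations KFENCE could not service. With these counters exported through KFENCE's debugfs stats file, that fraction can be derived in userspace. The program below is only an illustrative sketch: it assumes the stats are readable at /sys/kernel/debug/kfence/stats as one "name: value" line per counter, that "total allocations" is the existing successful-allocation counter label, and the two new labels come from counter_names[] in the patch above.

/*
 * Illustrative sketch (assumptions noted above, not part of the patch):
 * read the KFENCE debugfs stats (typically requires root) and report the
 * fraction of allocation attempts that KFENCE skipped.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/kfence/stats", "r");
        char line[128];
        long allocs = 0, skip_incompat = 0, skip_capacity = 0, val;

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "total allocations: %ld", &val) == 1)
                        allocs = val;
                else if (sscanf(line, "skipped allocations (incompatible): %ld", &val) == 1)
                        skip_incompat = val;
                else if (sscanf(line, "skipped allocations (capacity): %ld", &val) == 1)
                        skip_capacity = val;
        }
        fclose(f);

        long skipped = skip_incompat + skip_capacity;
        long attempts = allocs + skipped;

        printf("skipped %ld of %ld KFENCE allocation attempts (%.2f%%)\n",
               skipped, attempts,
               attempts ? 100.0 * skipped / attempts : 0.0);
        return 0;
}

Since deadlock-avoidance skips are deliberately not counted (per v2), the denominator above is only an approximation of the true number of attempts.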