Re: [RFC PATCH 30/32] mm/sl*b: Differentiate struct slab fields by sl*b implementations

On Tue, 16 Nov 2021 at 01:16, Vlastimil Babka <vbabka@xxxxxxx> wrote:
> With a struct slab definition separate from struct page, we can go further and
> define only fields that the chosen sl*b implementation uses. This means
> everything between __page_flags and __page_refcount placeholders now depends on
> the chosen CONFIG_SL*B. Some fields exist in all implementations (slab_list)
> but can be part of a union in some, so it's simpler to repeat them than
> complicate the definition with ifdefs even more.
>
> The patch doesn't change physical offsets of the fields, although it could be
> done later - for example it's now clear that tighter packing in SLOB could be
> possible.
>
> This should also prevent accidental use of fields that don't exist in the
> given implementation. Before this patch, virt_to_cache() and cache_from_obj()
> were visible for SLOB (albeit not used), although they rely on the slab_cache
> field that isn't set by SLOB. With this patch, that would be a compile error,
> so these functions are now hidden behind #ifndef CONFIG_SLOB.
>
> Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
> Cc: Alexander Potapenko <glider@xxxxxxxxxx> (maintainer:KFENCE)
> Cc: Marco Elver <elver@xxxxxxxxxx> (maintainer:KFENCE)
> Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx> (reviewer:KFENCE)
> Cc: <kasan-dev@xxxxxxxxxxxxxxxx>

Ran kfence_test with both SLAB and SLUB, and all tests pass:

Tested-by: Marco Elver <elver@xxxxxxxxxx>

> ---
>  mm/kfence/core.c |  9 +++++----
>  mm/slab.h        | 46 ++++++++++++++++++++++++++++++++++++----------
>  2 files changed, 41 insertions(+), 14 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 4eb60cf5ff8b..46103a7628a6 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>         /* Set required slab fields. */
>         slab = virt_to_slab((void *)meta->addr);
>         slab->slab_cache = cache;
> -       if (IS_ENABLED(CONFIG_SLUB))
> -               slab->objects = 1;
> -       if (IS_ENABLED(CONFIG_SLAB))
> -               slab->s_mem = addr;
> +#if defined(CONFIG_SLUB)
> +       slab->objects = 1;
> +#elif defined (CONFIG_SLAB)
> +       slab->s_mem = addr;
> +#endif
>
>         /* Memory initialization. */
>         for_each_canary(meta, set_canary_byte);
> diff --git a/mm/slab.h b/mm/slab.h
> index 58b65e5e5d49..10a9ee195249 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -8,9 +8,24 @@
>  /* Reuses the bits in struct page */
>  struct slab {
>         unsigned long __page_flags;
> +
> +#if defined(CONFIG_SLAB)
> +
> +       union {
> +               struct list_head slab_list;
> +               struct rcu_head rcu_head;
> +       };
> +       struct kmem_cache *slab_cache;
> +       void *freelist; /* array of free object indexes */
> +       void * s_mem;   /* first object */
> +       unsigned int active;
> +
> +#elif defined(CONFIG_SLUB)
> +
>         union {
>                 struct list_head slab_list;
> -               struct {        /* Partial pages */
> +               struct rcu_head rcu_head;
> +               struct {
>                         struct slab *next;
>  #ifdef CONFIG_64BIT
>                         int slabs;      /* Nr of slabs left */
> @@ -18,25 +33,32 @@ struct slab {
>                         short int slabs;
>  #endif
>                 };
> -               struct rcu_head rcu_head;
>         };
> -       struct kmem_cache *slab_cache; /* not slob */
> +       struct kmem_cache *slab_cache;
>         /* Double-word boundary */
>         void *freelist;         /* first free object */
>         union {
> -               void *s_mem;    /* slab: first object */
> -               unsigned long counters;         /* SLUB */
> -               struct {                        /* SLUB */
> +               unsigned long counters;
> +               struct {
>                         unsigned inuse:16;
>                         unsigned objects:15;
>                         unsigned frozen:1;
>                 };
>         };
> +       unsigned int __unused;
> +
> +#elif defined(CONFIG_SLOB)
> +
> +       struct list_head slab_list;
> +       void * __unused_1;
> +       void *freelist;         /* first free block */
> +       void * __unused_2;
> +       int units;
> +
> +#else
> +#error "Unexpected slab allocator configured"
> +#endif
>
> -       union {
> -               unsigned int active;            /* SLAB */
> -               int units;                      /* SLOB */
> -       };
>         atomic_t __page_refcount;
>  #ifdef CONFIG_MEMCG
>         unsigned long memcg_data;
> @@ -47,7 +69,9 @@ struct slab {
>         static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
>  SLAB_MATCH(flags, __page_flags);
>  SLAB_MATCH(compound_head, slab_list);  /* Ensure bit 0 is clear */
> +#ifndef CONFIG_SLOB
>  SLAB_MATCH(rcu_head, rcu_head);
> +#endif
>  SLAB_MATCH(_refcount, __page_refcount);
>  #ifdef CONFIG_MEMCG
>  SLAB_MATCH(memcg_data, memcg_data);
> @@ -623,6 +647,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
>  }
>  #endif /* CONFIG_MEMCG_KMEM */
>
> +#ifndef CONFIG_SLOB
>  static inline struct kmem_cache *virt_to_cache(const void *obj)
>  {
>         struct slab *slab;
> @@ -669,6 +694,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
>                 print_tracking(cachep, x);
>         return cachep;
>  }
> +#endif /* CONFIG_SLOB */
>
>  static inline size_t slab_ksize(const struct kmem_cache *s)
>  {
> --
> 2.33.1
>
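As a side note for anyone reading along: the SLAB_MATCH() assertions in the
hunk above are compile-time checks that struct slab keeps each of its fields
at the same offset as the corresponding struct page field, since the two
overlay the same memory. A minimal, self-contained sketch of that pattern
(the struct layouts and field names below are illustrative only, not the
real kernel definitions):

  #include <assert.h>
  #include <stddef.h>

  /* Stand-ins for struct page and struct slab; fields are for the example only. */
  struct page_like {
          unsigned long flags;
          void *compound_head;
          int refcount;
  };

  struct slab_like {
          unsigned long __page_flags;
          void *slab_list_head;
          int __page_refcount;
  };

  /* Fail the build if a field of the overlaying struct drifts out of place. */
  #define SLAB_MATCH(pg, sl)                                      \
          static_assert(offsetof(struct page_like, pg) ==         \
                        offsetof(struct slab_like, sl),           \
                        "offset mismatch: " #pg " / " #sl)

  SLAB_MATCH(flags, __page_flags);
  SLAB_MATCH(refcount, __page_refcount);

  int main(void) { return 0; }

If a field ever moves out of sync, the build fails with the offset-mismatch
message instead of silently corrupting page metadata at runtime.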



