On Tue, Jan 04, 2022 at 01:10:41AM +0100, Vlastimil Babka wrote:
> With a struct slab definition separate from struct page, we can go
> further and define only the fields that the chosen sl*b implementation
> uses. This means everything between the __page_flags and
> __page_refcount placeholders now depends on the chosen CONFIG_SL*B.
> Some fields exist in all implementations (slab_list) but can be part
> of a union in some, so it's simpler to repeat them than to complicate
> the definition with even more ifdefs.
> 
> The patch doesn't change the physical offsets of the fields, although
> that could be done later - for example, it's now clear that tighter
> packing would be possible in SLOB.
> 
> This should also prevent accidental use of fields that don't exist in
> the given implementation. Before this patch, virt_to_cache() and
> cache_from_obj() were visible for SLOB (albeit not used), although
> they rely on the slab_cache field that SLOB doesn't set. With this
> patch that's now a compile error, so these functions are hidden
> behind an #ifndef CONFIG_SLOB.
> 
> Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
> Tested-by: Marco Elver <elver@xxxxxxxxxx> # kfence
> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
> Tested-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
> Cc: Alexander Potapenko <glider@xxxxxxxxxx>
> Cc: Marco Elver <elver@xxxxxxxxxx>
> Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
> Cc: <kasan-dev@xxxxxxxxxxxxxxxx>
> ---
>  mm/kfence/core.c |  9 +++++----
>  mm/slab.h        | 48 ++++++++++++++++++++++++++++++++++++++----------
>  2 files changed, 43 insertions(+), 14 deletions(-)
> 
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 4eb60cf5ff8b..267dfde43b91 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>  	/* Set required slab fields. */
>  	slab = virt_to_slab((void *)meta->addr);
>  	slab->slab_cache = cache;
> -	if (IS_ENABLED(CONFIG_SLUB))
> -		slab->objects = 1;
> -	if (IS_ENABLED(CONFIG_SLAB))
> -		slab->s_mem = addr;
> +#if defined(CONFIG_SLUB)
> +	slab->objects = 1;
> +#elif defined(CONFIG_SLAB)
> +	slab->s_mem = addr;
> +#endif
> 
>  	/* Memory initialization. */
>  	for_each_canary(meta, set_canary_byte);
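A note on the kfence hunk above, since the switch away from
IS_ENABLED() is easy to misread as a style change: IS_ENABLED() is an
ordinary C expression, so the compiler still parses and type-checks the
dead branch, and every field it references must exist in every
configuration. Once struct slab defines ->objects only for SLUB and
->s_mem only for SLAB, the check has to move to the preprocessor. A
minimal sketch of the pattern - struct demo and its fields are made up
for illustration, not taken from the patch:

	struct demo {
	#if defined(CONFIG_SLUB)
		unsigned int objects;	/* exists only for SLUB */
	#elif defined(CONFIG_SLAB)
		void *s_mem;		/* exists only for SLAB */
	#endif
	};

	static void demo_set_fields(struct demo *d, void *addr)
	{
		/*
		 * This would no longer build under CONFIG_SLAB: the
		 * branch is dead at runtime but still compiled, and
		 * d->objects does not exist there:
		 *
		 *	if (IS_ENABLED(CONFIG_SLUB))
		 *		d->objects = 1;
		 */
	#if defined(CONFIG_SLUB)
		d->objects = 1;
	#elif defined(CONFIG_SLAB)
		d->s_mem = addr;
	#endif
	}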
> diff --git a/mm/slab.h b/mm/slab.h
> index 36e0022d8267..b8da249f44f9 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -8,9 +8,24 @@
>  /* Reuses the bits in struct page */
>  struct slab {
>  	unsigned long __page_flags;
> +
> +#if defined(CONFIG_SLAB)
> +
>  	union {
>  		struct list_head slab_list;
> -		struct {	/* Partial pages */
> +		struct rcu_head rcu_head;
> +	};
> +	struct kmem_cache *slab_cache;
> +	void *freelist;	/* array of free object indexes */
> +	void *s_mem;	/* first object */
> +	unsigned int active;
> +
> +#elif defined(CONFIG_SLUB)
> +
> +	union {
> +		struct list_head slab_list;
> +		struct rcu_head rcu_head;
> +		struct {
>  			struct slab *next;
>  #ifdef CONFIG_64BIT
>  			int slabs;	/* Nr of slabs left */
> @@ -18,25 +33,32 @@ struct slab {
>  			short int slabs;
>  #endif
>  		};
> -		struct rcu_head rcu_head;
>  	};
> -	struct kmem_cache *slab_cache; /* not slob */
> +	struct kmem_cache *slab_cache;
>  	/* Double-word boundary */
>  	void *freelist;		/* first free object */
>  	union {
> -		void *s_mem;	/* slab: first object */
> -		unsigned long counters;		/* SLUB */
> -		struct {			/* SLUB */
> +		unsigned long counters;
> +		struct {
>  			unsigned inuse:16;
>  			unsigned objects:15;
>  			unsigned frozen:1;
>  		};
>  	};
> +	unsigned int __unused;
> +
> +#elif defined(CONFIG_SLOB)
> +
> +	struct list_head slab_list;
> +	void *__unused_1;
> +	void *freelist;		/* first free block */
> +	void *__unused_2;
> +	int units;
> +
> +#else
> +#error "Unexpected slab allocator configured"
> +#endif

Nice!

Reviewed-by: Roman Gushchin <guro@xxxxxx>
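A footnote on the mm/slab.h changes not shown in the quote above: per
the changelog, virt_to_cache() and cache_from_obj() dereference
slab->slab_cache, which no longer exists for SLOB, so the patch wraps
them in #ifndef CONFIG_SLOB. Roughly this shape - a sketch, not the
verbatim hunk:

	#ifndef CONFIG_SLOB
	static inline struct kmem_cache *virt_to_cache(const void *obj)
	{
		struct slab *slab;

		slab = virt_to_slab(obj);
		if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
			      __func__))
			return NULL;
		/* compile error under CONFIG_SLOB: no slab_cache field */
		return slab->slab_cache;
	}
	/* ... cache_from_obj() is guarded the same way ... */
	#endif /* !CONFIG_SLOB */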
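Also, on "the patch doesn't change the physical offsets of the fields":
struct slab reuses the bits in struct page, so each per-CONFIG layout
above must keep the overlaid fields where struct page expects them. The
struct-slab series enforces that invariant with compile-time offset
checks; they look roughly like this (a sketch, not the verbatim
mm/slab.h code):

	#define SLAB_MATCH(pg, sl)					\
		static_assert(offsetof(struct page, pg) ==		\
			      offsetof(struct slab, sl))
	SLAB_MATCH(flags, __page_flags);	/* must stay page-compatible */
	SLAB_MATCH(_refcount, __page_refcount);
	#undef SLAB_MATCH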