On 8/25/22 09:13, Vlastimil Babka wrote:
> On 8/25/22 04:56, Joel Fernandes wrote:
>> Hi,
>> I am trying to add some debug information to rcu_head for some patches,
>> and need your help! At least this used to work some time ago (> 1 year).
>
> Hmm, got a patch of how it worked back then? Because what I see in the
> (v5.12) struct page definition:
>
>     struct rcu_head rcu_head;
>
> is in union with the whole of
>
>     struct {    /* slab, slob and slub */ ... }
>
> which starts with
>
>     union { struct list_head slab_list; ... }
>     struct kmem_cache *slab_cache; /* not slob */
>
> and there's e.g. kmem_rcu_free() in mm/slab.c that does
>
>     page = container_of(head, struct page, rcu_head);
>     cachep = page->slab_cache;
>
> and a very similar thing in mm/slub.c rcu_free_slab() - we assume that the
> rcu_head and slab_cache fields can coexist.
>
> So this looks silently fragile to me? In case rcu_head becomes larger than
> list_head, e.g. with your buf[4], it would start overlapping the
> page->slab_cache field, no?
>
> AFAICS the way it's structured now in struct slab in mm/slab.h makes the
> overlap impossible, thus struct slab will grow as rcu_head grows, and the
> offsets of the later fields stop matching their struct page counterparts,
> which doesn't grow.

Hmm. The following makes things compile for both SLUB and SLAB (I didn't
adjust SLOB); the idea is to state more explicitly that everything may
overlap with rcu_head except slab_cache, so the structures don't even grow
in the end. That requires moving the slab_cache field around, which would be
fine except for SLUB and its cmpxchg_double usage: cmpxchg_double requires
the freelist+counters pair to be 128-bit aligned, and with that constraint
we can't fit everything. So SLUB is broken with this patch and I don't see
an easy way to fix it. But maybe it can unblock your current debugging
efforts if you use SLAB, or if you disable cmpxchg_double in SLUB (at some
perf cost).
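To make the hazard concrete, here's a quick userspace-compilable sketch (toy
struct names, not the real kernel definitions) of how growing the rcu_head
side of the union past list_head shifts the offset of the field that follows
it:

/* toy model: a grown rcu_head shifts the fields after the union (not kernel code) */
#include <stdio.h>
#include <stddef.h>

struct list_head { void *next, *prev; };

struct cb_small { void *next; void (*func)(void *); };			/* 16 bytes on 64-bit */
struct cb_big   { void *next; void (*func)(void *); char buf[4]; };	/* pads to 24 on 64-bit */

struct slab_small {
	union {
		struct list_head slab_list;
		struct cb_small rcu_head;
	};
	void *slab_cache;
};

struct slab_big {
	union {
		struct list_head slab_list;
		struct cb_big rcu_head;		/* now larger than list_head */
	};
	void *slab_cache;
};

int main(void)
{
	/* once rcu_head outgrows list_head, slab_cache moves, so anything
	 * relying on struct-page-compatible offsets silently breaks */
	printf("slab_cache offset: %zu -> %zu\n",
	       offsetof(struct slab_small, slab_cache),
	       offsetof(struct slab_big, slab_cache));
	return 0;
}

On a typical 64-bit build this prints 16 -> 24, i.e. slab_cache no longer
lines up with its struct page counterpart.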
----8<----
diff --git a/include/linux/types.h b/include/linux/types.h
index ea8cf60a8a79..daf7682a7a3b 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -220,6 +220,7 @@ struct ustat {
 struct callback_head {
 	struct callback_head *next;
 	void (*func)(struct callback_head *head);
+	char buf[4];
 } __attribute__((aligned(sizeof(void *))));
 #define rcu_head callback_head
 
diff --git a/mm/slab.h b/mm/slab.h
index 4ec82bec15ec..9fdbfcc401c0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -12,37 +12,42 @@ struct slab {
 #if defined(CONFIG_SLAB)
 
 	union {
-		struct list_head slab_list;
+		struct {
+			struct list_head slab_list;
+			void *freelist;	/* array of free object indexes */
+			void *s_mem;	/* first object */
+		};
 		struct rcu_head rcu_head;
 	};
 	struct kmem_cache *slab_cache;
-	void *freelist;	/* array of free object indexes */
-	void *s_mem;	/* first object */
 	unsigned int active;
 
 #elif defined(CONFIG_SLUB)
 
 	union {
-		struct list_head slab_list;
-		struct rcu_head rcu_head;
-#ifdef CONFIG_SLUB_CPU_PARTIAL
 		struct {
-			struct slab *next;
-			int slabs;	/* Nr of slabs left */
-		};
+			union {
+				struct list_head slab_list;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+				struct {
+					struct slab *next;
+					int slabs;	/* Nr of slabs left */
+				};
 #endif
-	};
-	struct kmem_cache *slab_cache;
-	/* Double-word boundary */
-	void *freelist;		/* first free object */
-	union {
-		unsigned long counters;
-		struct {
-			unsigned inuse:16;
-			unsigned objects:15;
-			unsigned frozen:1;
+			};
+			void *freelist;		/* first free object */
+			union {
+				unsigned long counters;
+				struct {
+					unsigned inuse:16;
+					unsigned objects:15;
+					unsigned frozen:1;
+				};
+			};
 		};
+		struct rcu_head rcu_head;
 	};
+	struct kmem_cache *slab_cache;
 	unsigned int __unused;
 
 #elif defined(CONFIG_SLOB)
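For reference, here is a standalone sketch (toy types loosely mirroring the
rearranged SLUB layout above, not the actual kernel headers) of why the
cmpxchg_double constraint can't be met: with the slab_list union sitting
ahead of it inside the anonymous struct, freelist no longer lands on a
16-byte boundary:

/* toy model of the rearranged SLUB struct slab (not kernel code) */
#include <stdio.h>
#include <stddef.h>

struct list_head { void *next, *prev; };
struct rcu_head  { void *next; void (*func)(void *); char buf[4]; };

struct toy_slab {
	unsigned long __page_flags;
	union {
		struct {
			union {
				struct list_head slab_list;
			};
			void *freelist;		/* first free object */
			unsigned long counters;
		};
		struct rcu_head rcu_head;
	};
	void *slab_cache;
	unsigned int __unused;
};

int main(void)
{
	size_t off = offsetof(struct toy_slab, freelist);

	/* cmpxchg_double wants the freelist+counters pair on a 16-byte
	 * (double-word) boundary; here freelist ends up at offset 24 on 64-bit */
	printf("freelist offset = %zu, double-word aligned: %s\n",
	       off, (off % 16 == 0) ? "yes" : "no");
	return 0;
}

In the current layout, freelist sits at offset 32 on 64-bit (the
/* Double-word boundary */ comment in the removed lines), which is why the
constraint could be met before.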