On Fri, Jul 09 2021 at 07:21, Mike Galbraith wrote:
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2497,7 +2497,9 @@ static void put_cpu_partial(struct kmem_
>  				 * partial array is full. Move the existing
>  				 * set to the per node partial list.
>  				 */
> +				local_lock(&s->cpu_slab->lock);
>  				unfreeze_partials(s);
> +				local_unlock(&s->cpu_slab->lock);
>  				oldpage = NULL;
>  				pobjects = 0;
>  				pages = 0;
> @@ -2579,7 +2581,9 @@ static void flush_cpu_slab(struct work_s
>  	if (c->page)
>  		flush_slab(s, c, true);
>
> +	local_lock(&s->cpu_slab->lock);
>  	unfreeze_partials(s);
> +	local_unlock(&s->cpu_slab->lock);
>  }
>
>  static bool has_cpu_slab(int cpu, struct kmem_cache *s)
> @@ -2632,8 +2636,11 @@ static int slub_cpu_dead(unsigned int cp
>  	struct kmem_cache *s;
>
>  	mutex_lock(&slab_mutex);
> -	list_for_each_entry(s, &slab_caches, list)
> +	list_for_each_entry(s, &slab_caches, list) {
> +		local_lock(&s->cpu_slab->lock);

This one is odd. It locks the cpu_slab lock of the CPU which runs this
callback and then flushes the slab of the dead CPU.

Thanks,

        tglx
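
P.S.: For illustration only, a sketch of the pattern the hunk appears
to create. The quoted hunk is truncated, so this assumes the rest of
the loop body still calls __flush_cpu_slab(s, cpu) as in mainline;
slub_cpu_dead_sketch is a made-up name, not the actual patch:

	static int slub_cpu_dead_sketch(unsigned int cpu)
	{
		struct kmem_cache *s;

		mutex_lock(&slab_mutex);
		list_for_each_entry(s, &slab_caches, list) {
			/*
			 * local_lock() always operates on the per-CPU
			 * data of the CPU executing this code, i.e. it
			 * takes the cpu_slab lock of the CPU running
			 * the hotplug callback ...
			 */
			local_lock(&s->cpu_slab->lock);
			/*
			 * ... while the flush operates on the per-CPU
			 * data of @cpu, the CPU which just went dead.
			 * The lock taken above does not protect that
			 * data at all.
			 */
			__flush_cpu_slab(s, cpu);
			local_unlock(&s->cpu_slab->lock);
		}
		mutex_unlock(&slab_mutex);
		return 0;
	}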