ping :)

On 2020/8/11 10:02, wuyun.wu@xxxxxxxxxx wrote:
> From: Abel Wu <wuyun.wu@xxxxxxxxxx>
> 
> The commit below is incomplete, as it didn't handle the add_full() part.
> commit a4d3f8916c65 ("slub: remove useless kmem_cache_debug() before
> remove_full()")
> 
> This patch checks for SLAB_STORE_USER instead of kmem_cache_debug(),
> since that should be the only context in which we need the list_lock for
> add_full().
> 
> Signed-off-by: Abel Wu <wuyun.wu@xxxxxxxxxx>
> ---
>  mm/slub.c | 4 +++-
>  1 file changed, 3 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index f226d66408ee..df93a5a0e9a4 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2182,7 +2182,8 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
>  		}
>  	} else {
>  		m = M_FULL;
> -		if (kmem_cache_debug(s) && !lock) {
> +#ifdef CONFIG_SLUB_DEBUG
> +		if ((s->flags & SLAB_STORE_USER) && !lock) {
>  			lock = 1;
>  			/*
>  			 * This also ensures that the scanning of full
> @@ -2191,6 +2192,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
>  			 */
>  			spin_lock(&n->list_lock);
>  		}
> +#endif
>  	}
>  
>  	if (l != m) {
> 
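
For context on the SLAB_STORE_USER check: the list_lock is taken here only
so that the page can later be put on the node's full list, and add_full()
itself already bails out early unless SLAB_STORE_USER is set. Paraphrased
from mm/slub.c (roughly what current mainline has, shown here only for
reference, not part of the patch):

	static void add_full(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page)
	{
		/* full-list tracking exists only for SLAB_STORE_USER caches */
		if (!(s->flags & SLAB_STORE_USER))
			return;

		/* caller is expected to hold n->list_lock for the list update */
		lockdep_assert_held(&n->list_lock);
		list_add(&page->slab_list, &n->full);
	}

So for caches with other debug flags but without SLAB_STORE_USER, taking
the lock in deactivate_slab() is pure overhead, which is what this patch
avoids.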