dump_unreclaimable_slab() acquires slab_mutex first and does not remove any
entry from the slab_caches list while iterating over it. Thus we do not need
list_for_each_entry_safe() here, which is only required to guard against
removal of the current list entry during iteration.

Signed-off-by: Hui Su <sh_def@xxxxxxx>
---
 mm/slab_common.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/slab_common.c b/mm/slab_common.c
index f9ccd5dc13f3..0cd2821b7066 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -978,7 +978,7 @@ static int slab_show(struct seq_file *m, void *p)
 
 void dump_unreclaimable_slab(void)
 {
-	struct kmem_cache *s, *s2;
+	struct kmem_cache *s;
 	struct slabinfo sinfo;
 
 	/*
@@ -996,7 +996,7 @@ void dump_unreclaimable_slab(void)
 	pr_info("Unreclaimable slab info:\n");
 	pr_info("Name                      Used          Total\n");
 
-	list_for_each_entry_safe(s, s2, &slab_caches, list) {
+	list_for_each_entry(s, &slab_caches, list) {
 		if (s->flags & SLAB_RECLAIM_ACCOUNT)
 			continue;
 
-- 
2.25.1
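
Not part of the patch, just for context: a minimal userspace sketch of the
reasoning above. The list macros are simplified re-implementations that mirror
<linux/list.h> semantics, and the "struct cache" type plus the test data are
made up for illustration. It shows that list_for_each_entry_safe() only
matters when the loop body itself deletes the current entry, which
dump_unreclaimable_slab() never does.

/* Compile with: gcc -std=gnu99 list_sketch.c */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Plain walk: 'pos' must stay on the list for the whole iteration. */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/* Safe walk: caches the next entry in 'n', so 'pos' may be deleted. */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
	     n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct cache {				/* stand-in for struct kmem_cache */
	const char *name;
	int reclaimable;
	struct list_head list;
};

static struct list_head caches = LIST_HEAD_INIT(caches);

int main(void)
{
	struct cache a = { "cache-a", 0 }, b = { "cache-b", 1 };
	struct cache *s, *n;

	list_add_tail(&a.list, &caches);
	list_add_tail(&b.list, &caches);

	/* Read-only walk: plain list_for_each_entry() is enough. */
	list_for_each_entry(s, &caches, list)
		if (!s->reclaimable)
			printf("unreclaimable: %s\n", s->name);

	/* Deleting entries from inside the loop needs the _safe variant. */
	list_for_each_entry_safe(s, n, &caches, list)
		list_del(&s->list);

	return 0;
}

Note that the _safe variant only protects against deletions performed by the
loop body itself; concurrent removals by other contexts still need external
locking, which is why dump_unreclaimable_slab() takes slab_mutex in the first
place, independent of which iteration macro is used.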