On Wed, Oct 2, 2024 at 11:09 AM Namhyung Kim <namhyung@xxxxxxxxxx> wrote:
> [...]
> +
> +static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
> +{
> +	loff_t cnt = 0;
> +	bool found = false;
> +	struct kmem_cache *s;
> +
> +	mutex_lock(&slab_mutex);
> +
> +	/*
> +	 * Find an entry at the given position in the slab_caches list instead
> +	 * of keeping a reference (of the last visited entry, if any) out of
> +	 * slab_mutex. It might miss something if one is deleted in the middle
> +	 * while it releases the lock. But it should be rare and there's not
> +	 * much we can do about it.
> +	 */
> +	list_for_each_entry(s, &slab_caches, list) {
> +		if (cnt == *pos) {
> +			/*
> +			 * Make sure this entry remains in the list by getting
> +			 * a new reference count. Note that boot_cache entries
> +			 * have a negative refcount, so don't touch them.
> +			 */
> +			if (s->refcount > 0)
> +				s->refcount++;
> +			found = true;
> +			break;
> +		}
> +		cnt++;
> +	}
> +	mutex_unlock(&slab_mutex);
> +
> +	if (!found)
> +		return NULL;
> +
> +	++*pos;

This should be

	if (*pos == 0)
		++*pos;

> +	return s;
> +}
> +
> +static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v)
[...]
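
This keeps the usual seq_file invariant that *pos is the index of the
next entry to show; other BPF iterators do the same, e.g.
bpf_map_seq_start() in kernel/bpf/map_iter.c. With the unconditional
increment, a read that resumes after seq_stop() starts one slot too
far, because ->next() already advanced *pos, so one cache is silently
skipped on every resume.

Untested sketch of the tail of the function with that change (assuming
the elided ->next() callback bumps *pos as usual):

	if (!found)
		return NULL;

	/*
	 * Advance the position only on the very first start.  On a
	 * restart after seq_stop(), *pos was already advanced by the
	 * ->next() callback, so bumping it again here would skip the
	 * entry we just looked up.
	 */
	if (*pos == 0)
		++*pos;

	return s;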