On Mon, Jun 17, 2024 at 03:50:56PM +0200, Uladzislau Rezki wrote:
> On Fri, Jun 14, 2024 at 09:33:45PM +0200, Jason A. Donenfeld wrote:
> > On Fri, Jun 14, 2024 at 02:35:33PM +0200, Uladzislau Rezki wrote:
> > > +	/* Should a destroy process be deferred? */
> > > +	if (s->flags & SLAB_DEFER_DESTROY) {
> > > +		list_move_tail(&s->list, &slab_caches_defer_destroy);
> > > +		schedule_delayed_work(&slab_caches_defer_destroy_work, HZ);
> > > +		goto out_unlock;
> > > +	}
> >
> > Wouldn't it be smoother to have the actual kmem_cache_free() function
> > check to see if it's been marked for destruction and the refcount is
> > zero, rather than polling every one second? I mentioned this approach
> > in: https://lore.kernel.org/all/Zmo9-YGraiCj5-MI@xxxxxxxxx/ -
> >
> >     I wonder if the right fix to this would be adding a `should_destroy`
> >     boolean to kmem_cache, which kmem_cache_destroy() sets to true. And
> >     then right after it checks `if (number_of_allocations == 0)
> >     actually_destroy()`, and likewise on each kmem_cache_free(), it
> >     could check `if (should_destroy && number_of_allocations == 0)
> >     actually_destroy()`.
> >
> I do not find polling a bad way to go with. But your proposal also
> sounds reasonable to me. We can combine both "prototypes" into one
> and offer that.
>
> Can you post a prototype here?

This is untested, but the simplest, shortest possible version would be:

diff --git a/mm/slab.h b/mm/slab.h
index 5f8f47c5bee0..907c0ea56c01 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -275,6 +275,7 @@ struct kmem_cache {
 	unsigned int inuse;		/* Offset to metadata */
 	unsigned int align;		/* Alignment */
 	unsigned int red_left_pad;	/* Left redzone padding size */
+	bool is_destroyed;		/* Destruction happens when no objects */
 	const char *name;		/* Name (only for display!) */
 	struct list_head list;		/* List of slab caches */
 #ifdef CONFIG_SYSFS
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1560a1546bb1..f700bed066d9 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -494,8 +494,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		goto out_unlock;
 
 	err = shutdown_cache(s);
-	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
-	     __func__, s->name, (void *)_RET_IP_);
+	if (err)
+		s->is_destroyed = true;
 out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
diff --git a/mm/slub.c b/mm/slub.c
index 1373ac365a46..7db8fe90a323 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4510,6 +4510,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 		return;
 	trace_kmem_cache_free(_RET_IP_, x, s);
 	slab_free(s, virt_to_slab(x), x, _RET_IP_);
+	if (s->is_destroyed)
+		kmem_cache_destroy(s);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -5342,9 +5344,6 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 		if (!slab->inuse) {
 			remove_partial(n, slab);
 			list_add(&slab->slab_list, &discard);
-		} else {
-			list_slab_objects(s, slab,
-			  "Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
 	spin_unlock_irq(&n->list_lock);
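
To spell the idea out away from the slab internals, here is a purely
illustrative userspace sketch of the same pattern (not kernel code; all
names are made up): the cache keeps a count of live objects plus a
destroy-pending flag, destroy() either releases the cache immediately or
defers, and the last free() performs the real teardown.

/*
 * Userspace sketch of the deferred-destroy idea discussed above.
 * Hypothetical names; not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_cache {
	size_t nr_objects;	/* objects currently allocated from the cache */
	bool destroy_pending;	/* set by toy_cache_destroy() while objects remain */
};

static void toy_cache_release(struct toy_cache *c)
{
	printf("cache actually destroyed\n");
	free(c);
}

struct toy_cache *toy_cache_create(void)
{
	return calloc(1, sizeof(struct toy_cache));
}

void *toy_cache_alloc(struct toy_cache *c)
{
	c->nr_objects++;
	return malloc(16);	/* stand-in for a real object */
}

void toy_cache_free(struct toy_cache *c, void *obj)
{
	free(obj);
	c->nr_objects--;
	/* Last free of a cache already marked for destruction tears it down. */
	if (c->destroy_pending && c->nr_objects == 0)
		toy_cache_release(c);
}

void toy_cache_destroy(struct toy_cache *c)
{
	if (c->nr_objects == 0) {
		toy_cache_release(c);
		return;
	}
	/* Objects still live: defer the real destruction to the last free. */
	c->destroy_pending = true;
}

int main(void)
{
	struct toy_cache *c = toy_cache_create();
	void *obj = toy_cache_alloc(c);

	toy_cache_destroy(c);	/* deferred: obj is still allocated */
	toy_cache_free(c, obj);	/* last free triggers the real destroy */
	return 0;
}

As I read the patch above, it gets the same effect by having
kmem_cache_free() retry kmem_cache_destroy() once is_destroyed is set,
relying on shutdown_cache() succeeding only when the cache has no
objects left.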