On Mon, Jun 17, 2024 at 07:23:36PM +0200, Vlastimil Babka wrote:
> On 6/17/24 6:12 PM, Paul E. McKenney wrote:
> > On Mon, Jun 17, 2024 at 05:10:50PM +0200, Vlastimil Babka wrote:
> >> On 6/13/24 2:22 PM, Jason A. Donenfeld wrote:
> >> > On Wed, Jun 12, 2024 at 08:38:02PM -0700, Paul E. McKenney wrote:
> >> >> o	Make the current kmem_cache_destroy() asynchronously wait for
> >> >> 	all memory to be returned, then complete the destruction.
> >> >> 	(This gets rid of a valuable debugging technique because
> >> >> 	in normal use, it is a bug to attempt to destroy a kmem_cache
> >> >> 	that has objects still allocated.)
> >>
> >> This seems like the best option to me. As Jason already said, the debugging
> >> technique is not affected significantly, if the warning just occurs
> >> asynchronously later. The module can be already unloaded at that point, as
> >> the leak is never checked programmatically anyway to control further
> >> execution, it's just a splat in dmesg.
> >
> > Works for me!
>
> Great. So this is what a prototype could look like, hopefully? The kunit test
> does generate the splat for me, which should be because the rcu_barrier() in
> the implementation (marked to be replaced with the real thing) is really
> insufficient. Note the test itself passes, as this kind of error isn't wired
> up properly.
>
> Another thing to resolve is the marked comment about kasan_shutdown() with
> potential kfree_rcu()'s in flight.
>
> Also you need CONFIG_SLUB_DEBUG enabled, otherwise node_nr_slabs() is a no-op
> and it might fail to notice the pending slabs. This will need to change.
>
> ----8<----
> diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
> index e6667a28c014..e3e4d0ca40b7 100644
> --- a/lib/slub_kunit.c
> +++ b/lib/slub_kunit.c
> @@ -5,6 +5,7 @@
>  #include <linux/slab.h>
>  #include <linux/module.h>
>  #include <linux/kernel.h>
> +#include <linux/rcupdate.h>
>  #include "../mm/slab.h"
>
>  static struct kunit_resource resource;
> @@ -157,6 +158,26 @@ static void test_kmalloc_redzone_access(struct kunit *test)
>  	kmem_cache_destroy(s);
>  }
>
> +struct test_kfree_rcu_struct {
> +	struct rcu_head rcu;
> +};
> +
> +static void test_kfree_rcu(struct kunit *test)
> +{
> +	struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
> +				sizeof(struct test_kfree_rcu_struct),
> +				SLAB_NO_MERGE);
> +	struct test_kfree_rcu_struct *p = kmem_cache_alloc(s, GFP_KERNEL);
> +
> +	kasan_disable_current();
> +
> +	KUNIT_EXPECT_EQ(test, 0, slab_errors);
> +
> +	kasan_enable_current();
> +	kfree_rcu(p, rcu);
> +	kmem_cache_destroy(s);
> +}
> +
>  static int test_init(struct kunit *test)
>  {
>  	slab_errors = 0;
> @@ -177,6 +198,7 @@ static struct kunit_case test_cases[] = {
>
>  	KUNIT_CASE(test_clobber_redzone_free),
>  	KUNIT_CASE(test_kmalloc_redzone_access),
> +	KUNIT_CASE(test_kfree_rcu),
>  	{}
>  };
>
> diff --git a/mm/slab.h b/mm/slab.h
> index b16e63191578..a0295600af92 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -277,6 +277,8 @@ struct kmem_cache {
>  	unsigned int red_left_pad;	/* Left redzone padding size */
>  	const char *name;		/* Name (only for display!) */
>  	struct list_head list;		/* List of slab caches */
> +	struct work_struct async_destroy_work;
> +
>  #ifdef CONFIG_SYSFS
>  	struct kobject kobj;		/* For sysfs */
>  #endif
> @@ -474,7 +476,7 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
>  			      SLAB_NO_USER_FLAGS)
>
>  bool __kmem_cache_empty(struct kmem_cache *);
> -int __kmem_cache_shutdown(struct kmem_cache *);
> +int __kmem_cache_shutdown(struct kmem_cache *, bool);
>  void __kmem_cache_release(struct kmem_cache *);
>  int __kmem_cache_shrink(struct kmem_cache *);
>  void slab_kmem_cache_release(struct kmem_cache *);
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 5b1f996bed06..c5c356d0235d 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -44,6 +44,8 @@ static LIST_HEAD(slab_caches_to_rcu_destroy);
>  static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
>  static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
>  		    slab_caches_to_rcu_destroy_workfn);
> +static void kmem_cache_kfree_rcu_destroy_workfn(struct work_struct *work);
> +
>
>  /*
>   * Set of flags that will prevent slab merging
> @@ -234,6 +236,7 @@ static struct kmem_cache *create_cache(const char *name,
>
>  	s->refcount = 1;
>  	list_add(&s->list, &slab_caches);
> +	INIT_WORK(&s->async_destroy_work, kmem_cache_kfree_rcu_destroy_workfn);
>  	return s;
>
>  out_free_cache:
> @@ -449,12 +452,16 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
>  	}
>  }
>
> -static int shutdown_cache(struct kmem_cache *s)
> +static int shutdown_cache(struct kmem_cache *s, bool warn_inuse)
>  {
>  	/* free asan quarantined objects */
> +	/*
> +	 * XXX: is it ok to call this multiple times? and what happens with a
> +	 * kfree_rcu() in flight that finishes after or in parallel with this?
> +	 */
>  	kasan_cache_shutdown(s);
>
> -	if (__kmem_cache_shutdown(s) != 0)
> +	if (__kmem_cache_shutdown(s, warn_inuse) != 0)
>  		return -EBUSY;
>
>  	list_del(&s->list);
> @@ -477,6 +484,32 @@ void slab_kmem_cache_release(struct kmem_cache *s)
>  	kmem_cache_free(kmem_cache, s);
>  }
>
> +static void kmem_cache_kfree_rcu_destroy_workfn(struct work_struct *work)
> +{
> +	struct kmem_cache *s;
> +	int err = -EBUSY;
> +	bool rcu_set;
> +
> +	s = container_of(work, struct kmem_cache, async_destroy_work);
> +
> +	// XXX use the real kmem_cache_free_barrier() or similar thing here

It implies that we need to introduce kfree_rcu_barrier(), a new API, which I
wanted to avoid initially. Since you do it asynchronously, can we just repeat
and wait until the cache is fully freed?

I am asking because inventing a new kfree_rcu_barrier() might not be so
straightforward.

--
Uladzislau Rezki
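
[Editor's note: for illustration only, here is a rough sketch of the "repeat
and wait" alternative suggested above, i.e. the destroy work item re-queues
itself until the cache has no pending objects instead of waiting on a new
kfree_rcu_barrier(). It assumes async_destroy_work becomes a struct
delayed_work, and cache_has_pending_objects() is a hypothetical helper
standing in for whatever check the prototype would use (e.g. node_nr_slabs(),
which is a no-op without CONFIG_SLUB_DEBUG, as noted in the quoted mail).
This is a sketch of the idea, not part of the patch.]

/*
 * Sketch only (not from the patch above): the "repeat and wait" variant.
 * Assumes s->async_destroy_work is changed to a struct delayed_work and
 * cache_has_pending_objects() is a hypothetical helper, e.g. built on
 * node_nr_slabs().
 */
static void kmem_cache_kfree_rcu_destroy_workfn(struct work_struct *work)
{
	struct kmem_cache *s = container_of(to_delayed_work(work),
					    struct kmem_cache,
					    async_destroy_work);

	/*
	 * Objects queued via kfree_rcu() may not have been returned to
	 * the cache yet; instead of a dedicated barrier, simply check
	 * again after a grace-period-sized delay.
	 */
	if (cache_has_pending_objects(s)) {
		schedule_delayed_work(&s->async_destroy_work, HZ);
		return;
	}

	mutex_lock(&slab_mutex);
	/* Anything still allocated at this point is a real leak, so warn. */
	shutdown_cache(s, true);
	mutex_unlock(&slab_mutex);
}

[The obvious trade-off versus a real kfree_rcu_barrier() is that the splat
and the final release of the kmem_cache can be delayed by an arbitrary number
of re-queues, and the polling condition must work even without
CONFIG_SLUB_DEBUG.]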