The patch titled
     Subject: mm: slab: free kmem_cache_node after destroy sysfs file
has been added to the -mm tree.  Its filename is
     mm-slab-free-kmem_cache_node-after-destroy-sysfs-file-v9.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-slab-free-kmem_cache_node-after-destroy-sysfs-file-v9.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-slab-free-kmem_cache_node-after-destroy-sysfs-file-v9.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/SubmitChecklist when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Dmitry Safonov <dsafonov@xxxxxxxxxxxxx>
Subject: mm: slab: free kmem_cache_node after destroy sysfs file

v8: reintroduce locking in free_partial() & address nits from Vladimir
v9: zap __remove_partial(); take spin_lock_irq() instead of
    spin_lock_irqsave() (added BUG_ON(irqs_disabled()) to be sure)

Signed-off-by: Dmitry Safonov <dsafonov@xxxxxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Vladimir Davydov <vdavydov@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
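[Editorial note, not part of the patch: a minimal sketch of the locking
distinction the v9 changelog refers to.  The function names below are
invented for illustration; only the spinlock primitives are real kernel
API.  spin_lock_irq()/spin_unlock_irq() unconditionally disable and
re-enable interrupts, so they are only safe when the caller is known to
run with IRQs enabled -- which is exactly what the added
BUG_ON(irqs_disabled()) asserts.  spin_lock_irqsave() would be needed if
free_partial() could be entered with IRQs already off; since cache
shutdown runs in process context, the cheaper variant suffices.]

#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/bug.h>

/* Safe only when the caller runs with IRQs enabled, as asserted. */
static void shutdown_style_locking(spinlock_t *lock)
{
	BUG_ON(irqs_disabled());	/* cheap guard: IRQs must be on here */
	spin_lock_irq(lock);		/* disables IRQs */
	/* ... walk a list safely against concurrent sysfs readers ... */
	spin_unlock_irq(lock);		/* unconditionally re-enables IRQs */
}

/* Safe from any context: saves and restores the caller's IRQ state. */
static void any_context_locking(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* ... */
	spin_unlock_irqrestore(lock, flags);
}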
 mm/slab.c |    9 ++-------
 mm/slub.c |   36 ++++++++++++++++++------------------
 2 files changed, 20 insertions(+), 25 deletions(-)

diff -puN mm/slab.c~mm-slab-free-kmem_cache_node-after-destroy-sysfs-file-v9 mm/slab.c
--- a/mm/slab.c~mm-slab-free-kmem_cache_node-after-destroy-sysfs-file-v9
+++ a/mm/slab.c
@@ -2275,7 +2275,7 @@ __kmem_cache_create (struct kmem_cache *

 	err = setup_cpu_cache(cachep, gfp);
 	if (err) {
-		__kmem_cache_shutdown(cachep);
+		__kmem_cache_release(cachep);
 		return err;
 	}

@@ -2414,12 +2414,7 @@ int __kmem_cache_shrink(struct kmem_cach

 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	int rc = __kmem_cache_shrink(cachep, false);
-
-	if (rc)
-		return rc;
-
-	return 0;
+	return __kmem_cache_shrink(cachep, false);
 }

 void __kmem_cache_release(struct kmem_cache *cachep)
diff -puN mm/slub.c~mm-slab-free-kmem_cache_node-after-destroy-sysfs-file-v9 mm/slub.c
--- a/mm/slub.c~mm-slab-free-kmem_cache_node-after-destroy-sysfs-file-v9
+++ a/mm/slub.c
@@ -1592,18 +1592,12 @@ static inline void add_partial(struct km
 	__add_partial(n, page, tail);
 }

-static inline void
-__remove_partial(struct kmem_cache_node *n, struct page *page)
-{
-	list_del(&page->lru);
-	n->nr_partial--;
-}
-
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	__remove_partial(n, page);
+	list_del(&page->lru);
+	n->nr_partial--;
 }

 /*
@@ -3173,18 +3167,23 @@ static void early_kmem_cache_node_alloc(
 	__add_partial(n, page, DEACTIVATE_TO_HEAD);
 }

-void __kmem_cache_release(struct kmem_cache *s)
+static void free_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
 	struct kmem_cache_node *n;

-	free_percpu(s->cpu_slab);
 	for_each_kmem_cache_node(s, node, n) {
 		kmem_cache_free(kmem_cache_node, n);
 		s->node[node] = NULL;
 	}
 }

+void __kmem_cache_release(struct kmem_cache *s)
+{
+	free_percpu(s->cpu_slab);
+	free_kmem_cache_nodes(s);
+}
+
 static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
@@ -3200,7 +3199,7 @@ static int init_kmem_cache_nodes(struct
 			GFP_KERNEL, node);

 		if (!n) {
-			__kmem_cache_release(s);
+			free_kmem_cache_nodes(s);
 			return 0;
 		}

@@ -3406,7 +3405,7 @@ static int kmem_cache_open(struct kmem_c
 	if (alloc_kmem_cache_cpus(s))
 		return 0;

-	__kmem_cache_release(s);
+	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
@@ -3444,22 +3443,25 @@ static void list_slab_objects(struct kme

 /*
  * Attempt to free all partial slabs on a node.
- * This is called from __kmem_cache_shutdown(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * This is called from __kmem_cache_shutdown().  We must take list_lock
+ * because a sysfs file might still access the partial list after shutdown.
 */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	struct page *page, *h;

+	BUG_ON(irqs_disabled());
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
 			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
+	spin_unlock_irq(&n->list_lock);
 }

 /*
@@ -3973,10 +3975,8 @@ int __kmem_cache_create(struct kmem_cach

 	memcg_propagate_slab_attrs(s);
 	err = sysfs_slab_add(s);
-	if (err) {
-		__kmem_cache_shutdown(s);
+	if (err)
 		__kmem_cache_release(s);
-	}

 	return err;
 }
_

Patches currently in -mm which might be from dsafonov@xxxxxxxxxxxxx are

mm-slab-free-kmem_cache_node-after-destroy-sysfs-file.patch
mm-slab-free-kmem_cache_node-after-destroy-sysfs-file-v9.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html