The slub allocator creates a copy of the name string and frees it later. I
would like all caches to behave the same way: either slab and slob start
creating a copy of the name as well, or slub stops doing so. This patch
creates copies of the name string for slob and slab, adopting the slub
behavior for them all.

For slab, we can't really do it before the kmalloc caches are up, so we do it
manually before the end of the EARLY phase, and conditionally for the caches
created afterwards.

[ v2: also dup string for early caches, requested by David Rientjes ]

Signed-off-by: Glauber Costa <glommer@xxxxxxxxxxxxx>
CC: Christoph Lameter <cl@xxxxxxxxx>
CC: Pekka Enberg <penberg@xxxxxxxxxxxxxx>
CC: David Rientjes <rientjes@xxxxxxxxxx>
---
 mm/slab.c |   37 +++++++++++++++++++++++++++++++++++--
 mm/slob.c |   12 ++++++++++--
 2 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index e901a36..fe05f8bf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1676,6 +1676,33 @@ void __init kmem_cache_init(void)
 		}
 	}
 
+	/*
+	 * create a copy of all the name strings for early caches. This is
+	 * so deleting those caches will work in a consistent way. We don't
+	 * expect allocation failures this early in the process, just make sure
+	 * they didn't happen.
+	 */
+	sizes = malloc_sizes;
+
+	while (sizes->cs_size != ULONG_MAX) {
+		struct kmem_cache *cachep;
+
+		cachep = sizes->cs_cachep;
+		if (cachep) {
+			cachep->name = kstrdup(cachep->name, GFP_NOWAIT);
+			BUG_ON(!cachep->name);
+		}
+
+		cachep = sizes->cs_dmacachep;
+		if (cachep) {
+			cachep->name = kstrdup(cachep->name, GFP_NOWAIT);
+			BUG_ON(!cachep->name);
+		}
+		sizes++;
+	}
+
+	cache_cache.name = kstrdup(cache_cache.name, GFP_NOWAIT);
+	BUG_ON(!cache_cache.name);
 	g_cpucache_up = EARLY;
 }
 
@@ -2118,6 +2145,7 @@ static void __kmem_cache_destroy(struct kmem_cache *cachep)
 			kfree(l3);
 		}
 	}
+	kfree(cachep->name);
 	kmem_cache_free(&cache_cache, cachep);
 }
 
@@ -2526,9 +2554,14 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
 	cachep->ctor = ctor;
-	cachep->name = name;
 
-	if (setup_cpu_cache(cachep, gfp)) {
+	/* Can't do strdup while kmalloc is not up */
+	if (slab_is_available())
+		cachep->name = kstrdup(name, GFP_KERNEL);
+	else
+		cachep->name = name;
+
+	if (!cachep->name || setup_cpu_cache(cachep, gfp)) {
 		__kmem_cache_destroy(cachep);
 		cachep = NULL;
 		goto oops;
diff --git a/mm/slob.c b/mm/slob.c
index 8105be4..8f10d36 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -575,7 +575,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
 
 	if (c) {
-		c->name = name;
+		c->name = kstrdup(name, GFP_KERNEL);
+		if (!c->name) {
+			slob_free(c, sizeof(struct kmem_cache));
+			c = NULL;
+			goto out;
+		}
 		c->size = size;
 		if (flags & SLAB_DESTROY_BY_RCU) {
 			/* leave room for rcu footer at the end of object */
@@ -589,7 +594,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 			c->align = ARCH_SLAB_MINALIGN;
 		if (c->align < align)
 			c->align = align;
-	} else if (flags & SLAB_PANIC)
+	}
+out:
+	if (!c && (flags & SLAB_PANIC))
 		panic("Cannot create slab cache %s\n", name);
 
 	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
@@ -602,6 +609,7 @@ void kmem_cache_destroy(struct kmem_cache *c)
 	kmemleak_free(c);
 	if (c->flags & SLAB_DESTROY_BY_RCU)
 		rcu_barrier();
+	kfree(c->name);
 	slob_free(c, sizeof(struct kmem_cache));
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
-- 
1.7.7.6
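
For reference, below is a minimal standalone userspace sketch (not kernel
code) of the ownership rule the patch adopts: the cache duplicates the
caller's name string at create time and frees its own copy at destroy time,
so callers may pass transient strings. The names toy_cache,
toy_cache_create and toy_cache_destroy are hypothetical stand-ins, and
strdup()/free() stand in for kstrdup()/kfree().

#define _POSIX_C_SOURCE 200809L	/* for strdup() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for struct kmem_cache: it owns a private copy of its name. */
struct toy_cache {
	char *name;	/* owned copy, released in toy_cache_destroy() */
	size_t size;
};

static struct toy_cache *toy_cache_create(const char *name, size_t size)
{
	struct toy_cache *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->name = strdup(name);	/* userspace analogue of kstrdup(name, GFP_KERNEL) */
	if (!c->name) {		/* creation fails if the dup fails, */
		free(c);	/* mirroring the slob error path above */
		return NULL;
	}
	c->size = size;
	return c;
}

static void toy_cache_destroy(struct toy_cache *c)
{
	free(c->name);		/* analogue of kfree(cachep->name) / kfree(c->name) */
	free(c);
}

int main(void)
{
	char transient[32];
	struct toy_cache *c;

	snprintf(transient, sizeof(transient), "cache-%d", 42);
	c = toy_cache_create(transient, 64);

	/* The caller's buffer may go away; the cache keeps its own copy. */
	memset(transient, 0, sizeof(transient));

	if (c) {
		printf("cache name: %s\n", c->name);
		toy_cache_destroy(c);
	}
	return 0;
}

The failure rule matches the slob path above: if duplicating the name fails,
creation fails and the partially set up object is released.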