From: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>

Currently we only annotate the kmalloc caches, annotate all of them.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
Cc: Hans Schillstrom <hans@xxxxxxxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxxxxxx>
Cc: Matt Mackall <mpm@xxxxxxxxxxx>
Cc: Sitsofe Wheeler <sitsofe@xxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Link: http://lkml.kernel.org/n/tip-10bey2cgpcvtbdkgigaoab8w@xxxxxxxxxxxxxx
Signed-off-by: Steven Rostedt <rostedt@xxxxxxxxxxx>
---
 mm/slab.c |   52 ++++++++++++++++++++++++++++------------------------
 1 files changed, 28 insertions(+), 24 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 433b9a2..5251b99 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -606,6 +606,12 @@ int slab_is_available(void)
 	return g_cpucache_up >= EARLY;
 }
 
+/*
+ * Guard access to the cache-chain.
+ */
+static DEFINE_MUTEX(cache_chain_mutex);
+static struct list_head cache_chain;
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -667,38 +673,41 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 		slab_set_debugobj_lock_classes_node(cachep, node);
 }
 
-static void init_node_lock_keys(int q)
+static void init_lock_keys(struct kmem_cache *cachep, int node)
 {
-	struct cache_sizes *s = malloc_sizes;
+	struct kmem_list3 *l3;
 
 	if (g_cpucache_up < LATE)
 		return;
 
-	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-		struct kmem_list3 *l3;
+	l3 = cachep->nodelists[node];
+	if (!l3 || OFF_SLAB(cachep))
+		return;
 
-		l3 = s->cs_cachep->nodelists[q];
-		if (!l3 || OFF_SLAB(s->cs_cachep))
-			continue;
+	slab_set_lock_classes(cachep, &on_slab_l3_key, &on_slab_alc_key, node);
+}
 
-		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
-				&on_slab_alc_key, q);
-	}
+static void init_node_lock_keys(int node)
+{
+	struct kmem_cache *cachep;
+
+	list_for_each_entry(cachep, &cache_chain, next)
+		init_lock_keys(cachep, node);
 }
 
-static inline void init_lock_keys(void)
+static inline void init_cachep_lock_keys(struct kmem_cache *cachep)
 {
 	int node;
 
 	for_each_node(node)
-		init_node_lock_keys(node);
+		init_lock_keys(cachep, node);
 }
 #else
-static void init_node_lock_keys(int q)
+static void init_node_lock_keys(int node)
 {
 }
 
-static inline void init_lock_keys(void)
+static void init_cachep_lock_keys(struct kmem_cache *cachep)
 {
 }
 
@@ -711,12 +720,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 }
 #endif
 
-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 static DEFINE_PER_CPU(struct list_head, slab_free_list);
 static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
@@ -1728,14 +1731,13 @@ void __init kmem_cache_init_late(void)
 
 	g_cpucache_up = LATE;
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
-	list_for_each_entry(cachep, &cache_chain, next)
+	list_for_each_entry(cachep, &cache_chain, next) {
+		init_cachep_lock_keys(cachep);
 		if (enable_cpucache(cachep, GFP_NOWAIT))
 			BUG();
+	}
 	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
@@ -2546,6 +2548,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		slab_set_debugobj_lock_classes(cachep);
 	}
 
+	init_cachep_lock_keys(cachep);
+
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->next, &cache_chain);
 oops:
-- 
1.7.7.1
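
For context on what the annotation above amounts to: slab_set_lock_classes() boils down to giving each cache's per-node list_lock (and alien cache locks) a dedicated struct lock_class_key, so lockdep does not report false-positive recursion when slab locks of different caches nest. A minimal sketch of that idea, assuming the 3.2-era kmem_list3/nodelists layout used in this patch (the demo_* names are illustrative, not from mm/slab.c):

	/*
	 * Illustrative fragment only, not part of the patch.  Meant in the
	 * context of mm/slab.c, where struct kmem_list3 and the nodelists[]
	 * array are defined.
	 */
	static struct lock_class_key demo_l3_key;	/* hypothetical key */

	static void demo_annotate_cache_node(struct kmem_cache *cachep, int node)
	{
		struct kmem_list3 *l3 = cachep->nodelists[node];

		if (!l3)
			return;

		/*
		 * lockdep_set_class() is the core lockdep API: the list_lock
		 * of this cache/node now belongs to demo_l3_key's class
		 * rather than the default per-initializer class.
		 */
		lockdep_set_class(&l3->list_lock, &demo_l3_key);
	}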