Re: netfilter: active obj WARN when cleaning up

On Mon, 2 Dec 2013, Greg KH wrote:

> No, the release callback is in the kobj_type, not the kobject itself.

Ahh... Ok. Patch follows:
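(First, roughly the pattern being described, as a sketch; the actual
patch is right after it. The release lives in the kobj_type, and the
final kobject_put() is what ends up running it. "foo" below is a
made-up illustration, not kernel code, parented under /sys/kernel just
for the sake of the sketch.)

#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
	struct kobject kobj;
	int payload;
};

/* Called once the last reference to the embedded kobject is dropped;
 * only here is it safe to free the containing object. */
static void foo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
	.release   = foo_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct foo *f;

static int __init foo_init(void)
{
	int err;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	err = kobject_init_and_add(&f->kobj, &foo_ktype, kernel_kobj, "foo");
	if (err)
		kobject_put(&f->kobj);	/* foo_release() frees f */
	return err;
}

static void __exit foo_exit(void)
{
	kobject_del(&f->kobj);
	kobject_put(&f->kobj);	/* last ref gone -> foo_release() */
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");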


Subject: slub: use sysfs'es release mechanism for kmem_cache

Sysfs has a release mechanism. Use that to release the
kmem_cache structure if CONFIG_SYSFS is enabled.

With sysfs enabled, kmem_cache_destroy() now only removes the cache
from sysfs via sysfs_slab_remove(); the final kfree() of the name and
of the kmem_cache structure itself moves into the kobject's release
callback, so the structure is no longer freed while its sysfs kobject
may still be in use.

Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>

Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h	2013-12-02 13:31:07.395905824 -0600
+++ linux/include/linux/slub_def.h	2013-12-02 13:31:07.385906101 -0600
@@ -98,4 +98,8 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };

+#ifdef CONFIG_SYSFS
+#define SLAB_SUPPORTS_SYSFS
+#endif
+
 #endif /* _LINUX_SLUB_DEF_H */
Index: linux/mm/slab.h
===================================================================
--- linux.orig/mm/slab.h	2013-12-02 13:31:07.395905824 -0600
+++ linux/mm/slab.h	2013-12-02 13:39:46.671476284 -0600
@@ -57,6 +57,7 @@ struct mem_cgroup;
 struct kmem_cache *
 __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
 		   size_t align, unsigned long flags, void (*ctor)(void *));
+void sysfs_slab_remove(struct kmem_cache *);
 #else
 static inline struct kmem_cache *
 __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
@@ -91,6 +92,7 @@ __kmem_cache_alias(struct mem_cgroup *me
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

 int __kmem_cache_shutdown(struct kmem_cache *);
+void slab_kmem_cache_release(struct kmem_cache *);

 struct seq_file;
 struct file;
Index: linux/mm/slab_common.c
===================================================================
--- linux.orig/mm/slab_common.c	2013-12-02 13:31:07.395905824 -0600
+++ linux/mm/slab_common.c	2013-12-02 13:33:57.221186749 -0600
@@ -251,6 +251,12 @@ kmem_cache_create(const char *name, size
 }
 EXPORT_SYMBOL(kmem_cache_create);

+void slab_kmem_cache_release(struct kmem_cache *s)
+{
+	kfree(s->name);
+	kmem_cache_free(kmem_cache, s);
+}
+
 void kmem_cache_destroy(struct kmem_cache *s)
 {
 	/* Destroy all the children caches if we aren't a memcg cache */
@@ -268,8 +274,11 @@ void kmem_cache_destroy(struct kmem_cach
 				rcu_barrier();

 			memcg_release_cache(s);
-			kfree(s->name);
-			kmem_cache_free(kmem_cache, s);
+#ifdef SLAB_SUPPORTS_SYSFS
+			sysfs_slab_remove(s);
+#else
+			slab_kmem_cache_release(s);
+#endif
 		} else {
 			list_add(&s->list, &slab_caches);
 			mutex_unlock(&slab_mutex);
Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c	2013-12-02 13:31:07.395905824 -0600
+++ linux/mm/slub.c	2013-12-02 13:35:44.208213810 -0600
@@ -210,7 +210,6 @@ enum track_item { TRACK_ALLOC, TRACK_FRE
 #ifdef CONFIG_SYSFS
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
-static void sysfs_slab_remove(struct kmem_cache *);
 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
 #else
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
@@ -3208,23 +3207,7 @@ static inline int kmem_cache_close(struc

 int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-	int rc = kmem_cache_close(s);
-
-	if (!rc) {
-		/*
-		 * We do the same lock strategy around sysfs_slab_add, see
-		 * __kmem_cache_create. Because this is pretty much the last
-		 * operation we do and the lock will be released shortly after
-		 * that in slab_common.c, we could just move sysfs_slab_remove
-		 * to a later point in common code. We should do that when we
-		 * have a common sysfs framework for all allocators.
-		 */
-		mutex_unlock(&slab_mutex);
-		sysfs_slab_remove(s);
-		mutex_lock(&slab_mutex);
-	}
-
-	return rc;
+	return kmem_cache_close(s);
 }

 /********************************************************************
@@ -5073,6 +5056,11 @@ static void memcg_propagate_slab_attrs(s
 #endif
 }

+static void kmem_cache_release(struct kobject *k)
+{
+	slab_kmem_cache_release(to_slab(k));
+}
+
 static const struct sysfs_ops slab_sysfs_ops = {
 	.show = slab_attr_show,
 	.store = slab_attr_store,
@@ -5080,6 +5068,7 @@ static const struct sysfs_ops slab_sysfs

 static struct kobj_type slab_ktype = {
 	.sysfs_ops = &slab_sysfs_ops,
+	.release = kmem_cache_release,
 };

 static int uevent_filter(struct kset *kset, struct kobject *kobj)
@@ -5184,7 +5173,7 @@ static int sysfs_slab_add(struct kmem_ca
 	return 0;
 }

-static void sysfs_slab_remove(struct kmem_cache *s)
+void sysfs_slab_remove(struct kmem_cache *s)
 {
 	if (slab_state < FULL)
 		/*
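
For the record, the resulting teardown with sysfs enabled should look
like this (the kobject_del()/kobject_put() at the end of
sysfs_slab_remove() is existing code, not visible in the diff above):

	kmem_cache_destroy(s)
	  -> sysfs_slab_remove(s)              /* kobject_del() + kobject_put() */
	     -> kmem_cache_release(&s->kobj)   /* slab_ktype.release, last ref gone */
	        -> slab_kmem_cache_release(to_slab(&s->kobj))
	           -> kfree(s->name); kmem_cache_free(kmem_cache, s);

Without sysfs there is no kobject in play and kmem_cache_destroy()
calls slab_kmem_cache_release() directly, which is just the old
kfree()/kmem_cache_free() pair.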