Re: linux-next: build failure after merge of the akpm tree

On Thu, Nov 06, 2014 at 12:24:00PM +0100, Thierry Reding wrote:
> On Thu, Nov 06, 2014 at 07:36:18PM +1100, Stephen Rothwell wrote:
> > Hi Andrew,
> > 
> > After merging the akpm tree, today's linux-next build (sparc defconfig)
> > failed like this:
> > 
> > mm/slab.c: In function 'slab_alloc':
> > mm/slab.c:3260:4: error: implicit declaration of function 'slab_free' [-Werror=implicit-function-declaration]
> >     slab_free(cachep, objp);
> >     ^
> > mm/slab.c: At top level:
> > mm/slab.c:3534:29: warning: conflicting types for 'slab_free'
> >  static __always_inline void slab_free(struct kmem_cache *cachep, void *objp)
> >                              ^
> > mm/slab.c:3534:29: error: static declaration of 'slab_free' follows non-static declaration
> > mm/slab.c:3260:4: note: previous implicit declaration of 'slab_free' was here
> >     slab_free(cachep, objp);
> >     ^
> > 
> > I am not sure what caused this, but it is something in the akpm-current
> > or akpm trees (possibly interacting with something else).
> 
> I fixed this using the attached patch.
> 
> Thierry

> From a9ec6cfacb892e044a5f76dce3b7fc9a3337d01a Mon Sep 17 00:00:00 2001
> From: Thierry Reding <treding@xxxxxxxxxx>
> Date: Thu, 6 Nov 2014 11:59:23 +0100
> Subject: [PATCH] mm/slab: Always predeclare slab_free()
> 
> Commit 5da1c3c725ab ("slab: recharge slab pages to the allocating memory
> cgroup") added a predeclaration of the new slab_free() helper so that it
> can be used in slab_alloc_node() and slab_alloc(). However, the prototype
> is added in an #ifdef CONFIG_NUMA protected section of code, which works
> fine for slab_alloc_node() but fails for slab_alloc().
> 
> Fixes the following build warnings and errors:
> 
> 	mm/slab.c: In function 'slab_alloc':
> 	mm/slab.c:3260:4: error: implicit declaration of function 'slab_free' [-Werror=implicit-function-declaration]
> 	    slab_free(cachep, objp);
> 	    ^
> 	mm/slab.c: At top level:
> 	mm/slab.c:3534:29: warning: conflicting types for 'slab_free'
> 	 static __always_inline void slab_free(struct kmem_cache *cachep, void *objp)
> 				     ^
> 	mm/slab.c:3534:29: error: static declaration of 'slab_free' follows non-static declaration
> 	mm/slab.c:3260:4: note: previous implicit declaration of 'slab_free' was here
> 	    slab_free(cachep, objp);
> 	    ^
> 
> Reported-by: Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx>
> Signed-off-by: Thierry Reding <treding@xxxxxxxxxx>
> ---
>  mm/slab.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/slab.c b/mm/slab.c
> index 61b01c2ae1d9..00cd028404cb 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -2961,6 +2961,8 @@ out:
>  	return objp;
>  }
>  
> +static __always_inline void slab_free(struct kmem_cache *cachep, void *objp);
> +
>  #ifdef CONFIG_NUMA
>  /*
>   * Try allocating on another node if PFA_SPREAD_SLAB is a mempolicy is set.
> @@ -3133,8 +3135,6 @@ done:
>  	return obj;
>  }
>  
> -static __always_inline void slab_free(struct kmem_cache *cachep, void *objp);
> -
>  static __always_inline void *
>  slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
>  		   unsigned long caller)
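
For reference, the failure mode described in that commit message is the
standard C one: slab_alloc() calls slab_free() before any declaration is
visible (the prototype only exists under #ifdef CONFIG_NUMA), so the
compiler invents an implicit "int slab_free()" and the later static
definition then conflicts with it. A minimal sketch that reproduces the
same diagnostics (hypothetical names, not the actual mm/slab.c code):

	/* gcc -c -Werror=implicit-function-declaration sketch.c */

	#ifdef USE_NODE			/* stands in for CONFIG_NUMA */
	static void helper(void);	/* prototype only visible here */
	#endif

	void caller(void)
	{
		helper();		/* without USE_NODE: implicit declaration */
	}

	static void helper(void)	/* static declaration follows
					 * non-static declaration */
	{
	}

Making the prototype unconditional, as the patch above does, keeps it
visible to every caller regardless of CONFIG_NUMA.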

Attached is a follow-up cleanup patch that avoids the need for a
predeclaration altogether. Feel free to ignore it if you prefer the
predeclaration.
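
In terms of the sketch above, the follow-up simply puts the definition
ahead of its callers, so that no separate prototype is needed at all
(again hypothetical names, not the real mm/slab.c layout):

	static void helper(void)	/* defined before any caller... */
	{
	}

	void caller(void)
	{
		helper();		/* ...so no forward declaration needed */
	}

In mm/slab.c that amounts to the purely mechanical move of
cache_flusharray(), __cache_free() and slab_free() shown in the diff
below.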

Thierry
From 3ad8947fd8613d13f2b9dc1ccd370d0a9ffaeb7b Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@xxxxxxxxxx>
Date: Thu, 6 Nov 2014 12:25:03 +0100
Subject: [PATCH] mm/slab: Shuffle code around to avoid a predeclaration

Instead of providing a predeclaration for the slab_free() helper, move
the helper and its dependencies before any of their users.

Signed-off-by: Thierry Reding <treding@xxxxxxxxxx>
---
 mm/slab.c | 200 +++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 99 insertions(+), 101 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 00cd028404cb..3c025f5934da 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2961,7 +2961,105 @@ out:
 	return objp;
 }
 
-static __always_inline void slab_free(struct kmem_cache *cachep, void *objp);
+static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
+{
+	int batchcount;
+	struct kmem_cache_node *n;
+	int node = numa_mem_id();
+	LIST_HEAD(list);
+
+	batchcount = ac->batchcount;
+#if DEBUG
+	BUG_ON(!batchcount || batchcount > ac->avail);
+#endif
+	check_irq_off();
+	n = get_node(cachep, node);
+	spin_lock(&n->list_lock);
+	if (n->shared) {
+		struct array_cache *shared_array = n->shared;
+		int max = shared_array->limit - shared_array->avail;
+		if (max) {
+			if (batchcount > max)
+				batchcount = max;
+			memcpy(&(shared_array->entry[shared_array->avail]),
+			       ac->entry, sizeof(void *) * batchcount);
+			shared_array->avail += batchcount;
+			goto free_done;
+		}
+	}
+
+	free_block(cachep, ac->entry, batchcount, node, &list);
+free_done:
+#if STATS
+	{
+		int i = 0;
+		struct list_head *p;
+
+		p = n->slabs_free.next;
+		while (p != &(n->slabs_free)) {
+			struct page *page;
+
+			page = list_entry(p, struct page, lru);
+			BUG_ON(page->active);
+
+			i++;
+			p = p->next;
+		}
+		STATS_SET_FREEABLE(cachep, i);
+	}
+#endif
+	spin_unlock(&n->list_lock);
+	slabs_destroy(cachep, &list);
+	ac->avail -= batchcount;
+	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
+}
+
+/*
+ * Release an obj back to its cache. If the obj has a constructed state, it must
+ * be in this state _before_ it is released.  Called with disabled ints.
+ */
+static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+				unsigned long caller)
+{
+	struct array_cache *ac = cpu_cache_get(cachep);
+
+	check_irq_off();
+	kmemleak_free_recursive(objp, cachep->flags);
+	objp = cache_free_debugcheck(cachep, objp, caller);
+
+	kmemcheck_slab_free(cachep, objp, cachep->object_size);
+
+	/*
+	 * Skip calling cache_free_alien() when the platform is not numa.
+	 * This will avoid cache misses that happen while accessing slabp (which
+	 * is per page memory  reference) to get nodeid. Instead use a global
+	 * variable to skip the call, which is mostly likely to be present in
+	 * the cache.
+	 */
+	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
+		return;
+
+	if (ac->avail < ac->limit) {
+		STATS_INC_FREEHIT(cachep);
+	} else {
+		STATS_INC_FREEMISS(cachep);
+		cache_flusharray(cachep, ac);
+	}
+
+	ac_put_obj(cachep, ac, objp);
+}
+
+static __always_inline void slab_free(struct kmem_cache *cachep, void *objp)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	debug_check_no_locks_freed(objp, cachep->object_size);
+	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(objp, cachep->object_size);
+	__cache_free(cachep, objp, _RET_IP_);
+	local_irq_restore(flags);
+}
 
 #ifdef CONFIG_NUMA
 /*
@@ -3307,94 +3405,6 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 	}
 }
 
-static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
-{
-	int batchcount;
-	struct kmem_cache_node *n;
-	int node = numa_mem_id();
-	LIST_HEAD(list);
-
-	batchcount = ac->batchcount;
-#if DEBUG
-	BUG_ON(!batchcount || batchcount > ac->avail);
-#endif
-	check_irq_off();
-	n = get_node(cachep, node);
-	spin_lock(&n->list_lock);
-	if (n->shared) {
-		struct array_cache *shared_array = n->shared;
-		int max = shared_array->limit - shared_array->avail;
-		if (max) {
-			if (batchcount > max)
-				batchcount = max;
-			memcpy(&(shared_array->entry[shared_array->avail]),
-			       ac->entry, sizeof(void *) * batchcount);
-			shared_array->avail += batchcount;
-			goto free_done;
-		}
-	}
-
-	free_block(cachep, ac->entry, batchcount, node, &list);
-free_done:
-#if STATS
-	{
-		int i = 0;
-		struct list_head *p;
-
-		p = n->slabs_free.next;
-		while (p != &(n->slabs_free)) {
-			struct page *page;
-
-			page = list_entry(p, struct page, lru);
-			BUG_ON(page->active);
-
-			i++;
-			p = p->next;
-		}
-		STATS_SET_FREEABLE(cachep, i);
-	}
-#endif
-	spin_unlock(&n->list_lock);
-	slabs_destroy(cachep, &list);
-	ac->avail -= batchcount;
-	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
-}
-
-/*
- * Release an obj back to its cache. If the obj has a constructed state, it must
- * be in this state _before_ it is released.  Called with disabled ints.
- */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp,
-				unsigned long caller)
-{
-	struct array_cache *ac = cpu_cache_get(cachep);
-
-	check_irq_off();
-	kmemleak_free_recursive(objp, cachep->flags);
-	objp = cache_free_debugcheck(cachep, objp, caller);
-
-	kmemcheck_slab_free(cachep, objp, cachep->object_size);
-
-	/*
-	 * Skip calling cache_free_alien() when the platform is not numa.
-	 * This will avoid cache misses that happen while accessing slabp (which
-	 * is per page memory  reference) to get nodeid. Instead use a global
-	 * variable to skip the call, which is mostly likely to be present in
-	 * the cache.
-	 */
-	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
-		return;
-
-	if (ac->avail < ac->limit) {
-		STATS_INC_FREEHIT(cachep);
-	} else {
-		STATS_INC_FREEMISS(cachep);
-		cache_flusharray(cachep, ac);
-	}
-
-	ac_put_obj(cachep, ac, objp);
-}
-
 /**
  * kmem_cache_alloc - Allocate an object
  * @cachep: The cache to allocate from.
@@ -3531,18 +3541,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
-static __always_inline void slab_free(struct kmem_cache *cachep, void *objp)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	debug_check_no_locks_freed(objp, cachep->object_size);
-	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(objp, cachep->object_size);
-	__cache_free(cachep, objp, _RET_IP_);
-	local_irq_restore(flags);
-}
-
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
-- 
2.1.3
