[PATCH 5/5] mm, slob: Trace allocation failures consistently

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



This patch cleans up how we trace kmalloc and kmem_cache_alloc.
In particular, it fixes out-of-memory tracing: every failed allocation
is now traced, reporting the non-zero number of requested bytes and zero obtained bytes.

Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Signed-off-by: Ezequiel Garcia <elezegarcia@xxxxxxxxx>
---
 mm/slob.c |   30 ++++++++++++++++++------------
 1 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/mm/slob.c b/mm/slob.c
index 3f4dc9a..73f16ca 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -428,6 +428,7 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	size_t alloc_size = 0;
 	void *ret;
 
 	gfp &= gfp_allowed_mask;
@@ -441,24 +442,25 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		ret = slob_alloc(size + align, gfp, align, node);
 
 		if (!ret)
-			return NULL;
+			goto trace_out;
 		*(unsigned int *)ret = size;
 		ret += align;
-
-		trace_kmalloc_node(caller, ret,
-				   size, size + align, gfp, node);
+		alloc_size = size + align;
 	} else {
 		unsigned int order = get_order(size);
 
 		if (likely(order))
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
+		if (!ret)
+			goto trace_out;
 
-		trace_kmalloc_node(caller, ret,
-				   size, PAGE_SIZE << order, gfp, node);
+		alloc_size = PAGE_SIZE << order;
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
+trace_out:
+	trace_kmalloc_node(caller, ret, size, alloc_size, gfp, node);
 	return ret;
 }
 
@@ -565,6 +567,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
+	size_t alloc_size = 0;
 	void *b;
 
 	flags &= gfp_allowed_mask;
@@ -573,20 +576,23 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
-					    SLOB_UNITS(c->size) * SLOB_UNIT,
-					    flags, node);
+		if (!b)
+			goto trace_out;
+		alloc_size = SLOB_UNITS(c->size) * SLOB_UNIT;
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
-					    PAGE_SIZE << get_order(c->size),
-					    flags, node);
+		if (!b)
+			goto trace_out;
+		alloc_size = PAGE_SIZE << get_order(c->size);
 	}
 
 	if (c->ctor)
 		c->ctor(b);
 
 	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
+trace_out:
+	trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, alloc_size,
+				    flags, node);
 	return b;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
-- 
1.7.8.6

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@xxxxxxxxx.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@xxxxxxxxx">email@xxxxxxxxx</a>


[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux]     [Linux OMAP]     [Linux MIPS]     [ECOS]     [Asterisk Internet PBX]     [Linux API]