[RFC V2 SLEB 11/14] SLEB: Add per node cache (with a fixed size for now)

The per node cache serves the same function as the shared cache in SLAB. In the
future it will also take on the role of the alien cache.

If the per cpu queues are exhausted, the shared cache is consulted before
objects are acquired directly from the slab pages.

On free, objects are first pushed into the shared cache before being released
directly to the slab pages.
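
For illustration, here is a minimal user-space sketch of these two paths (not
the kernel code itself; shared_cache, shared_push, shared_pull, QUEUE_SIZE and
BATCH are made-up stand-ins, and a pthread mutex replaces the node's spinlock):

/*
 * Simplified, user-space-only model of the per node shared cache.
 * shared_cache, shared_push, shared_pull, QUEUE_SIZE and BATCH are
 * illustrative names only; a pthread mutex stands in for the kernel
 * spinlock n->shared_lock.
 */
#include <pthread.h>
#include <string.h>

#define QUEUE_SIZE 64			/* stand-in for BOOT_QUEUE_SIZE */
#define BATCH 16			/* stand-in for s->batch */

struct shared_cache {
	pthread_mutex_t lock;		/* models n->shared_lock */
	int objects;			/* models n->objects */
	void *object[QUEUE_SIZE];	/* models n->object[] */
};

/* Free path: stash as many objects as fit before touching slab pages. */
static int shared_push(struct shared_cache *n, void **obj, int nr)
{
	int d;

	pthread_mutex_lock(&n->lock);
	d = nr < QUEUE_SIZE - n->objects ? nr : QUEUE_SIZE - n->objects;
	if (d > 0) {
		memcpy(n->object + n->objects, obj, d * sizeof(void *));
		n->objects += d;
	}
	pthread_mutex_unlock(&n->lock);
	return d;			/* caller frees the rest to slab pages */
}

/* Alloc path: refill the per cpu queue from the shared cache first. */
static int shared_pull(struct shared_cache *n, void **obj, int max)
{
	int d;

	pthread_mutex_lock(&n->lock);
	d = max < n->objects ? max : n->objects;
	if (d > 0) {
		n->objects -= d;
		memcpy(obj, n->object + n->objects, d * sizeof(void *));
	}
	pthread_mutex_unlock(&n->lock);
	return d;			/* caller falls back to slab pages if short */
}

int main(void)
{
	struct shared_cache n = { .lock = PTHREAD_MUTEX_INITIALIZER };
	void *buf[BATCH] = { (void *)1, (void *)2 };

	shared_push(&n, buf, 2);			/* free side */
	return shared_pull(&n, buf, BATCH) == 2 ? 0 : 1; /* alloc side */
}

The patch below does the same push/pull inside drain_objects() and the
allocation slow path, bounded by BOOT_QUEUE_SIZE and s->batch.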

Both methods allow other processes running on the same node to pick up freed
objects that may still be cache hot in the shared CPU caches. No approximation
of the actual cache topology is attempted, though; it is simply assumed that
every processor on a node derives some benefit from acquiring an object that
was last used on another processor.

For now the shared cache has a fixed, static size (BOOT_QUEUE_SIZE objects).

Signed-off-by: Christoph Lameter <cl@xxxxxxxxxxxxxxxxxxxx>

---
 include/linux/slub_def.h |    3 +++
 mm/slub.c                |   41 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2010-05-21 13:08:11.000000000 -0500
+++ linux-2.6/include/linux/slub_def.h	2010-05-21 13:08:24.000000000 -0500
@@ -55,6 +55,9 @@ struct kmem_cache_node {
 	atomic_long_t total_objects;
 	struct list_head full;
 #endif
+	int objects;		/* Objects in the per node cache  */
+	spinlock_t shared_lock;	/* Serialization for per node cache */
+	void *object[BOOT_QUEUE_SIZE];
 };
 
 /*
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2010-05-21 13:08:12.000000000 -0500
+++ linux-2.6/mm/slub.c	2010-05-21 13:08:24.000000000 -0500
@@ -1418,6 +1418,22 @@ static struct page *get_partial(struct k
 void drain_objects(struct kmem_cache *s, void **object, int nr)
 {
 	int i;
+	struct kmem_cache_node *n = get_node(s, numa_node_id());
+
+	/* First drain to shared cache if its there */
+	if (n->objects < BOOT_QUEUE_SIZE) {
+		int d;
+
+		spin_lock(&n->shared_lock);
+		d = min(nr, BOOT_QUEUE_SIZE - n->objects);
+		if (d > 0) {
+			memcpy(n->object + n->objects, object, d * sizeof(void *));
+			n->objects += d;
+			nr -= d;
+			object += d;
+		}
+		spin_unlock(&n->shared_lock);
+	}
 
 	for (i = 0 ; i < nr; ) {
 
@@ -1725,6 +1741,29 @@ redo:
 		if (unlikely(!node_match(c, node))) {
 			flush_cpu_objects(s, c);
 			c->node = node;
+		} else {
+			struct kmem_cache_node *n = get_node(s, c->node);
+
+			/*
+			 * Node specified is matching the stuff that we cache,
+			 * so we could retrieve objects from the shared cache
+			 * of the indicated node if there would be anything
+			 * there.
+			 */
+			if (n->objects) {
+				int d;
+
+				spin_lock(&n->shared_lock);
+				d = min(min(s->batch, BOOT_QUEUE_SIZE), n->objects);
+				if (d > 0) {
+					memcpy(c->object + c->objects,
+						n->object + n->objects - d,
+						d * sizeof(void *));
+					n->objects -= d;
+					c->objects += d;
+				}
+				spin_unlock(&n->shared_lock);
+			}
 		}
 
 		while (c->objects < s->batch) {
@@ -2061,6 +2100,8 @@ init_kmem_cache_node(struct kmem_cache_n
 	atomic_long_set(&n->total_objects, 0);
 	INIT_LIST_HEAD(&n->full);
 #endif
+	spin_lock_init(&n->shared_lock);
+	n->objects = 0;
 }
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
