Re: [S+Q Cleanup3 4/6] slub: Dynamically size kmalloc cache allocations

On Fri, 20 Aug 2010, Christoph Lameter wrote:

> Draft patch to drop SMP particularities.
> 

s/SMP/NUMA/
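
(Right -- it is the CONFIG_NUMA special cases that go away here, not SMP
ones. Dropping them is also harmless for UMA because MAX_NUMNODES
collapses to 1 there; quoting include/linux/numa.h:)

	#ifdef CONFIG_NODES_SHIFT
	#define NODES_SHIFT	CONFIG_NODES_SHIFT
	#else
	#define NODES_SHIFT	0
	#endif

	#define MAX_NUMNODES	(1 << NODES_SHIFT)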

> ---
>  include/linux/slub_def.h |    5 +----
>  mm/slub.c                |   39 +--------------------------------------
>  2 files changed, 2 insertions(+), 42 deletions(-)
> 
> Index: linux-2.6/include/linux/slub_def.h
> ===================================================================
> --- linux-2.6.orig/include/linux/slub_def.h	2010-08-20 12:08:48.000000000 -0500
> +++ linux-2.6/include/linux/slub_def.h	2010-08-20 12:09:17.000000000 -0500
> @@ -96,11 +96,8 @@ struct kmem_cache {
>  	 * Defragmentation by allocating from a remote node.
>  	 */
>  	int remote_node_defrag_ratio;
> -	struct kmem_cache_node *node[MAX_NUMNODES];
> -#else
> -	/* Avoid an extra cache line for UP */
> -	struct kmem_cache_node local_node;
>  #endif
> +	struct kmem_cache_node *node[MAX_NUMNODES];
>  };
> 
>  /*
> Index: linux-2.6/mm/slub.c
> ===================================================================
> --- linux-2.6.orig/mm/slub.c	2010-08-20 12:09:19.000000000 -0500
> +++ linux-2.6/mm/slub.c	2010-08-20 12:31:11.000000000 -0500
> @@ -232,11 +232,7 @@ int slab_is_available(void)
> 
>  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
>  {
> -#ifdef CONFIG_NUMA
>  	return s->node[node];
> -#else
> -	return &s->local_node;
> -#endif
>  }
> 
>  /* Verify that a pointer has an address that is valid within a slab page */
> @@ -837,7 +833,7 @@ static inline void inc_slabs_node(struct
>  	 * dilemma by deferring the increment of the count during
>  	 * bootstrap (see early_kmem_cache_node_alloc).
>  	 */
> -	if (!NUMA_BUILD || n) {
> +	if (n) {
>  		atomic_long_inc(&n->nr_slabs);
>  		atomic_long_add(objects, &n->total_objects);
>  	}
> @@ -2071,7 +2067,6 @@ static inline int alloc_kmem_cache_cpus(
>  	return s->cpu_slab != NULL;
>  }
> 
> -#ifdef CONFIG_NUMA
>  static struct kmem_cache *kmem_cache_node;
> 
>  /*
> @@ -2161,17 +2156,6 @@ static int init_kmem_cache_nodes(struct
>  	}
>  	return 1;
>  }
> -#else
> -static void free_kmem_cache_nodes(struct kmem_cache *s)
> -{
> -}
> -
> -static int init_kmem_cache_nodes(struct kmem_cache *s)
> -{
> -	init_kmem_cache_node(&s->local_node, s);
> -	return 1;
> -}
> -#endif
> 
>  static void set_min_partial(struct kmem_cache *s, unsigned long min)
>  {
> @@ -2982,8 +2966,6 @@ void __init kmem_cache_init(void)
>  	int caches = 0;
>  	struct kmem_cache *temp_kmem_cache;
>  	int order;
> -
> -#ifdef CONFIG_NUMA
>  	struct kmem_cache *temp_kmem_cache_node;
>  	unsigned long kmalloc_size;
> 
> @@ -3007,12 +2989,6 @@ void __init kmem_cache_init(void)
>  		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
> 
>  	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
> -#else
> -	/* Allocate a single kmem_cache from the page allocator */
> -	kmem_size = sizeof(struct kmem_cache);
> -	order = get_order(kmem_size);
> -	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
> -#endif
> 
>  	/* Able to allocate the per node structures */
>  	slab_state = PARTIAL;
> @@ -3023,7 +2999,6 @@ void __init kmem_cache_init(void)
>  	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
>  	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
> 
> -#ifdef CONFIG_NUMA
>  	/*
>  	 * Allocate kmem_cache_node properly from the kmem_cache slab.
>  	 * kmem_cache_node is separately allocated so no need to
> @@ -3037,18 +3012,6 @@ void __init kmem_cache_init(void)
>  	kmem_cache_bootstrap_fixup(kmem_cache_node);
> 
>  	caches++;
> -#else
> -	/*
> -	 * kmem_cache has kmem_cache_node embedded and we moved it!
> -	 * Update the list heads
> -	 */
> -	INIT_LIST_HEAD(&kmem_cache->local_node.partial);
> -	list_splice(&temp_kmem_cache->local_node.partial, &kmem_cache->local_node.partial);
> -#ifdef CONFIG_SLUB_DEBUG
> -	INIT_LIST_HEAD(&kmem_cache->local_node.full);
> -	list_splice(&temp_kmem_cache->local_node.full, &kmem_cache->local_node.full);
> -#endif
> -#endif
>  	kmem_cache_bootstrap_fixup(kmem_cache);
>  	caches++;
>  	/* Free temporary boot structure */
> 

I really like this direction and I hope you push an updated version to 
Pekka, because it cleans up a lot of the recently added init code without 
sacrificing any memory footprint on UMA builds.
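
To spell that out: with !CONFIG_NUMA the unified array degenerates to a
single pointer, so the tail of struct kmem_cache ends up as sketched
below (field names as in the patch). The embedded local_node and its
#ifdef pair are gone, and the per-node structure is allocated from the
kmem_cache_node slab instead:

	/* before, UMA: node structure embedded in kmem_cache */
	struct kmem_cache_node local_node;

	/* after, UMA: MAX_NUMNODES == 1, so this is one pointer;
	 * get_node(s, 0) simply returns s->node[0]
	 */
	struct kmem_cache_node *node[1];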
