It is advantageous to have all defragmentable slab caches together at
the beginning of the slab cache list so that a defragmentation pass does
not need to scan the complete list. Therefore add new slab caches to the
tail of the list and move a cache to the head when it is marked
defragmentable.

Also determine the maximum number of objects in any defragmentable slab.
This allows sizing the scratch array that later patches use to hold
references to the objects in one slab.

Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>
---
 mm/slab_common.c |  2 +-
 mm/slub.c        | 25 +++++++++++++++++++++++++
 2 files changed, 26 insertions(+), 1 deletion(-)

Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -194,6 +194,9 @@ static inline bool kmem_cache_has_cpu_pa
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
+/* Maximum objects in defragmentable slabs */
+static unsigned int max_defrag_slab_objects;
+
 /*
  * Tracking user of a slab.
  */
@@ -4260,22 +4263,44 @@ int __kmem_cache_create(struct kmem_cach
 	return err;
 }
 
+/*
+ * Allocate a slab scratch space that is sufficient to keep at least
+ * max_defrag_slab_objects pointers to individual objects and also a bitmap
+ * for max_defrag_slab_objects.
+ */
+static inline void *alloc_scratch(void)
+{
+	return kmalloc(max_defrag_slab_objects * sizeof(void *) +
+		BITS_TO_LONGS(max_defrag_slab_objects) * sizeof(unsigned long),
+		GFP_KERNEL);
+}
+
 void kmem_cache_setup_defrag(struct kmem_cache *s,
 	kmem_defrag_get_func get, kmem_defrag_kick_func kick)
 {
+	int max_objects = oo_objects(s->max);
+
 	/*
 	 * Defragmentable slabs must have a ctor otherwise objects may be
 	 * in an undetermined state after they are allocated.
 	 */
 	BUG_ON(!s->ctor);
+	mutex_lock(&slab_mutex);
+
 	s->get = get;
 	s->kick = kick;
+
 	/*
 	 * Sadly serialization requirements currently mean that we have
 	 * to disable fast cmpxchg based processing.
 	 */
 	s->flags &= ~__CMPXCHG_DOUBLE;
+	list_move(&s->list, &slab_caches);	/* Move to top */
+	if (max_objects > max_defrag_slab_objects)
+		max_defrag_slab_objects = max_objects;
+
+	mutex_unlock(&slab_mutex);
 }
 EXPORT_SYMBOL(kmem_cache_setup_defrag);

Index: linux/mm/slab_common.c
===================================================================
--- linux.orig/mm/slab_common.c
+++ linux/mm/slab_common.c
@@ -384,7 +384,7 @@ static struct kmem_cache *create_cache(c
 		goto out_free_cache;
 
 	s->refcount = 1;
-	list_add(&s->list, &slab_caches);
+	list_add_tail(&s->list, &slab_caches);
 	memcg_link_cache(s);
 out:
 	if (err)
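
A worked example of the scratch space sizing (the numbers are
illustrative only, not from the patch): with max_defrag_slab_objects =
512 on a 64-bit kernel, alloc_scratch() asks kmalloc() for
512 * sizeof(void *) + BITS_TO_LONGS(512) * sizeof(unsigned long) =
4096 + 64 = 4160 bytes, i.e. one pointer per object plus one bit per
object rounded up to whole longs, presumably for the per-object state
that the later defragmentation patches track.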
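
For illustration, a minimal sketch of how a subsystem might register one
of its caches for defragmentation. The get()/kick() signatures are
assumed from the typedefs introduced earlier in this series (get() pins
the nr objects passed in and may return private state for kick(); kick()
tries to get rid of them). struct example_obj, example_ctor(),
example_evict() and the refcount-based pinning are invented for this
sketch and are not part of the patch:

	#include <linux/atomic.h>
	#include <linux/init.h>
	#include <linux/slab.h>

	struct example_obj {
		atomic_t refcount;
		/* ... actual payload would go here ... */
	};

	/* Invented helper: make the object freeable (drop it from caches etc). */
	static void example_evict(struct example_obj *obj);

	/* A ctor is mandatory: objects must be in a known state after alloc. */
	static void example_ctor(void *p)
	{
		struct example_obj *obj = p;

		atomic_set(&obj->refcount, 0);
	}

	/* Pin each object so it cannot be freed while kick() operates on it. */
	static void *example_get(struct kmem_cache *s, int nr, void **v)
	{
		int i;

		for (i = 0; i < nr; i++) {
			struct example_obj *obj = v[i];

			atomic_inc(&obj->refcount);
		}
		return NULL;	/* no private state needed by kick() */
	}

	/* Try to free up the objects, then drop the pins taken by get(). */
	static void example_kick(struct kmem_cache *s, int nr, void **v,
				void *private)
	{
		int i;

		for (i = 0; i < nr; i++) {
			struct example_obj *obj = v[i];

			example_evict(obj);
			atomic_dec(&obj->refcount);
		}
	}

	static struct kmem_cache *example_cache;

	static int __init example_init(void)
	{
		example_cache = kmem_cache_create("example_obj",
				sizeof(struct example_obj), 0,
				SLAB_RECLAIM_ACCOUNT, example_ctor);
		if (!example_cache)
			return -ENOMEM;

		kmem_cache_setup_defrag(example_cache, example_get,
					example_kick);
		return 0;
	}

With caches registered this way moved to the head of slab_caches and all
other caches added at the tail, a defragmentation pass can walk the list
from the front and stop at the first cache without a get() method
instead of scanning every cache.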