Use the kernel's early-return style to reduce the indentation level, by
testing for kmem_cache_debug() and falling back to the non-optimized
bulk path via __kmem_cache_alloc_bulk(). This also makes it easier to
fix a bug in the current implementation, in the next patch.

Signed-off-by: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
---
 mm/slub.c | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index d18f8e195ac4..753f88bd8b40 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2757,32 +2757,33 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
 bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
-			   void **p)
+				void **p)
 {
-	if (!kmem_cache_debug(s)) {
-		struct kmem_cache_cpu *c;
+	struct kmem_cache_cpu *c;
 
-		/* Drain objects in the per cpu slab */
-		local_irq_disable();
-		c = this_cpu_ptr(s->cpu_slab);
+	/* Debugging fallback to generic bulk */
+	if (kmem_cache_debug(s))
+		return __kmem_cache_alloc_bulk(s, flags, size, p);
 
-		while (size) {
-			void *object = c->freelist;
+	/* Drain objects in the per cpu slab */
+	local_irq_disable();
+	c = this_cpu_ptr(s->cpu_slab);
 
-			if (!object)
-				break;
+	while (size) {
+		void *object = c->freelist;
 
-			c->freelist = get_freepointer(s, object);
-			*p++ = object;
-			size--;
+		if (!object)
+			break;
 
-			if (unlikely(flags & __GFP_ZERO))
-				memset(object, 0, s->object_size);
-		}
-		c->tid = next_tid(c->tid);
+		c->freelist = get_freepointer(s, object);
+		*p++ = object;
+		size--;
 
-		local_irq_enable();
+		if (unlikely(flags & __GFP_ZERO))
+			memset(object, 0, s->object_size);
 	}
+	c->tid = next_tid(c->tid);
+	local_irq_enable();
 
 	return __kmem_cache_alloc_bulk(s, flags, size, p);
 }
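For readability, this is how kmem_cache_alloc_bulk() reads with the patch
applied. It is assembled from the hunk above, so blank-line placement is
approximate and it is not a substitute for the diff itself:

bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
				void **p)
{
	struct kmem_cache_cpu *c;

	/* Debugging fallback to generic bulk */
	if (kmem_cache_debug(s))
		return __kmem_cache_alloc_bulk(s, flags, size, p);

	/* Drain objects in the per cpu slab */
	local_irq_disable();
	c = this_cpu_ptr(s->cpu_slab);

	while (size) {
		void *object = c->freelist;

		if (!object)
			break;

		c->freelist = get_freepointer(s, object);
		*p++ = object;
		size--;

		if (unlikely(flags & __GFP_ZERO))
			memset(object, 0, s->object_size);
	}
	c->tid = next_tid(c->tid);
	local_irq_enable();

	return __kmem_cache_alloc_bulk(s, flags, size, p);
}

The early return keeps the common (non-debug) path at a single indentation
level, and because size and p are advanced inside the loop, the final
__kmem_cache_alloc_bulk() call only has to supply whatever the per-cpu
freelist could not.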