The current kmem_cache/SLAB bulking API needs to release all objects in
case the layer cannot satisfy the full request.

If __kmem_cache_alloc_bulk() fails, all objects already allocated into the
array should be freed, but __kmem_cache_alloc_bulk() cannot know about the
objects allocated by this SLUB-specific kmem_cache_alloc_bulk() function.

Signed-off-by: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
---
 mm/slub.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 753f88bd8b40..d10de5a33c03 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2760,24 +2760,27 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			void **p)
 {
 	struct kmem_cache_cpu *c;
+	int i;
 
 	/* Debugging fallback to generic bulk */
 	if (kmem_cache_debug(s))
 		return __kmem_cache_alloc_bulk(s, flags, size, p);
 
-	/* Drain objects in the per cpu slab */
+	/* Drain objects in the per cpu slab, while disabling local
+	 * IRQs, which protects against PREEMPT and interrupt
+	 * handlers invoking the normal fastpath.
+	 */
 	local_irq_disable();
 	c = this_cpu_ptr(s->cpu_slab);
 
-	while (size) {
+	for (i = 0; i < size; i++) {
 		void *object = c->freelist;
 
 		if (!object)
 			break;
 
 		c->freelist = get_freepointer(s, object);
-		*p++ = object;
-		size--;
+		p[i] = object;
 
 		if (unlikely(flags & __GFP_ZERO))
 			memset(object, 0, s->object_size);
@@ -2785,7 +2788,15 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
 
-	return __kmem_cache_alloc_bulk(s, flags, size, p);
+	/* Fallback to single elem alloc */
+	for (; i < size; i++) {
+		void *x = p[i] = kmem_cache_alloc(s, flags);
+		if (unlikely(!x)) {
+			__kmem_cache_free_bulk(s, i, p);
+			return false;
+		}
+	}
+	return true;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
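
For reference, a minimal caller-side sketch of the all-or-nothing contract this
patch preserves: either every slot of the array is filled, or
kmem_cache_alloc_bulk() returns false (the bool return matches the version
being patched here) and nothing is left allocated. The cache name, struct
my_obj and bulk_alloc_example() below are made up for illustration and
untested:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_obj {
	int val;
};

static int bulk_alloc_example(void)
{
	struct kmem_cache *cachep;
	void *objs[16];
	size_t i;

	/* Hypothetical cache, created only for this example */
	cachep = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
				   0, 0, NULL);
	if (!cachep)
		return -ENOMEM;

	/*
	 * Either all ARRAY_SIZE(objs) slots are filled, or the call
	 * fails having freed everything it allocated, so the error
	 * path needs no partial cleanup of objs[].
	 */
	if (!kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
		kmem_cache_destroy(cachep);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(objs); i++)
		((struct my_obj *)objs[i])->val = i;

	kmem_cache_free_bulk(cachep, ARRAY_SIZE(objs), objs);
	kmem_cache_destroy(cachep);
	return 0;
}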