Next step: consume all of the available per cpu objects.

Expand the bulk allocation support to drain the per cpu partial
pages while interrupts are off.

Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>

Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -2771,15 +2771,45 @@ bool kmem_cache_alloc_bulk(struct kmem_c
 	while (size) {
 		void *object = c->freelist;
 
-		if (!object)
-			break;
+		if (unlikely(!object)) {
+			/*
+			 * Check if there are remotely freed objects
+			 * available in the page.
+			 */
+			object = get_freelist(s, c->page);
+
+			if (!object) {
+				/*
+				 * All objects are in use; check if
+				 * we have other per cpu partial
+				 * pages that have available
+				 * objects.
+				 */
+				c->page = c->partial;
+				if (!c->page) {
+					/* No per cpu objects left */
+					c->freelist = NULL;
+					break;
+				}
+
+				/* Next per cpu partial page */
+				c->partial = c->page->next;
+				c->freelist = get_freelist(s,
+							c->page);
+				continue;
+			}
+		}
 
-		c->freelist = get_freepointer(s, object);
 		*p++ = object;
 		size--;
 
 		if (unlikely(flags & __GFP_ZERO))
 			memset(object, 0, s->object_size);
+
+		c->freelist = get_freepointer(s, object);
+
 	}
 	c->tid = next_tid(c->tid);
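
For context, a minimal caller-side sketch of how the bulk API is meant
to be used. This is not part of the patch: the cache and helper names
are hypothetical, and it assumes the bool-returning
kmem_cache_alloc_bulk()/kmem_cache_free_bulk() pair introduced earlier
in this series.

	#include <linux/slab.h>
	#include <linux/errno.h>

	#define BATCH_SIZE 16

	/* Hypothetical helper: fill objs[] with BATCH_SIZE objects. */
	static int fill_batch(struct kmem_cache *my_cache, void **objs)
	{
		/*
		 * In this series the call returns true only when all
		 * BATCH_SIZE objects were allocated; __GFP_ZERO is
		 * honored per object in the fast path above.
		 */
		if (!kmem_cache_alloc_bulk(my_cache,
					   GFP_KERNEL | __GFP_ZERO,
					   BATCH_SIZE, objs))
			return -ENOMEM;
		return 0;
	}

The whole batch can later be returned with the matching bulk free:

	kmem_cache_free_bulk(my_cache, BATCH_SIZE, objs);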