Make it possible to free a freelist with several objects by extending
__slab_free() with two arguments: a freelist_head pointer and an object
counter (cnt).  If the freelist_head pointer is set, then the object (x)
must be the freelist's tail pointer.  This allows a list of objects to be
freed using a single locked cmpxchg_double.

Micro benchmarking showed no performance reduction due to this change.

Signed-off-by: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
---
 mm/slub.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index c9305f525004..d0841a4c61ea 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2573,9 +2573,13 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
  * So we still attempt to reduce cache line usage. Just take the slab
  * lock and free the item. If there is no additional partial page
  * handling required then we can return immediately.
+ *
+ * Bulk free of a freelist with several objects possible by specifying
+ * freelist_head ptr and object as tail ptr, plus objects count (cnt).
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, unsigned long addr)
+			void *x, unsigned long addr,
+			void *freelist_head, int cnt)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -2584,6 +2588,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	unsigned long counters;
 	struct kmem_cache_node *n = NULL;
 	unsigned long uninitialized_var(flags);
+	void *new_freelist = (!freelist_head) ? object : freelist_head;
 
 	stat(s, FREE_SLOWPATH);
 
@@ -2601,7 +2606,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		set_freepointer(s, object, prior);
 		new.counters = counters;
 		was_frozen = new.frozen;
-		new.inuse--;
+		new.inuse -= cnt;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
 			if (kmem_cache_has_cpu_partial(s) && !prior) {
@@ -2632,7 +2637,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	} while (!cmpxchg_double_slab(s, page,
 		prior, counters,
-		object, new.counters,
+		new_freelist, new.counters,
 		"__slab_free"));
 
 	if (likely(!n)) {
@@ -2736,7 +2741,7 @@ redo:
 		}
 		stat(s, FREE_FASTPATH);
 	} else
-		__slab_free(s, page, x, addr);
+		__slab_free(s, page, x, addr, NULL, 1);
 
 }
 
@@ -2780,7 +2785,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 			c->tid = next_tid(c->tid);
 			local_irq_enable();
 			/* Slowpath: overhead locked cmpxchg_double_slab */
-			__slab_free(s, page, object, _RET_IP_);
+			__slab_free(s, page, object, _RET_IP_, NULL, 1);
 			local_irq_disable();
 			c = this_cpu_ptr(s->cpu_slab);
 		}
--
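
For illustration, here is a minimal caller sketch of how the new arguments
would be used.  It is not part of the patch; the helper name
bulk_free_same_page is hypothetical.  It frees nr objects that all belong
to the same page with a single locked cmpxchg_double by linking them into
a freelist and handing head, tail and count to __slab_free():

	/*
	 * Hypothetical helper, for illustration only: frees @nr objects
	 * that all belong to @page with one __slab_free() call.
	 */
	static void bulk_free_same_page(struct kmem_cache *s, struct page *page,
					void **objs, int nr, unsigned long addr)
	{
		int i;

		/* Link objs[0] -> objs[1] -> ... -> objs[nr - 1] */
		for (i = 0; i < nr - 1; i++)
			set_freepointer(s, objs[i], objs[i + 1]);

		/*
		 * The tail's freepointer is left untouched here; __slab_free()
		 * points it at the page's prior freelist via
		 * set_freepointer(s, object, prior).
		 */
		__slab_free(s, page, objs[nr - 1], addr, objs[0], nr);
	}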