From: Vlastimil Babka <vbabka@xxxxxxx>
Subject: mm, slub: allocate private object map for validate_slab_cache()

validate_slab_cache() is called either to handle a sysfs write, or from a
self-test context. In both situations it's straightforward to preallocate
a private object bitmap instead of grabbing the shared static one meant
for critical sections, so let's do that.

Link: https://lkml.kernel.org/r/20210805152000.12817-4-vbabka@xxxxxxx
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
Acked-by: Christoph Lameter <cl@xxxxxxxxx>
Acked-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Mike Galbraith <efault@xxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/slub.c |   24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

--- a/mm/slub.c~mm-slub-allocate-private-object-map-for-validate_slab_cache
+++ a/mm/slub.c
@@ -4679,11 +4679,11 @@ static int count_total(struct page *page
 #endif
 
 #ifdef CONFIG_SLUB_DEBUG
-static void validate_slab(struct kmem_cache *s, struct page *page)
+static void validate_slab(struct kmem_cache *s, struct page *page,
+			  unsigned long *obj_map)
 {
 	void *p;
 	void *addr = page_address(page);
-	unsigned long *map;
 
 	slab_lock(page);
 
@@ -4691,21 +4691,20 @@ static void validate_slab(struct kmem_ca
 		goto unlock;
 
 	/* Now we know that a valid freelist exists */
-	map = get_map(s, page);
+	__fill_map(obj_map, s, page);
 	for_each_object(p, s, addr, page->objects) {
-		u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
+		u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
 
 		if (!check_object(s, page, p, val))
			break;
 	}
-	put_map(map);
 unlock:
 	slab_unlock(page);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
-		struct kmem_cache_node *n)
+		struct kmem_cache_node *n, unsigned long *obj_map)
 {
 	unsigned long count = 0;
 	struct page *page;
@@ -4714,7 +4713,7 @@ static int validate_slab_node(struct kme
 	spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, slab_list) {
-		validate_slab(s, page);
+		validate_slab(s, page, obj_map);
 		count++;
 	}
 	if (count != n->nr_partial) {
@@ -4727,7 +4726,7 @@ static int validate_slab_node(struct kme
 		goto out;
 
 	list_for_each_entry(page, &n->full, slab_list) {
-		validate_slab(s, page);
+		validate_slab(s, page, obj_map);
 		count++;
 	}
 	if (count != atomic_long_read(&n->nr_slabs)) {
@@ -4746,10 +4745,17 @@ long validate_slab_cache(struct kmem_cac
 	int node;
 	unsigned long count = 0;
 	struct kmem_cache_node *n;
+	unsigned long *obj_map;
+
+	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
+	if (!obj_map)
+		return -ENOMEM;
 
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n)
-		count += validate_slab_node(s, n);
+		count += validate_slab_node(s, n, obj_map);
+
+	bitmap_free(obj_map);
 
 	return count;
 }
_
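
For anyone skimming the diff: the whole change is the usual "hoist the
scratch allocation out of the loop" pattern, with the caller owning the
bitmap and passing it down. A rough standalone sketch of the same shape
follows. It is illustrative only, not kernel code: check_slab(),
walk_slabs() and NR_OBJ are made-up names for this example, and plain
calloc()/free() stand in for bitmap_alloc()/bitmap_free().

#include <limits.h>
#include <stdlib.h>

#define NR_OBJ        512	/* made-up upper bound on objects per slab */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define LONGS(nbits)  (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct slab;			/* opaque for the purposes of the sketch */

/* Stand-in for validate_slab(): operates on the caller's private map. */
static void check_slab(struct slab *slab, unsigned long *obj_map)
{
	/* fill obj_map from the freelist, then check each object ... */
}

/* Stand-in for validate_slab_cache(): allocate once, reuse, free once. */
static int walk_slabs(struct slab **slabs, int nr)
{
	unsigned long *obj_map = calloc(LONGS(NR_OBJ), sizeof(*obj_map));

	if (!obj_map)
		return -1;	/* the kernel code returns -ENOMEM here */

	for (int i = 0; i < nr; i++)
		check_slab(slabs[i], obj_map);

	free(obj_map);
	return 0;
}

Because neither caller is in a critical section, a sleeping GFP_KERNEL
allocation up front is fine, and the shared static map (and its lock)
is no longer needed on this path.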