Continue reducing the irq disabled scope. Check for per-cpu partial
slabs first with irqs enabled and then recheck with irqs disabled
before grabbing the slab page. Mostly preparatory for the following
patches.

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/slub.c | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 22b5bfef8aa6..acb5e8f1d9da 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2633,11 +2633,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		if (unlikely(node != NUMA_NO_NODE &&
 			     !node_isset(node, slab_nodes)))
 			node = NUMA_NO_NODE;
-		local_irq_save(flags);
-		if (unlikely(c->page)) {
-			local_irq_restore(flags);
-			goto reread_page;
-		}
 		goto new_slab;
 	}
 redo:
@@ -2678,6 +2673,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	if (!freelist) {
 		c->page = NULL;
+		local_irq_restore(flags);
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
 	}
@@ -2707,12 +2703,19 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto reread_page;
 	}
 	deactivate_slab(s, page, c->freelist, c);
+	local_irq_restore(flags);
 
 new_slab:
 
-	lockdep_assert_irqs_disabled();
-
 	if (slub_percpu_partial(c)) {
+		local_irq_save(flags);
+		if (unlikely(c->page)) {
+			local_irq_restore(flags);
+			goto reread_page;
+		}
+		if (unlikely(!slub_percpu_partial(c)))
+			goto new_objects; /* stolen by an IRQ handler */
+
 		page = c->page = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, page);
 		local_irq_restore(flags);
@@ -2720,6 +2723,16 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto redo;
 	}
 
+	local_irq_save(flags);
+	if (unlikely(c->page)) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
+
+new_objects:
+
+	lockdep_assert_irqs_disabled();
+
 	freelist = get_partial(s, gfpflags, node, &page);
 	if (freelist) {
 		c->page = page;
@@ -2752,15 +2765,18 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 check_new_page:
 	if (kmem_cache_debug(s)) {
-		if (!alloc_debug_processing(s, page, freelist, addr))
+		if (!alloc_debug_processing(s, page, freelist, addr)) {
 			/* Slab failed checks. Next slab needed */
+			c->page = NULL;
+			local_irq_restore(flags);
 			goto new_slab;
-		else
+		} else {
 			/*
 			 * For debug case, we don't load freelist so that all
 			 * allocations go through alloc_debug_processing()
 			 */
 			goto return_single;
+		}
 	}
 
 	if (unlikely(!pfmemalloc_match(page, gfpflags)))
-- 
2.31.1
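
For reference, a condensed sketch of the new_slab path as it reads with
this patch applied (simplified for illustration: the reread_page/redo
targets and surrounding code are elided, and the comments are added here
rather than taken from the source). The recheck of c->page under disabled
irqs is what makes the optimistic peek safe: an interrupt handler
allocating from the same cache can install c->page or consume the percpu
partial list between the peek and the local_irq_save().

new_slab:
	/* Peek at the percpu partial list optimistically, irqs still on. */
	if (slub_percpu_partial(c)) {
		local_irq_save(flags);
		/* Recheck: an irq handler may have installed c->page ... */
		if (unlikely(c->page)) {
			local_irq_restore(flags);
			goto reread_page;
		}
		/*
		 * ... or emptied the partial list meanwhile. Note irqs stay
		 * disabled across this goto, as new_objects asserts below.
		 */
		if (unlikely(!slub_percpu_partial(c)))
			goto new_objects; /* stolen by an IRQ handler */

		/* Irqs are disabled, so the slab page can be grabbed now. */
		page = c->page = slub_percpu_partial(c);
		slub_set_percpu_partial(c, page);
		local_irq_restore(flags);
		stat(s, CPU_PARTIAL_ALLOC);
		goto redo;
	}

	/* No percpu partial slab: disable irqs before get_partial(). */
	local_irq_save(flags);
	if (unlikely(c->page)) {
		local_irq_restore(flags);
		goto reread_page;
	}

new_objects:

	lockdep_assert_irqs_disabled();

	freelist = get_partial(s, gfpflags, node, &page);
	...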