Continue reducing the irq disabled scope. Check for per-cpu partial
slabs first with irqs enabled, and then recheck with irqs disabled
before grabbing the slab page. Mostly preparatory for the following
patches.

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/slub.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index e243a991b15b..16f3cbb81627 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2653,11 +2653,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		if (unlikely(node != NUMA_NO_NODE &&
 			     !node_isset(node, slab_nodes)))
 			node = NUMA_NO_NODE;
-		local_irq_save(flags);
-		if (unlikely(c->page)) {
-			local_irq_restore(flags);
-			goto reread_page;
-		}
 		goto new_slab;
 	}
 redo:
@@ -2698,6 +2693,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	if (!freelist) {
 		c->page = NULL;
+		local_irq_restore(flags);
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
 	}
@@ -2723,10 +2719,19 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto reread_page;
 	}
 	deactivate_slab(s, page, c->freelist, c);
+	local_irq_restore(flags);
 
 new_slab:
 
 	if (slub_percpu_partial(c)) {
+		local_irq_save(flags);
+		if (unlikely(c->page)) {
+			local_irq_restore(flags);
+			goto reread_page;
+		}
+		if (unlikely(!slub_percpu_partial(c))) /* stolen by IRQ? */
+			goto new_objects;
+
 		page = c->page = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, page);
 		local_irq_restore(flags);
@@ -2734,6 +2739,14 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto redo;
 	}
 
+	local_irq_save(flags);
+	if (unlikely(c->page)) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
+
+new_objects:
+
 	freelist = get_partial(s, gfpflags, node, &page);
 	if (freelist) {
 		c->page = page;
@@ -2767,15 +2780,18 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 check_new_page:
 
 	if (kmem_cache_debug(s)) {
-		if (!alloc_debug_processing(s, page, freelist, addr))
+		if (!alloc_debug_processing(s, page, freelist, addr)) {
 			/* Slab failed checks. Next slab needed */
+			c->page = NULL;
+			local_irq_restore(flags);
 			goto new_slab;
-		else
+		} else {
 			/*
 			 * For debug case, we don't load freelist so that all
 			 * allocations go through alloc_debug_processing()
 			 */
 			goto return_single;
+		}
 	}
 
 	if (unlikely(!pfmemalloc_match(page, gfpflags)))
-- 
2.31.1
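
For illustration, here is a minimal standalone C sketch of the
check/recheck pattern this patch applies to the per-cpu partial list:
peek with irqs enabled, then disable irqs and revalidate both c->page
and the partial list before committing, since an irq handler may have
changed either one in the window. The type and helper names below
(struct cpu_cache, fake_irq_save(), try_percpu_partial()) are
illustrative stand-ins, not the real mm/slub.c API.

#include <stddef.h>

/* Stand-ins for local_irq_save()/local_irq_restore(); no-ops here. */
static unsigned long fake_irq_save(void) { return 0; }
static void fake_irq_restore(unsigned long flags) { (void)flags; }

struct page { struct page *next; };

/* Simplified stand-in for struct kmem_cache_cpu. */
struct cpu_cache {
	struct page *page;	/* current per-cpu slab */
	struct page *partial;	/* per-cpu partial list */
};

/*
 * Peek at the partial list with irqs enabled; only disable irqs to
 * revalidate and actually take a page. Returns the grabbed page, or
 * NULL when the caller must fall back to a slower allocation path.
 */
static struct page *try_percpu_partial(struct cpu_cache *c)
{
	unsigned long flags;
	struct page *page;

	if (!c->partial)		/* cheap peek, irqs still enabled */
		return NULL;

	flags = fake_irq_save();
	if (c->page) {
		/*
		 * An irq handler installed a slab meanwhile; the real
		 * code does "goto reread_page" to retry from c->page.
		 */
		fake_irq_restore(flags);
		return c->page;
	}
	if (!c->partial) {
		/*
		 * Stolen by an irq handler between peek and recheck;
		 * the real code falls through to get_partial().
		 */
		fake_irq_restore(flags);
		return NULL;
	}
	page = c->page = c->partial;	/* commit under disabled irqs */
	c->partial = page->next;	/* like slub_set_percpu_partial() */
	fake_irq_restore(flags);
	return page;
}

int main(void)
{
	struct page p = { .next = NULL };
	struct cpu_cache c = { .page = NULL, .partial = &p };

	return try_percpu_partial(&c) == &p ? 0 : 1;
}

The same shape recurs in the hunk before get_partial(): c->page is
rechecked under disabled irqs before the slow path is entered.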