When we obtain a new slab page from node partial list or page allocator,
we assign it to kmem_cache_cpu, perform some checks, and if they fail,
we undo the assignment.

In order to allow doing the checks without irq disabled, restructure the
code so that the checks are done first, and the kmem_cache_cpu.page
assignment only after they pass.

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/slub.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 43f38fd47062..c1a88ad4048b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2772,10 +2772,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	lockdep_assert_irqs_disabled();
 
 	freelist = get_partial(s, gfpflags, node, &page);
-	if (freelist) {
-		c->page = page;
+	if (freelist)
 		goto check_new_page;
-	}
 
 	local_irq_restore(flags);
 	put_cpu_ptr(s->cpu_slab);
@@ -2788,9 +2786,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	}
 
 	local_irq_save(flags);
-	if (c->page)
-		flush_slab(s, c);
-
 	/*
 	 * No other reference to the page yet so we can
 	 * muck around with it freely without cmpxchg
@@ -2799,14 +2794,12 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	page->freelist = NULL;
 
 	stat(s, ALLOC_SLAB);
-	c->page = page;
 
 check_new_page:
 
 	if (kmem_cache_debug(s)) {
 		if (!alloc_debug_processing(s, page, freelist, addr)) {
 			/* Slab failed checks. Next slab needed */
-			c->page = NULL;
 			local_irq_restore(flags);
 			goto new_slab;
 		} else {
@@ -2825,10 +2818,18 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 */
 		goto return_single;
 
+	if (unlikely(c->page))
+		flush_slab(s, c);
+	c->page = page;
+
 	goto load_freelist;
 
 return_single:
 
+	if (unlikely(c->page))
+		flush_slab(s, c);
+	c->page = page;
+
 	deactivate_slab(s, page, get_freepointer(s, freelist), c);
 	local_irq_restore(flags);
 	return freelist;
-- 
2.32.0
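
[Note: the following is not part of the patch. It is a minimal, standalone
sketch of the general check-before-publish pattern the patch moves to:
validate a newly obtained page first, and only assign it to the per-CPU
slot once the checks pass, so a failing page never has to be unassigned.
All names here (struct cpu_cache, get_new_page(), page_ok(), acquire())
are made-up stand-ins, not SLUB APIs; page_ok() merely plays the role of
alloc_debug_processing(), and the real code additionally handles flushing
an already-present c->page and the irq/preemption state.]

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page { int id; bool valid; };
struct cpu_cache { struct page *page; };

/* Stand-in for obtaining a page from a partial list or the allocator. */
static struct page *get_new_page(void)
{
	struct page *p = malloc(sizeof(*p));
	if (p) {
		p->id = rand();
		p->valid = (p->id % 7) != 0;	/* pretend some pages fail checks */
	}
	return p;
}

/* Stand-in for the debug checks (alloc_debug_processing() in SLUB). */
static bool page_ok(struct page *p)
{
	return p && p->valid;
}

static struct page *acquire(struct cpu_cache *c)
{
	struct page *page;

	for (;;) {
		page = get_new_page();
		if (!page)
			return NULL;
		/* Checks run first, before anything is assigned to the cache... */
		if (page_ok(page))
			break;
		/* ...so a failed page is simply discarded; no assignment to undo. */
		free(page);
	}
	/* Only a page that passed the checks is published to the cache slot. */
	c->page = page;
	return page;
}

int main(void)
{
	struct cpu_cache c = { .page = NULL };
	struct page *p = acquire(&c);

	if (p)
		printf("cached page id=%d\n", p->id);
	free(p);
	return 0;
}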