When we obtain a new slab page from the node partial list or the page
allocator, we assign it to kmem_cache_cpu, perform some checks, and if
they fail, we undo the assignment.

To allow doing the checks without irqs disabled, restructure the code so
that the checks go first and the kmem_cache_cpu assignment happens only
afterwards.

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/slub.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 47a3438d6a35..78d7eb5be951 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2743,10 +2743,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 new_objects:
 
 	freelist = get_partial(s, gfpflags, node, &page);
-	if (freelist) {
-		c->page = page;
+	if (freelist)
 		goto check_new_page;
-	}
 
 	local_irq_restore(flags);
 	migrate_enable();
@@ -2760,9 +2758,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	}
 
 	local_irq_save(flags);
-	if (c->page)
-		flush_slab(s, c);
-
 	/*
 	 * No other reference to the page yet so we can
 	 * muck around with it freely without cmpxchg
@@ -2771,14 +2766,12 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	page->freelist = NULL;
 
 	stat(s, ALLOC_SLAB);
-	c->page = page;
 
 check_new_page:
 
 	if (kmem_cache_debug(s)) {
 		if (!alloc_debug_processing(s, page, freelist, addr)) {
 			/* Slab failed checks. Next slab needed */
-			c->page = NULL;
 			local_irq_restore(flags);
 			goto new_slab;
 		} else {
@@ -2797,10 +2790,18 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 */
 		goto return_single;
 
+	if (unlikely(c->page))
+		flush_slab(s, c);
+	c->page = page;
+
 	goto load_freelist;
 
 return_single:
 
+	if (unlikely(c->page))
+		flush_slab(s, c);
+	c->page = page;
+
 	deactivate_slab(s, page, get_freepointer(s, freelist), c);
 	local_irq_restore(flags);
 	return freelist;
-- 
2.31.1
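
For illustration only, here is a minimal stand-alone sketch of the
reordering the patch makes; the types and helpers below are simplified
stand-ins, not the real kmem_cache_cpu / flush_slab /
alloc_debug_processing code. The point it demonstrates: the checks on the
new page run before any assignment, so a failed check needs no undo, and
only a page that passed the checks replaces (after flushing) whatever the
per-CPU structure currently holds.

/*
 * User-space sketch of "checks first, assignment afterwards".
 * Everything here is a hypothetical, simplified stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

struct page { bool valid; };
struct cpu_slab { struct page *page; };

/* stand-in for alloc_debug_processing(): do the debug checks pass? */
static bool page_passes_checks(struct page *page)
{
	return page->valid;
}

/* stand-in for flush_slab(): give back the currently assigned page */
static void flush_current(struct cpu_slab *c)
{
	printf("flushing old page %p\n", (void *)c->page);
	c->page = NULL;
}

/* checks go first; the per-CPU assignment happens only on success */
static bool install_new_page(struct cpu_slab *c, struct page *page)
{
	if (!page_passes_checks(page))
		return false;	/* nothing to undo: c->page was never touched */

	if (c->page)		/* mirrors the unlikely(c->page) flush in the patch */
		flush_current(c);
	c->page = page;
	return true;
}

int main(void)
{
	struct cpu_slab c = { 0 };
	struct page good = { .valid = true }, bad = { .valid = false };

	printf("bad page installed: %d\n", install_new_page(&c, &bad));
	printf("good page installed: %d\n", install_new_page(&c, &good));
	return 0;
}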