From: Vlastimil Babka <vbabka@xxxxxxx>
Subject: mm, slub: dissolve new_slab_objects() into ___slab_alloc()

The later patches will need more fine-grained control over individual
actions in ___slab_alloc(), the only caller of new_slab_objects(), so
dissolve it there.  This is a preparatory step with no functional change.

The only minor change is moving WARN_ON_ONCE() for using a constructor
together with __GFP_ZERO to new_slab(), which makes the check somewhat
less frequent, but still able to catch a development change introducing
a systematic misuse.

Link: https://lkml.kernel.org/r/20210904105003.11688-8-vbabka@xxxxxxx
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
Acked-by: Christoph Lameter <cl@xxxxxxxxx>
Acked-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Mike Galbraith <efault@xxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: Qian Cai <quic_qiancai@xxxxxxxxxxx>
Cc: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/slub.c |   50 ++++++++++++++++++--------------------------------
 1 file changed, 18 insertions(+), 32 deletions(-)

--- a/mm/slub.c~mm-slub-dissolve-new_slab_objects-into-___slab_alloc
+++ a/mm/slub.c
@@ -1885,6 +1885,8 @@ static struct page *new_slab(struct kmem
 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
 		flags = kmalloc_fix_flags(flags);
 
+	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
+
 	return allocate_slab(s,
 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 }
@@ -2610,36 +2612,6 @@ slab_out_of_memory(struct kmem_cache *s,
 #endif
 }
 
-static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
-			int node, struct kmem_cache_cpu **pc)
-{
-	void *freelist = NULL;
-	struct kmem_cache_cpu *c = *pc;
-	struct page *page;
-
-	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
-
-	page = new_slab(s, flags, node);
-	if (page) {
-		c = raw_cpu_ptr(s->cpu_slab);
-		if (c->page)
-			flush_slab(s, c);
-
-		/*
-		 * No other reference to the page yet so we can
-		 * muck around with it freely without cmpxchg
-		 */
-		freelist = page->freelist;
-		page->freelist = NULL;
-
-		stat(s, ALLOC_SLAB);
-		c->page = page;
-		*pc = c;
-	}
-
-	return freelist;
-}
-
 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 {
 	if (unlikely(PageSlabPfmemalloc(page)))
@@ -2786,13 +2758,27 @@ new_slab:
 	if (freelist)
 		goto check_new_page;
 
-	freelist = new_slab_objects(s, gfpflags, node, &c);
+	page = new_slab(s, gfpflags, node);
 
-	if (unlikely(!freelist)) {
+	if (unlikely(!page)) {
		slab_out_of_memory(s, gfpflags, node);
		return NULL;
	}
 
+	c = raw_cpu_ptr(s->cpu_slab);
+	if (c->page)
+		flush_slab(s, c);
+
+	/*
+	 * No other reference to the page yet so we can
+	 * muck around with it freely without cmpxchg
+	 */
+	freelist = page->freelist;
+	page->freelist = NULL;
+
+	stat(s, ALLOC_SLAB);
+	c->page = page;
+
 check_new_page:
 	page = c->page;
 	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
_
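
[Editor's note] For readers unfamiliar with the check being moved: a
cache constructor pre-initializes each object, so requesting __GFP_ZERO
from such a cache is a bug, as zeroing would wipe out whatever the
constructor set up.  Below is a minimal, hypothetical sketch of the
misuse the relocated WARN_ON_ONCE() catches; the names (my_obj, my_ctor,
demo_cache, ctor_zero_demo) are made up for illustration and are not
part of this patch:

#include <linux/init.h>
#include <linux/slab.h>

struct my_obj {
	int state;
};

/* constructor pre-initializes every object handed out by the cache */
static void my_ctor(void *obj)
{
	struct my_obj *o = obj;

	o->state = 1;
}

static int __init ctor_zero_demo(void)
{
	struct kmem_cache *s;
	struct my_obj *p;

	s = kmem_cache_create("demo_cache", sizeof(struct my_obj),
			      0, 0, my_ctor);
	if (!s)
		return -ENOMEM;

	/*
	 * Bug: __GFP_ZERO would zero the object and destroy what
	 * my_ctor set up.  After this patch, the WARN_ON_ONCE() fires
	 * in new_slab(), i.e. only when a fresh slab page has to be
	 * allocated, rather than on every new_slab_objects() call as
	 * before.
	 */
	p = kmem_cache_alloc(s, GFP_KERNEL | __GFP_ZERO);

	if (p)
		kmem_cache_free(s, p);
	kmem_cache_destroy(s);
	return 0;
}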