The later patches will need more fine-grained control over individual
actions in ___slab_alloc(), the only caller of new_slab_objects(), so
this is a first preparatory step with no functional change.

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/slub.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 7b4cdc59b9ff..7973bcd42bc7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2574,17 +2574,12 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 			int node, struct kmem_cache_cpu **pc)
 {
-	void *freelist;
+	void *freelist = NULL;
 	struct kmem_cache_cpu *c = *pc;
 	struct page *page;
 
 	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
 
-	freelist = get_partial(s, flags, node, c);
-
-	if (freelist)
-		return freelist;
-
 	page = new_slab(s, flags, node);
 	if (page) {
 		c = raw_cpu_ptr(s->cpu_slab);
@@ -2748,6 +2743,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto redo;
 	}
 
+	freelist = get_partial(s, gfpflags, node, c);
+	if (freelist)
+		goto check_new_page;
+
 	freelist = new_slab_objects(s, gfpflags, node, &c);
 
 	if (unlikely(!freelist)) {
@@ -2755,6 +2754,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		return NULL;
 	}
 
+check_new_page:
 	page = c->page;
 	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
 		goto load_freelist;
-- 
2.31.1
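
For reference, a condensed sketch of how the slow path reads once the
patch is applied. This is not the literal mm/slub.c code (locking,
debug and percpu handling are elided); it only shows the ordering of
the calls that the diff above establishes:

	/*
	 * Condensed, illustrative view of the resulting ___slab_alloc()
	 * tail: get_partial() is now attempted directly by the caller,
	 * and new_slab_objects() is left to allocate a brand new slab.
	 */
	freelist = get_partial(s, gfpflags, node, c);
	if (freelist)
		goto check_new_page;	/* got objects from a partial slab */

	freelist = new_slab_objects(s, gfpflags, node, &c);
	if (unlikely(!freelist)) {
		slab_out_of_memory(s, gfpflags, node);
		return NULL;
	}

check_new_page:
	page = c->page;			/* both paths converge here */
	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
		goto load_freelist;

Both the partial-list path and the new-slab path now converge at the
check_new_page label inside ___slab_alloc() itself, which is the
per-action control point the later patches build on.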