In the current code, new_slab_objects() is always called in the following
context:

    local_irq_save/disable()
      ___slab_alloc()
        new_slab_objects()
    local_irq_restore/enable()

With interrupts disabled for the whole sequence, the CPU keeps running until
the job is finished and yields control only afterwards, so the cpu_slab
re-fetched in new_slab_objects() is always the same as the one passed in.
Pass the kmem_cache_cpu pointer directly and drop the redundant re-fetch.

Signed-off-by: Wei Yang <richard.weiyang@xxxxxxxxx>
---
 mm/slub.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index ce2b9e5cea77..11e49d95e0ac 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2402,10 +2402,9 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 }
 
 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
-			int node, struct kmem_cache_cpu **pc)
+			int node, struct kmem_cache_cpu *c)
 {
 	void *freelist;
-	struct kmem_cache_cpu *c = *pc;
 	struct page *page;
 
 	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
@@ -2417,7 +2416,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 
 	page = new_slab(s, flags, node);
 	if (page) {
-		c = raw_cpu_ptr(s->cpu_slab);
 		if (c->page)
 			flush_slab(s, c);
 
@@ -2430,7 +2428,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 
 		stat(s, ALLOC_SLAB);
 		c->page = page;
-		*pc = c;
 	} else
 		freelist = NULL;
 
@@ -2567,7 +2564,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto redo;
 	}
 
-	freelist = new_slab_objects(s, gfpflags, node, &c);
+	freelist = new_slab_objects(s, gfpflags, node, c);
 
 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-- 
2.15.1
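
For context, here is a minimal sketch of the irq-disabled call path the commit
message refers to. It is an illustration written for this note, assuming it
roughly mirrors __slab_alloc() in mm/slub.c of this era, not a quote of the
kernel source: interrupts are disabled before ___slab_alloc() runs, so the
kmem_cache_cpu pointer passed down cannot change while new_slab_objects()
executes.

static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			  unsigned long addr, struct kmem_cache_cpu *c)
{
	void *p;
	unsigned long flags;

	local_irq_save(flags);		/* irqs off: no preemption or migration */
#ifdef CONFIG_PREEMPT
	/*
	 * We may have been preempted and rescheduled on a different cpu
	 * before interrupts were disabled, so reload the per-cpu pointer.
	 */
	c = this_cpu_ptr(s->cpu_slab);
#endif
	/*
	 * From here until local_irq_restore(), this task stays on one CPU,
	 * so 'c' is the current CPU's cpu_slab for the whole call chain,
	 * including new_slab_objects().
	 */
	p = ___slab_alloc(s, gfpflags, node, addr, c);
	local_irq_restore(flags);

	return p;
}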