Move a few calls to slab_page() around. This gets us a step closer to allowing deactivate_slab() to take a slab instead of a page. Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx> --- mm/slub.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index e6c363d8de22..f33a196fe64f 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2588,12 +2588,12 @@ static inline void unfreeze_partials_cpu(struct kmem_cache *s, static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) { unsigned long flags; - struct page *page; + struct slab *slab; void *freelist; local_lock_irqsave(&s->cpu_slab->lock, flags); - page = slab_page(c->slab); + slab = c->slab; freelist = c->freelist; c->slab = NULL; @@ -2602,8 +2602,8 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) local_unlock_irqrestore(&s->cpu_slab->lock, flags); - if (page) { - deactivate_slab(s, page, freelist); + if (slab) { + deactivate_slab(s, slab_page(slab), freelist); stat(s, CPUSLAB_FLUSH); } } @@ -2612,14 +2612,14 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); void *freelist = c->freelist; - struct page *page = slab_page(c->slab); + struct slab *slab = c->slab; c->slab = NULL; c->freelist = NULL; c->tid = next_tid(c->tid); - if (page) { - deactivate_slab(s, page, freelist); + if (slab) { + deactivate_slab(s, slab_page(slab), freelist); stat(s, CPUSLAB_FLUSH); } -- 2.32.0