Dropping the page field is possible since the struct page address of an
object or a freelist pointer can now always be calculated from that
address. No freelist pointer will be NULL anymore, so use NULL to signify
that the current cpu has no percpu slab attached to it.

Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>

Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h
+++ linux/include/linux/slub_def.h
@@ -40,7 +40,6 @@ enum stat_item {
 struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to next available object */
 	unsigned long tid;	/* Globally unique transaction id */
-	struct page *page;	/* The slab from which we are allocating */
 	struct page *partial;	/* Partially allocated frozen slabs */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1611,7 +1611,6 @@ static void *get_partial_node(struct kme
 		available += objects;
 		if (!object) {
-			c->page = page;
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
 		} else {
@@ -2049,10 +2048,9 @@ static void put_cpu_partial(struct kmem_
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	stat(s, CPUSLAB_FLUSH);
-	deactivate_slab(s, c->page, c->freelist);
+	deactivate_slab(s, virt_to_head_page(c->freelist), c->freelist);
 
 	c->tid = next_tid(c->tid);
-	c->page = NULL;
 	c->freelist = NULL;
 }
@@ -2066,7 +2064,7 @@ static inline void __flush_cpu_slab(stru
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
 	if (likely(c)) {
-		if (c->page)
+		if (c->freelist)
 			flush_slab(s, c);
 
 		unfreeze_partials(s, c);
@@ -2085,7 +2083,7 @@ static bool has_cpu_slab(int cpu, void *
 	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-	return c->page || c->partial;
+	return c->freelist || c->partial;
 }
 
 static void flush_all(struct kmem_cache *s)
@@ -2186,7 +2184,7 @@ static inline void *new_slab_objects(str
 	page = new_slab(s, flags, node);
 	if (page) {
 		c = raw_cpu_ptr(s->cpu_slab);
-		if (c->page)
+		if (c->freelist)
 			flush_slab(s, c);
 
 		/*
@@ -2197,7 +2195,6 @@ static inline void *new_slab_objects(str
 		page->freelist = end_token(freelist);
 		stat(s, ALLOC_SLAB);
-		c->page = page;
 		*pc = c;
 	} else
 		freelist = NULL;
@@ -2280,9 +2277,10 @@ static void *__slab_alloc(struct kmem_ca
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-	page = c->page;
-	if (!page)
+	if (!c->freelist || is_end_token(c->freelist))
 		goto new_slab;
+
+	page = virt_to_head_page(c->freelist);
 redo:
 
 	if (unlikely(!node_match(page, node))) {
@@ -2311,7 +2309,7 @@ redo:
 	freelist = get_freelist(s, page);
 
 	if (!freelist || is_end_token(freelist)) {
-		c->page = NULL;
+		c->freelist = NULL;
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
 	}
@@ -2324,7 +2322,7 @@ load_freelist:
 	 * page is pointing to the page from which the objects are obtained.
 	 * That page must be frozen for per cpu allocations to work.
 	 */
-	VM_BUG_ON(!c->page->frozen);
+	VM_BUG_ON(!virt_to_head_page(freelist)->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
@@ -2332,16 +2330,15 @@ load_freelist:
 
 deactivate:
 	deactivate_slab(s, page, c->freelist);
-	c->page = NULL;
 	c->freelist = NULL;
 
 new_slab:
 
 	if (c->partial) {
-		page = c->page = c->partial;
+		page = c->partial;
 		c->partial = page->next;
 		stat(s, CPU_PARTIAL_ALLOC);
-		c->freelist = NULL;
+		c->freelist = end_token(page_address(page));
 		goto redo;
 	}
@@ -2353,7 +2350,7 @@ new_slab:
 		return NULL;
 	}
 
-	page = c->page;
+	page = virt_to_head_page(freelist);
 	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
 		goto load_freelist;
@@ -2363,7 +2360,6 @@ new_slab:
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist));
-	c->page = NULL;
 	c->freelist = NULL;
 	local_irq_restore(flags);
 	return freelist;
@@ -2384,7 +2380,6 @@ static __always_inline void *slab_alloc_
 {
 	void **object;
 	struct kmem_cache_cpu *c;
-	struct page *page;
 	unsigned long tid;
 
 	if (slab_pre_alloc_hook(s, gfpflags))
@@ -2416,8 +2411,7 @@ redo:
 	preempt_enable();
 
 	object = c->freelist;
-	page = c->page;
-	if (unlikely(!object || is_end_token(object) || !node_match(page, node))) {
+	if (unlikely(!object || is_end_token(object) || !node_match(virt_to_head_page(object), node))) {
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 		stat(s, ALLOC_SLOWPATH);
 	} else {
@@ -2665,7 +2659,7 @@ redo:
 	tid = c->tid;
 	preempt_enable();
 
-	if (likely(page == c->page)) {
+	if (likely(c->freelist && page == virt_to_head_page(c->freelist))) {
 		set_freepointer(s, object, c->freelist);
 
 		if (unlikely(!this_cpu_cmpxchg_double(
@@ -4191,10 +4185,10 @@ static ssize_t show_slab_objects(struct
 			int node;
 			struct page *page;
 
-			page = ACCESS_ONCE(c->page);
-			if (!page)
+			if (!c->freelist)
 				continue;
 
+			page = virt_to_head_page(c->freelist);
 			node = page_to_nid(page);
 			if (flags & SO_TOTAL)
 				x = page->objects;
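
A note for readers, not part of the patch: the conversion works because
anything stored in c->freelist, whether a real object pointer or the
end_token() encoding of an exhausted freelist, points into the pages
backing the cpu slab, so the slab's struct page is always recoverable via
virt_to_head_page(), and NULL becomes free to mean "no percpu slab
attached". A minimal sketch of that invariant, using a made-up helper
name (cpu_slab_page) rather than anything introduced by the patch:

/*
 * Illustration only: recover the cpu slab's struct page from the
 * cached freelist pointer.  virt_to_head_page() maps any address
 * inside a (possibly compound) page back to its head struct page,
 * which is exactly what the hunks above rely on.
 */
static inline struct page *cpu_slab_page(struct kmem_cache_cpu *c)
{
	/* NULL now means: no percpu slab attached to this cpu. */
	if (!c->freelist)
		return NULL;

	return virt_to_head_page(c->freelist);
}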