To avoid converting back and forth between struct page and struct slab,
we have to convert all these functions at once.  Adds a little
type-safety.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/slub_def.h |   4 +-
 mm/slub.c                | 208 +++++++++++++++++++--------------
 2 files changed, 106 insertions(+), 106 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 85499f0586b0..3cc64e9f988c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -48,9 +48,9 @@ enum stat_item {
 struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
-	struct page *page;	/* The slab from which we are allocating */
+	struct slab *slab;	/* The slab from which we are allocating */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
-	struct page *partial;	/* Partially allocated frozen slabs */
+	struct slab *partial;	/* Partially allocated frozen slabs */
 #endif
	local_lock_t lock;	/* Protects the fields above */
 #ifdef CONFIG_SLUB_STATS
diff --git a/mm/slub.c b/mm/slub.c
index 41c4ccd67d95..d849b644d0ed 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2084,9 +2084,9 @@ static inline void *acquire_slab(struct kmem_cache *s,
 }

 #ifdef CONFIG_SLUB_CPU_PARTIAL
-static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
 #else
-static inline void put_cpu_partial(struct kmem_cache *s, struct page *page,
+static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
				   int drain) { }
 #endif
 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
@@ -2095,9 +2095,9 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
  * Try to allocate a partial slab from a specific node.
  */
 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
-			      struct page **ret_page, gfp_t gfpflags)
+			      struct slab **ret_slab, gfp_t gfpflags)
 {
-	struct page *page, *page2;
+	struct slab *slab, *slab2;
	void *object = NULL;
	unsigned int available = 0;
	unsigned long flags;
@@ -2113,23 +2113,23 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
		return NULL;

	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
+	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
		void *t;

-		if (!pfmemalloc_match(page, gfpflags))
+		if (!pfmemalloc_match(slab_page(slab), gfpflags))
			continue;

-		t = acquire_slab(s, n, page, object == NULL, &objects);
+		t = acquire_slab(s, n, slab_page(slab), object == NULL, &objects);
		if (!t)
			break;

		available += objects;
		if (!object) {
-			*ret_page = page;
+			*ret_slab = slab;
			stat(s, ALLOC_FROM_PARTIAL);
			object = t;
		} else {
-			put_cpu_partial(s, page, 0);
+			put_cpu_partial(s, slab, 0);
			stat(s, CPU_PARTIAL_NODE);
		}
		if (!kmem_cache_has_cpu_partial(s)
@@ -2142,10 +2142,10 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 }

 /*
- * Get a page from somewhere. Search in increasing NUMA distances.
+ * Get a slab from somewhere. Search in increasing NUMA distances.
  */
 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
-		struct page **ret_page)
+		struct slab **ret_slab)
 {
 #ifdef CONFIG_NUMA
	struct zonelist *zonelist;
@@ -2187,7 +2187,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,

		if (n && cpuset_zone_allowed(zone, flags) &&
				n->nr_partial > s->min_partial) {
-			object = get_partial_node(s, n, ret_page, flags);
+			object = get_partial_node(s, n, ret_slab, flags);
			if (object) {
				/*
				 * Don't check read_mems_allowed_retry()
@@ -2206,10 +2206,10 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 }

 /*
- * Get a partial page, lock it and return it.
+ * Get a partial slab, lock it and return it.
  */
 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
-		struct page **ret_page)
+		struct slab **ret_slab)
 {
	void *object;
	int searchnode = node;
@@ -2217,11 +2217,11 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
	if (node == NUMA_NO_NODE)
		searchnode = numa_mem_id();

-	object = get_partial_node(s, get_node(s, searchnode), ret_page, flags);
+	object = get_partial_node(s, get_node(s, searchnode), ret_slab, flags);
	if (object || node != NUMA_NO_NODE)
		return object;

-	return get_any_partial(s, flags, ret_page);
+	return get_any_partial(s, flags, ret_slab);
 }

 #ifdef CONFIG_PREEMPTION
@@ -2506,7 +2506,7 @@ static void unfreeze_partials(struct kmem_cache *s)
	unsigned long flags;

	local_lock_irqsave(&s->cpu_slab->lock, flags);
-	partial_page = this_cpu_read(s->cpu_slab->partial);
+	partial_page = slab_page(this_cpu_read(s->cpu_slab->partial));
	this_cpu_write(s->cpu_slab->partial, NULL);
	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
@@ -2519,7 +2519,7 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
 {
	struct page *partial_page;

-	partial_page = slub_percpu_partial(c);
+	partial_page = slab_page(slub_percpu_partial(c));
	c->partial = NULL;

	if (partial_page)
@@ -2527,52 +2527,52 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
 }

 /*
- * Put a page that was just frozen (in __slab_free|get_partial_node) into a
- * partial page slot if available.
+ * Put a slab that was just frozen (in __slab_free|get_partial_node) into a
+ * partial slab slot if available.
  *
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
 {
-	struct page *oldpage;
-	struct page *page_to_unfreeze = NULL;
+	struct slab *oldslab;
+	struct slab *slab_to_unfreeze = NULL;
	unsigned long flags;
-	int pages = 0;
+	int slabs = 0;
	int pobjects = 0;

	local_lock_irqsave(&s->cpu_slab->lock, flags);

-	oldpage = this_cpu_read(s->cpu_slab->partial);
+	oldslab = this_cpu_read(s->cpu_slab->partial);

-	if (oldpage) {
-		if (drain && oldpage->pobjects > slub_cpu_partial(s)) {
+	if (oldslab) {
+		if (drain && oldslab->pobjects > slub_cpu_partial(s)) {
			/*
			 * Partial array is full. Move the existing set to the
			 * per node partial list. Postpone the actual unfreezing
			 * outside of the critical section.
			 */
-			page_to_unfreeze = oldpage;
-			oldpage = NULL;
+			slab_to_unfreeze = oldslab;
+			oldslab = NULL;
		} else {
-			pobjects = oldpage->pobjects;
-			pages = oldpage->pages;
+			pobjects = oldslab->pobjects;
+			slabs = oldslab->slabs;
		}
	}

-	pages++;
-	pobjects += page->objects - page->inuse;
+	slabs++;
+	pobjects += slab->objects - slab->inuse;

-	page->pages = pages;
-	page->pobjects = pobjects;
-	page->next = oldpage;
+	slab->slabs = slabs;
+	slab->pobjects = pobjects;
+	slab->next = oldslab;

-	this_cpu_write(s->cpu_slab->partial, page);
+	this_cpu_write(s->cpu_slab->partial, slab);

	local_unlock_irqrestore(&s->cpu_slab->lock, flags);

-	if (page_to_unfreeze) {
-		__unfreeze_partials(s, page_to_unfreeze);
+	if (slab_to_unfreeze) {
+		__unfreeze_partials(s, slab_page(slab_to_unfreeze));
		stat(s, CPU_PARTIAL_DRAIN);
	}
 }
@@ -2593,10 +2593,10 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)

	local_lock_irqsave(&s->cpu_slab->lock, flags);

-	page = c->page;
+	page = slab_page(c->slab);
	freelist = c->freelist;

-	c->page = NULL;
+	c->slab = NULL;
	c->freelist = NULL;
	c->tid = next_tid(c->tid);
@@ -2612,9 +2612,9 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
	void *freelist = c->freelist;
-	struct page *page = c->page;
+	struct page *page = slab_page(c->slab);

-	c->page = NULL;
+	c->slab = NULL;
	c->freelist = NULL;
	c->tid = next_tid(c->tid);
@@ -2648,7 +2648,7 @@ static void flush_cpu_slab(struct work_struct *w)
	s = sfw->s;
	c = this_cpu_ptr(s->cpu_slab);

-	if (c->page)
+	if (c->slab)
		flush_slab(s, c);

	unfreeze_partials(s);
@@ -2658,7 +2658,7 @@ static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 {
	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

-	return c->page || slub_percpu_partial(c);
+	return c->slab || slub_percpu_partial(c);
 }

 static DEFINE_MUTEX(flush_lock);
@@ -2872,15 +2872,15 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			  unsigned long addr, struct kmem_cache_cpu *c)
 {
	void *freelist;
-	struct page *page;
+	struct slab *slab;
	unsigned long flags;

	stat(s, ALLOC_SLOWPATH);

-reread_page:
+reread_slab:

-	page = READ_ONCE(c->page);
-	if (!page) {
+	slab = READ_ONCE(c->slab);
+	if (!slab) {
		/*
		 * if the node is not online or has no normal memory, just
		 * ignore the node constraint
@@ -2892,7 +2892,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
	}
 redo:

-	if (unlikely(!node_match(page, node))) {
+	if (unlikely(!node_match(slab_page(slab), node))) {
		/*
		 * same as above but node_match() being false already
		 * implies node != NUMA_NO_NODE
@@ -2907,27 +2907,27 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
	}

	/*
-	 * By rights, we should be searching for a slab page that was
-	 * PFMEMALLOC but right now, we are losing the pfmemalloc
+	 * By rights, we should be searching for a slab that was
+	 * PFMEMALLOC but right now, we lose the pfmemalloc
	 * information when the page leaves the per-cpu allocator
	 */
-	if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
+	if (unlikely(!pfmemalloc_match_unsafe(slab_page(slab), gfpflags)))
		goto deactivate_slab;

-	/* must check again c->page in case we got preempted and it changed */
+	/* must check again c->slab in case we got preempted and it changed */
	local_lock_irqsave(&s->cpu_slab->lock, flags);
-	if (unlikely(page != c->page)) {
+	if (unlikely(slab != c->slab)) {
		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-		goto reread_page;
+		goto reread_slab;
	}

	freelist = c->freelist;
	if (freelist)
		goto load_freelist;

-	freelist = get_freelist(s, page);
+	freelist = get_freelist(s, slab_page(slab));

	if (!freelist) {
-		c->page = NULL;
+		c->slab = NULL;
		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
		stat(s, DEACTIVATE_BYPASS);
		goto new_slab;
@@ -2941,10 +2941,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,

	/*
	 * freelist is pointing to the list of objects to be used.
-	 * page is pointing to the page from which the objects are obtained.
-	 * That page must be frozen for per cpu allocations to work.
+	 * slab is pointing to the slab from which the objects are obtained.
+	 * That slab must be frozen for per cpu allocations to work.
	 */
-	VM_BUG_ON(!c->page->frozen);
+	VM_BUG_ON(!c->slab->frozen);
	c->freelist = get_freepointer(s, freelist);
	c->tid = next_tid(c->tid);
	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
@@ -2953,23 +2953,23 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,

 deactivate_slab:

	local_lock_irqsave(&s->cpu_slab->lock, flags);
-	if (page != c->page) {
+	if (slab != c->slab) {
		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-		goto reread_page;
+		goto reread_slab;
	}
	freelist = c->freelist;
-	c->page = NULL;
+	c->slab = NULL;
	c->freelist = NULL;
	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-	deactivate_slab(s, page, freelist);
+	deactivate_slab(s, slab_page(slab), freelist);

 new_slab:

	if (slub_percpu_partial(c)) {
		local_lock_irqsave(&s->cpu_slab->lock, flags);
-		if (unlikely(c->page)) {
+		if (unlikely(c->slab)) {
			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-			goto reread_page;
+			goto reread_slab;
		}
		if (unlikely(!slub_percpu_partial(c))) {
			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
@@ -2977,8 +2977,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			goto new_objects;
		}

-		page = c->page = slub_percpu_partial(c);
-		slub_set_percpu_partial(c, page);
+		slab = c->slab = slub_percpu_partial(c);
+		slub_set_percpu_partial(c, slab);
		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
		stat(s, CPU_PARTIAL_ALLOC);
		goto redo;
@@ -2986,32 +2986,32 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,

 new_objects:

-	freelist = get_partial(s, gfpflags, node, &page);
+	freelist = get_partial(s, gfpflags, node, &slab);
	if (freelist)
-		goto check_new_page;
+		goto check_new_slab;

	slub_put_cpu_ptr(s->cpu_slab);
-	page = slab_page(new_slab(s, gfpflags, node));
+	slab = new_slab(s, gfpflags, node);
	c = slub_get_cpu_ptr(s->cpu_slab);
-	if (unlikely(!page)) {
+	if (unlikely(!slab)) {
		slab_out_of_memory(s, gfpflags, node);
		return NULL;
	}

	/*
-	 * No other reference to the page yet so we can
+	 * No other reference to the slab yet so we can
	 * muck around with it freely without cmpxchg
	 */
-	freelist = page->freelist;
-	page->freelist = NULL;
+	freelist = slab->freelist;
+	slab->freelist = NULL;

	stat(s, ALLOC_SLAB);

-check_new_page:
+check_new_slab:

	if (kmem_cache_debug(s)) {
-		if (!alloc_debug_processing(s, page, freelist, addr)) {
+		if (!alloc_debug_processing(s, slab_page(slab), freelist, addr)) {
			/* Slab failed checks. Next slab needed */
			goto new_slab;
		} else {
@@ -3023,39 +3023,39 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
		}
	}

-	if (unlikely(!pfmemalloc_match(page, gfpflags)))
+	if (unlikely(!pfmemalloc_match(slab_page(slab), gfpflags)))
		/*
		 * For !pfmemalloc_match() case we don't load freelist so that
		 * we don't make further mismatched allocations easier.
		 */
		goto return_single;

-retry_load_page:
+retry_load_slab:

	local_lock_irqsave(&s->cpu_slab->lock, flags);
-	if (unlikely(c->page)) {
+	if (unlikely(c->slab)) {
		void *flush_freelist = c->freelist;
-		struct page *flush_page = c->page;
+		struct slab *flush_slab = c->slab;

-		c->page = NULL;
+		c->slab = NULL;
		c->freelist = NULL;
		c->tid = next_tid(c->tid);

		local_unlock_irqrestore(&s->cpu_slab->lock, flags);

-		deactivate_slab(s, flush_page, flush_freelist);
+		deactivate_slab(s, slab_page(flush_slab), flush_freelist);

		stat(s, CPUSLAB_FLUSH);

-		goto retry_load_page;
+		goto retry_load_slab;
	}
-	c->page = page;
+	c->slab = slab;

	goto load_freelist;

 return_single:

-	deactivate_slab(s, page, get_freepointer(s, freelist));
+	deactivate_slab(s, slab_page(slab), get_freepointer(s, freelist));
	return freelist;
 }
@@ -3159,7 +3159,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
	 */

	object = c->freelist;
-	page = c->page;
+	page = slab_page(c->slab);
	/*
	 * We cannot use the lockless fastpath on PREEMPT_RT because if a
	 * slowpath has taken the local_lock_irqsave(), it is not protected
@@ -3351,7 +3351,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
			 * If we just froze the slab then put it onto the
			 * per cpu partial list.
			 */
-			put_cpu_partial(s, slab_page(slab), 1);
+			put_cpu_partial(s, slab, 1);
			stat(s, CPU_PARTIAL_FREE);
		}
@@ -3427,7 +3427,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
	/* Same with comment on barrier() in slab_alloc_node() */
	barrier();

-	if (likely(slab_page(slab) == c->page)) {
+	if (likely(slab == c->slab)) {
 #ifndef CONFIG_PREEMPT_RT
		void **freelist = READ_ONCE(c->freelist);
@@ -3453,7 +3453,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,

	local_lock(&s->cpu_slab->lock);
	c = this_cpu_ptr(s->cpu_slab);
-	if (unlikely(slab_page(slab) != c->page)) {
+	if (unlikely(slab != c->slab)) {
		local_unlock(&s->cpu_slab->lock);
		goto redo;
	}
@@ -5221,7 +5221,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
			int node;
			struct page *page;

-			page = READ_ONCE(c->page);
+			page = slab_page(READ_ONCE(c->slab));
			if (!page)
				continue;
@@ -5236,7 +5236,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
			total += x;
			nodes[node] += x;

-			page = slub_percpu_partial_read_once(c);
+			page = slab_page(slub_percpu_partial_read_once(c));
			if (page) {
				node = page_to_nid(page);
				if (flags & SO_TOTAL)
@@ -5441,31 +5441,31 @@ SLAB_ATTR_RO(objects_partial);
 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 {
	int objects = 0;
-	int pages = 0;
+	int slabs = 0;
	int cpu;
	int len = 0;

	for_each_online_cpu(cpu) {
-		struct page *page;
+		struct slab *slab;

-		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
+		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));

-		if (page) {
-			pages += page->pages;
-			objects += page->pobjects;
+		if (slab) {
+			slabs += slab->slabs;
+			objects += slab->pobjects;
		}
	}

-	len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages);
+	len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);

 #ifdef CONFIG_SMP
	for_each_online_cpu(cpu) {
-		struct page *page;
+		struct slab *slab;

-		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
-		if (page)
+		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
+		if (slab)
			len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
-					     cpu, page->pobjects, page->pages);
+					     cpu, slab->pobjects, slab->slabs);
	}
 #endif
	len += sysfs_emit_at(buf, len, "\n");
-- 
2.32.0
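
[Note for readers following along outside the tree: the sketch below is a
simplified, self-contained illustration of the type-safety point made in the
changelog, not the kernel's real definitions. struct slab, slab_page() and
the per-cpu field name are modelled on the patch; the struct layouts,
kmem_cache_cpu_sketch and main() are invented purely for the example.]

/*
 * Simplified, standalone illustration only -- NOT the kernel's real
 * struct slab, struct page or kmem_cache_cpu definitions.
 */
#include <stdio.h>

struct page { unsigned long flags; };

/* Giving the slab its own type lets the compiler catch page/slab mix-ups. */
struct slab {
	struct page backing;	/* stand-in for the real page/slab overlay */
	int objects;
	int inuse;
};

/* Analogue of slab_page(): the one explicit slab -> page conversion point. */
static struct page *slab_page(struct slab *slab)
{
	return slab ? &slab->backing : NULL;
}

/* The per-cpu cache now holds a struct slab *, as in the patch above. */
struct kmem_cache_cpu_sketch {
	struct slab *slab;
};

int main(void)
{
	struct slab s = { .objects = 32, .inuse = 5 };
	struct kmem_cache_cpu_sketch c = { .slab = &s };

	/* Passing a struct page * here would now be a compile-time error. */
	printf("free objects: %d (page at %p)\n",
	       c.slab->objects - c.slab->inuse, (void *)slab_page(c.slab));
	return 0;
}

With the separate type, handing a struct page * to code that expects a
struct slab * (or vice versa) fails at compile time instead of silently
aliasing the same memory, which is the "little type-safety" the changelog
refers to; the remaining slab_page() calls in the diff mark exactly the
places where page-based APIs are still in use.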