Provide a little more type safety and also convert
free_debug_processing() to take a struct slab.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/slub.c | 52 ++++++++++++++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 15996ea165ac..0a566a03d424 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1342,21 +1342,21 @@ static inline int free_consistency_checks(struct kmem_cache *s,
 
 /* Supports checking bulk free of a constructed freelist */
 static noinline int free_debug_processing(
-	struct kmem_cache *s, struct page *page,
+	struct kmem_cache *s, struct slab *slab,
 	void *head, void *tail, int bulk_cnt,
 	unsigned long addr)
 {
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
 	void *object = head;
 	int cnt = 0;
 	unsigned long flags, flags2;
 	int ret = 0;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	slab_lock(page, &flags2);
+	slab_lock(slab_page(slab), &flags2);
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-		if (!check_slab(s, page))
+		if (!check_slab(s, slab_page(slab)))
 			goto out;
 	}
 
@@ -1364,13 +1364,13 @@ static noinline int free_debug_processing(
 	cnt++;
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-		if (!free_consistency_checks(s, page, object, addr))
+		if (!free_consistency_checks(s, slab_page(slab), object, addr))
 			goto out;
 	}
 
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
-	trace(s, page, object, 0);
+	trace(s, slab_page(slab), object, 0);
 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
 	init_object(s, object, SLUB_RED_INACTIVE);
 
@@ -1383,10 +1383,10 @@ static noinline int free_debug_processing(
 
 out:
 	if (cnt != bulk_cnt)
-		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
+		slab_err(s, slab_page(slab), "Bulk freelist count(%d) invalid(%d)\n",
 			 bulk_cnt, cnt);
 
-	slab_unlock(page, &flags2);
+	slab_unlock(slab_page(slab), &flags2);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	if (!ret)
 		slab_fix(s, "Object at 0x%p not freed", object);
@@ -1609,7 +1609,7 @@ static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(
-	struct kmem_cache *s, struct page *page,
+	struct kmem_cache *s, struct slab *slab,
 	void *head, void *tail, int bulk_cnt,
 	unsigned long addr) { return 0; }
 
@@ -3270,17 +3270,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
  * have a longer lifetime than the cpu slabs in most processing loads.
  *
  * So we still attempt to reduce cache line usage. Just take the slab
- * lock and free the item. If there is no additional partial page
+ * lock and free the item. If there is no additional partial slab
  * handling required then we can return immediately.
  */
-static void __slab_free(struct kmem_cache *s, struct page *page,
+static void __slab_free(struct kmem_cache *s, struct slab *slab,
 			void *head, void *tail, int cnt,
 			unsigned long addr)
 
 {
 	void *prior;
 	int was_frozen;
-	struct page new;
+	struct slab new;
 	unsigned long counters;
 	struct kmem_cache_node *n = NULL;
 	unsigned long flags;
@@ -3291,7 +3291,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 
 	if (kmem_cache_debug(s) &&
-	    !free_debug_processing(s, page, head, tail, cnt, addr))
+	    !free_debug_processing(s, slab, head, tail, cnt, addr))
 		return;
 
 	do {
@@ -3299,8 +3299,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			spin_unlock_irqrestore(&n->list_lock, flags);
 			n = NULL;
 		}
-		prior = page->freelist;
-		counters = page->counters;
+		prior = slab->freelist;
+		counters = slab->counters;
 		set_freepointer(s, tail, prior);
 		new.counters = counters;
 		was_frozen = new.frozen;
@@ -3319,7 +3319,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 			} else { /* Needs to be taken off a list */
 
-				n = get_node(s, page_to_nid(page));
+				n = get_node(s, slab_nid(slab));
 				/*
 				 * Speculatively acquire the list_lock.
 				 * If the cmpxchg does not succeed then we may
@@ -3333,7 +3333,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			}
 		}
 
-	} while (!cmpxchg_double_slab(s, page,
+	} while (!cmpxchg_double_slab(s, slab_page(slab),
 		prior, counters,
 		head, new.counters,
 		"__slab_free"));
@@ -3348,10 +3348,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			stat(s, FREE_FROZEN);
 		} else if (new.frozen) {
 			/*
-			 * If we just froze the page then put it onto the
+			 * If we just froze the slab then put it onto the
 			 * per cpu partial list.
 			 */
-			put_cpu_partial(s, page, 1);
+			put_cpu_partial(s, slab_page(slab), 1);
 			stat(s, CPU_PARTIAL_FREE);
 		}
 
@@ -3366,8 +3366,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 * then add it.
 	 */
 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
-		remove_full(s, n, page);
-		add_partial(n, page, DEACTIVATE_TO_TAIL);
+		remove_full(s, n, slab_page(slab));
+		add_partial(n, slab_page(slab), DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
@@ -3378,16 +3378,16 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		/*
 		 * Slab on the partial list.
 		 */
-		remove_partial(n, page);
+		remove_partial(n, slab_page(slab));
 		stat(s, FREE_REMOVE_PARTIAL);
 	} else {
 		/* Slab must be on the full list */
-		remove_full(s, n, page);
+		remove_full(s, n, slab_page(slab));
 	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
-	discard_slab(s, page);
+	discard_slab(s, slab_page(slab));
 }
 
 /*
@@ -3468,7 +3468,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 #endif
 		stat(s, FREE_FASTPATH);
 	} else
-		__slab_free(s, slab_page(slab), head, tail_obj, cnt, addr);
+		__slab_free(s, slab, head, tail_obj, cnt, addr);
 
 }
 
@@ -4536,7 +4536,7 @@ void kfree(const void *x)
 		return;
 
 	slab = virt_to_slab(x);
-	if (unlikely(!SlabAllocation(slab))) {
+	if (unlikely(!slab_test_cache(slab))) {
 		free_nonslab_page(slab_page(slab), object);
 		return;
 	}
-- 
2.32.0
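[Editorial aside, not part of the patch.] For readers not following the
struct slab conversion, the sketch below is plain userspace C that only
illustrates the type-safety point made in the changelog: because struct
slab is a distinct C type, a function converted to take it (such as
free_debug_processing() above) can no longer be handed an arbitrary
struct page by mistake. The struct layouts and the slab_page()/slab_nid()
helpers here are simplified stand-ins, not the kernel's definitions.

/*
 * Illustration only -- NOT kernel code.  A minimal model of "give the
 * slab allocator its own type and convert helpers to take it".
 */
#include <stdio.h>

struct page {			/* stand-in for the generic memmap entry */
	unsigned long flags;
	int node_id;
};

struct slab {			/* overlays the memmap entry for slab memory */
	unsigned long flags;
	int node_id;
	void *freelist;
	unsigned long counters;
};

/* Convert back to struct page only where a page is genuinely needed. */
static inline struct page *slab_page(struct slab *slab)
{
	return (struct page *)slab;
}

static inline int slab_nid(const struct slab *slab)
{
	return slab->node_id;
}

/* A converted function: taking struct slab documents and enforces intent. */
static void free_debug_sketch(struct slab *slab)
{
	printf("slab %p on node %d (page %p)\n",
	       (void *)slab, slab_nid(slab), (void *)slab_page(slab));
}

int main(void)
{
	struct slab s = { .node_id = 0, .freelist = NULL, .counters = 0 };

	free_debug_sketch(&s);		/* fine: caller holds a struct slab */
	/* free_debug_sketch((struct page *)&s); -- rejected: incompatible pointer type */
	return 0;
}

In the series itself the two types describe the same underlying memory, so
slab_page() costs nothing at runtime; the benefit is entirely at the type
level, which is what the changelog means by "a little more type safety".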