Add a little type safety.

free_partial() iterates the per-node partial list, which holds slabs,
so walk it with struct slab and convert with slab_page() only at the
callers that still take struct page: remove_partial(),
list_slab_objects() and discard_slab().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/slub.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index ea7f8d9716e0..875f3f6c1ae6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4241,23 +4241,23 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	LIST_HEAD(discard);
-	struct page *page, *h;
+	struct slab *slab, *h;
 
 	BUG_ON(irqs_disabled());
 	spin_lock_irq(&n->list_lock);
-	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
-		if (!page->inuse) {
-			remove_partial(n, page);
-			list_add(&page->slab_list, &discard);
+	list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
+		if (!slab->inuse) {
+			remove_partial(n, slab_page(slab));
+			list_add(&slab->slab_list, &discard);
 		} else {
-			list_slab_objects(s, page,
+			list_slab_objects(s, slab_page(slab),
 			  "Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
 	spin_unlock_irq(&n->list_lock);
 
-	list_for_each_entry_safe(page, h, &discard, slab_list)
-		discard_slab(s, page);
+	list_for_each_entry_safe(slab, h, &discard, slab_list)
+		discard_slab(s, slab_page(slab));
 }
 
 bool __kmem_cache_empty(struct kmem_cache *s)
-- 
2.32.0
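
For readers outside the slab allocator: the slab_page() calls above work
because struct slab and struct page describe the same memory, so the
conversion is a cast hidden behind a helper that documents (and
type-checks) the transition. Below is a minimal userspace sketch of that
pattern; the struct layouts and the slab_page() definition here are
illustrative assumptions, not the kernel's actual code.

/*
 * Minimal userspace sketch of the slab/page type-safety idea.  The
 * struct layouts and the slab_page() helper below are illustrative
 * assumptions, not the kernel's definitions.
 */
#include <stdio.h>

/* Stand-in for struct page; the kernel's is far more elaborate. */
struct page {
	unsigned long flags;
	unsigned int inuse;
};

/*
 * Assumption: struct slab overlays struct page, so pointers to the two
 * refer to the same memory.  Keeping them as distinct C types means the
 * compiler rejects code that passes an arbitrary page where a slab is
 * required.
 */
struct slab {
	unsigned long flags;
	unsigned int inuse;
};

/* Hypothetical helper: the one sanctioned slab-to-page conversion. */
static inline struct page *slab_page(struct slab *slab)
{
	return (struct page *)slab;
}

/* Stub for a helper that has not been converted and still takes a page. */
static void discard_slab_stub(struct page *page)
{
	printf("discarding slab backed by page %p (inuse=%u)\n",
	       (void *)page, page->inuse);
}

int main(void)
{
	struct page backing = { .flags = 0, .inuse = 0 };
	struct slab *slab = (struct slab *)&backing;

	/* Work in slab terms; drop to struct page only at the boundary. */
	if (!slab->inuse)
		discard_slab_stub(slab_page(slab));
	return 0;
}

The payoff of the distinct type is purely at compile time: sites that
mix up pages and slabs fail to build instead of silently operating on
the wrong kind of object.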