Adds a little bit of type safety. Convert the one caller.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/slub.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index fd04aa96602c..827196f0aee5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2816,32 +2816,32 @@ static inline bool pfmemalloc_match_unsafe(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the
- * per cpu freelist or deactivate the page.
+ * Check the freelist of a slab and either transfer the freelist to the
+ * per cpu freelist or deactivate the slab
  *
- * The page is still frozen if the return value is not NULL.
+ * The slab is still frozen if the return value is not NULL.
  *
- * If this function returns NULL then the page has been unfrozen.
+ * If this function returns NULL then the slab has been unfrozen.
  */
-static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
 {
-	struct page new;
+	struct slab new;
 	unsigned long counters;
 	void *freelist;
 
 	lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
 
 	do {
-		freelist = page->freelist;
-		counters = page->counters;
+		freelist = slab->freelist;
+		counters = slab->counters;
 		new.counters = counters;
 		VM_BUG_ON(!new.frozen);
 
-		new.inuse = page->objects;
+		new.inuse = slab->objects;
 		new.frozen = freelist != NULL;
 
-	} while (!__cmpxchg_double_slab(s, page,
+	} while (!__cmpxchg_double_slab(s, slab_page(slab),
 		freelist, counters,
 		NULL, new.counters,
 		"get_freelist"));
@@ -2924,7 +2924,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		if (freelist)
 			goto load_freelist;
 
-		freelist = get_freelist(s, slab_page(slab));
+		freelist = get_freelist(s, slab);
 
 		if (!freelist) {
 			c->slab = NULL;
-- 
2.32.0
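
For illustration only (simplified userspace stand-ins, not the kernel definitions):
the type-safety gain is that callers can no longer hand get_freelist() an arbitrary
struct page *, and the conversion back to struct page * is confined to one spot,
via slab_page(), at the __cmpxchg_double_slab() call. A minimal sketch of that
pattern, with hypothetical *_demo names standing in for the real helpers:

/* Simplified stand-ins; the real struct slab overlays struct page. */
#include <stdio.h>

struct page { void *freelist; };
struct slab { void *freelist; };

/* Sole bridge back to the page view, analogous to slab_page(). */
static struct page *slab_page_demo(struct slab *slab)
{
	return (struct page *)slab;
}

/* Only accepts struct slab *; passing a struct page * here no longer
 * compiles cleanly (incompatible pointer type). */
static void *get_freelist_demo(struct slab *slab)
{
	return slab->freelist;
}

int main(void)
{
	struct slab s = { .freelist = (void *)"objects" };

	printf("freelist: %s\n", (char *)get_freelist_demo(&s));
	printf("page view: %p\n", (void *)slab_page_demo(&s));
	return 0;
}

With warnings treated as errors, a caller that mixes up the two pointer types
fails to build, which is the "little bit of type safety" referred to above.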