On Tue, Nov 16, 2021 at 01:16:20AM +0100, Vlastimil Babka wrote:
> @@ -411,12 +412,12 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
>  	 * !PageSlab() when the size provided to kmalloc is larger than
>  	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
>  	 */
> -	if (unlikely(!PageSlab(page))) {
> +	if (unlikely(!folio_test_slab(folio))) {
>  		if (____kasan_kfree_large(ptr, ip))
>  			return;
> -		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
> +		kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
>  	} else {
> -		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
> +		____kasan_slab_free(folio_slab(folio)->slab_cache, ptr, ip, false, false);

I'd avoid this long line by doing:

	struct slab *slab = folio_slab(folio);

	____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
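
For reference, the whole branch would then read roughly like this (a
sketch of the quoted hunk with the suggested local variable applied,
not the committed patch):

	if (unlikely(!folio_test_slab(folio))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
	} else {
		/* Declared at the top of the block, per kernel style. */
		struct slab *slab = folio_slab(folio);

		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
	}

which keeps the ____kasan_slab_free() call within the usual line-length
limit without changing behaviour.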