Use the new is_kmalloc_cache() helper to simplify the code that checks
whether a kmem_cache is a kmalloc cache.

Signed-off-by: Feng Tang <feng.tang@xxxxxxxxx>
---
 include/linux/kasan.h | 9 ---------
 mm/kasan/common.c     | 9 ++-------
 mm/slab_common.c      | 1 -
 3 files changed, 2 insertions(+), 17 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index dff604912687..fc46f5d6f404 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -102,7 +102,6 @@ struct kasan_cache {
 	int alloc_meta_offset;
 	int free_meta_offset;
 #endif
-	bool is_kmalloc;
 };
 
 void __kasan_unpoison_range(const void *addr, size_t size);
@@ -129,13 +128,6 @@ static __always_inline bool kasan_unpoison_pages(struct page *page,
 	return false;
 }
 
-void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
-static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
-{
-	if (kasan_enabled())
-		__kasan_cache_create_kmalloc(cache);
-}
-
 void __kasan_poison_slab(struct slab *slab);
 static __always_inline void kasan_poison_slab(struct slab *slab)
 {
@@ -252,7 +244,6 @@ static inline void kasan_poison_pages(struct page *page,
 						unsigned int order, bool init) {}
 static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
 					bool init) { return false; }
-static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline void kasan_poison_slab(struct slab *slab) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					void *object) {}
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 1f30080a7a4c..f7e0e5067e7a 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -122,11 +122,6 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
 			     KASAN_PAGE_FREE, init);
 }
 
-void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
-{
-	cache->kasan_info.is_kmalloc = true;
-}
-
 void __kasan_poison_slab(struct slab *slab)
 {
 	struct page *page = slab_page(slab);
@@ -326,7 +321,7 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 	kasan_unpoison(tagged_object, cache->object_size, init);
 
 	/* Save alloc info (if possible) for non-kmalloc() allocations. */
-	if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
+	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
 		kasan_save_alloc_info(cache, tagged_object, flags);
 
 	return tagged_object;
@@ -372,7 +367,7 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
 	 * Save alloc info (if possible) for kmalloc() allocations.
 	 * This also rewrites the alloc info when called from kasan_krealloc().
 	 */
-	if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
+	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
 		kasan_save_alloc_info(cache, (void *)object, flags);
 
 	/* Keep the tag that was set by kasan_slab_alloc(). */
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8276022f0da4..a5480d67f391 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -663,7 +663,6 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
 
 	create_boot_cache(s, name, size, flags | SLAB_KMALLOC,
 				useroffset, usersize);
-	kasan_cache_create_kmalloc(s);
 	list_add(&s->list, &slab_caches);
 	s->refcount = 1;
 	return s;
-- 
2.34.1
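
For context: this patch relies on the is_kmalloc_cache() helper introduced
earlier in the series, which is not defined in this diff. A minimal sketch of
what that helper presumably looks like, assuming it keys off the SLAB_KMALLOC
flag that create_kmalloc_cache() already passes to create_boot_cache(); the
exact header it lives in and the CONFIG_SLOB handling are assumptions:

/*
 * Sketch of the assumed helper, presumably in include/linux/slab.h.
 * SLOB has no dedicated kmalloc caches, so it would return false there.
 */
#ifndef CONFIG_SLOB
static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}
#else
static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return false;
}
#endif

With a flag-based helper along these lines, KASAN no longer needs to keep its
own is_kmalloc bit in struct kasan_cache, which is what allows all of the
deletions above.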