SLAB doesn't support 4GB+ kmem caches (it never did), so KASAN should
not support them either.

Signed-off-by: Alexey Dobriyan <adobriyan@xxxxxxxxx>
---
 include/linux/kasan.h | 4 ++--
 mm/kasan/kasan.c      | 4 ++--
 mm/slab.c             | 2 +-
 mm/slub.c             | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index e3eb834c9a35..d0a05e0f9e8e 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -45,7 +45,7 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark);
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -94,7 +94,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
 static inline void kasan_cache_create(struct kmem_cache *cache,
-				      size_t *size,
+				      unsigned int *size,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 405bba487df5..0bb95f6a1b7b 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -336,11 +336,11 @@ static size_t optimal_redzone(size_t object_size)
 	return rz;
 }
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags)
 {
+	unsigned int orig_size = *size;
 	int redzone_adjust;
-	int orig_size = *size;
 
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
diff --git a/mm/slab.c b/mm/slab.c
index 15da0e177d7b..328b9b705981 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1999,7 +1999,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
 	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;
 	int err;
-	size_t size = cachep->size;
+	unsigned int size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
diff --git a/mm/slub.c b/mm/slub.c
index cd09ae2c48e8..ce71665e266c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3457,7 +3457,7 @@ static void set_cpu_partial(struct kmem_cache *s)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	slab_flags_t flags = s->flags;
-	size_t size = s->object_size;
+	unsigned int size = s->object_size;
 	int order;
 
 	/*
-- 
2.13.6
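
P.S. A note on why the narrowing is safe, since the diff itself doesn't
say: on LP64 targets size_t is 64-bit while unsigned int is 32-bit, so
the change only holds because SLAB/SLUB already reject object sizes
anywhere near 4GB, as the commit message states. Below is a minimal
standalone userspace sketch of that invariant, not kernel code;
SLAB_OBJECT_CAP and narrow_cache_size are made-up names standing in for
the allocator's real limit (KMALLOC_MAX_SIZE in the kernel, whose value
is config-dependent).

	/* Standalone userspace sketch (not kernel code). */
	#include <assert.h>
	#include <limits.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the allocator's object size cap. */
	#define SLAB_OBJECT_CAP (1UL << 25)	/* 32MB, far below UINT_MAX */

	_Static_assert(SLAB_OBJECT_CAP <= UINT_MAX,
		       "cap must fit in unsigned int");

	static unsigned int narrow_cache_size(size_t requested)
	{
		/*
		 * The size_t -> unsigned int narrowing is lossless only
		 * while the allocator enforces its cap before this point.
		 */
		assert(requested <= SLAB_OBJECT_CAP);
		return (unsigned int)requested;
	}

	int main(void)
	{
		printf("sizeof(size_t)=%zu sizeof(unsigned int)=%zu\n",
		       sizeof(size_t), sizeof(unsigned int));
		printf("narrowed: %u\n", narrow_cache_size(4096));
		return 0;
	}

In other words, if the slab cap ever grew past UINT_MAX, every size
narrowed by this patch would silently truncate; the type change is
really an assertion about slab's existing limits.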