The patch titled
     Subject: kasan: make kasan_cache_create() work with 32-bit slab cache sizes
has been added to the -mm tree.  Its filename is
     kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Subject: kasan: make kasan_cache_create() work with 32-bit slab cache sizes

If SLAB doesn't support 4GB+ kmem caches (it never did), KASAN should not
do it either.

Link: http://lkml.kernel.org/r/20180305200730.15812-20-adobriyan@xxxxxxxxx
Signed-off-by: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/kasan.h |    4 ++--
 mm/kasan/kasan.c      |   12 ++++++------
 mm/slab.c             |    2 +-
 mm/slub.c             |    2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)
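To see the arithmetic in question, here is a standalone userspace sketch
that mirrors the size computation kasan_cache_create() performs after this
patch; every quantity involved stays well inside unsigned int.  It is
illustrative only: min_t()/max_t() and KMALLOC_MAX_SIZE are simplified
stand-ins for the kernel's definitions, and the sample cache in main() is
made up.

#include <stdio.h>

/* Stand-in for the kernel constant; unsigned long, as in slab.h. */
#define KMALLOC_MAX_SIZE	(1UL << 25)

/* Simplified stand-ins for the kernel macros (no type checking). */
#define min_t(type, x, y) ({ type _x = (x); type _y = (y); _x < _y ? _x : _y; })
#define max_t(type, x, y) ({ type _x = (x); type _y = (y); _x > _y ? _x : _y; })

/* Adaptive redzone policy, as in mm/kasan/kasan.c after this patch. */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	unsigned int object_size = 1000;	/* hypothetical cache */
	unsigned int size = object_size + 48;	/* made-up metadata overhead */

	/*
	 * The clamp from kasan_cache_create(): KMALLOC_MAX_SIZE is
	 * unsigned long while size is now unsigned int, so the kernel's
	 * type-checking min() cannot mix the two; that is why the last
	 * mm/kasan/kasan.c hunk below switches to min_t(unsigned int, ...).
	 */
	size = min_t(unsigned int, KMALLOC_MAX_SIZE,
		     max_t(unsigned int, size,
			   object_size + optimal_redzone(object_size)));

	printf("object_size=%u size=%u redzone=%u\n",
	       object_size, size, optimal_redzone(object_size));
	return 0;
}

Built with gcc (the ({ }) statement expressions are GNU C), this prints
"object_size=1000 size=1128 redzone=128"; even the largest case, a 64KB
object with a 2KB redzone, is nowhere near the 32-bit limit.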
diff -puN include/linux/kasan.h~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes include/linux/kasan.h
--- a/include/linux/kasan.h~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/include/linux/kasan.h
@@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(co
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(str
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
 static inline void kasan_cache_create(struct kmem_cache *cache,
-				      size_t *size,
+				      unsigned int *size,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
diff -puN mm/kasan/kasan.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes mm/kasan/kasan.c
--- a/mm/kasan/kasan.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/mm/kasan/kasan.c
@@ -323,9 +323,9 @@ void kasan_free_pages(struct page *page,
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
  */
-static size_t optimal_redzone(size_t object_size)
+static unsigned int optimal_redzone(unsigned int object_size)
 {
-	int rz =
+	return
 		object_size <= 64        - 16   ? 16 :
 		object_size <= 128       - 32   ? 32 :
 		object_size <= 512       - 64   ? 64 :
@@ -333,14 +333,13 @@ static size_t optimal_redzone(size_t obj
 		object_size <= (1 << 14) - 256  ? 256 :
 		object_size <= (1 << 15) - 512  ? 512 :
 		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
-	return rz;
 }
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags)
 {
+	unsigned int orig_size = *size;
 	int redzone_adjust;
-	int orig_size = *size;
 
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
@@ -358,7 +357,8 @@ void kasan_cache_create(struct kmem_cach
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
 
-	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
+		      max(*size, cache->object_size +
 					optimal_redzone(cache->object_size)));
 
 	/*
diff -puN mm/slab.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes mm/slab.c
--- a/mm/slab.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/mm/slab.c
@@ -1993,7 +1993,7 @@ int __kmem_cache_create(struct kmem_cach
 	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;
 	int err;
-	size_t size = cachep->size;
+	unsigned int size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
diff -puN mm/slub.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes mm/slub.c
--- a/mm/slub.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/mm/slub.c
@@ -3457,7 +3457,7 @@ static void set_cpu_partial(struct kmem_
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	slab_flags_t flags = s->flags;
-	size_t size = s->object_size;
+	unsigned int size = s->object_size;
 	int order;
 
 	/*
_

Patches currently in -mm which might be from adobriyan@xxxxxxxxx are

slab-mark-kmalloc-machinery-as-__ro_after_init.patch
slab-fixup-calculate_alignment-argument-type.patch
slab-make-kmalloc_index-return-unsigned-int.patch
slab-make-kmalloc_size-return-unsigned-int.patch
slab-make-create_kmalloc_cache-work-with-32-bit-sizes.patch
slab-make-create_boot_cache-work-with-32-bit-sizes.patch
slab-make-kmem_cache_create-work-with-32-bit-sizes.patch
slab-make-size_index-array-u8.patch
slab-make-size_index_elem-unsigned-int.patch
slub-make-remote_node_defrag_ratio-unsigned-int.patch
slub-make-max_attr_size-unsigned-int.patch
slub-make-red_left_pad-unsigned-int.patch
slub-make-reserved-unsigned-int.patch
slub-make-align-unsigned-int.patch
slub-make-inuse-unsigned-int.patch
slub-make-cpu_partial-unsigned-int.patch
slub-make-offset-unsigned-int.patch
slub-make-object_size-unsigned-int.patch
slub-make-size-unsigned-int.patch
slab-make-kmem_cache_flags-accept-32-bit-object-size.patch
kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes.patch
slab-make-usercopy-region-32-bit.patch
slub-make-slab_index-return-unsigned-int.patch
slub-make-struct-kmem_cache_order_objects-x-unsigned-int.patch
slub-make-size_from_object-return-unsigned-int.patch
slab-use-32-bit-arithmetic-in-freelist_randomize.patch
proc-do-less-stuff-under-pde_unload_lock.patch
proc-move-proc-sysvipc-creation-to-where-it-belongs.patch
proc-faster-open-close-of-files-without-release-hook.patch
proc-randomize-struct-pde_opener.patch
proc-move-struct-pde_opener-to-kmem-cache.patch
proc-account-struct-pde_opener.patch
proc-check-permissions-earlier-for-proc-wchan.patch
proc-use-set_puts-at-proc-wchan.patch
proc-test-proc-self-wchan.patch
proc-test-proc-self-syscall.patch
uts-create-struct-uts_namespace-from-kmem_cache.patch
seq_file-delete-small-value-optimization.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html