The patch titled
     Subject: kasan: make kasan_cache_create() work with 32-bit slab cache sizes
has been removed from the -mm tree.  Its filename was
     kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes.patch

This patch was dropped because it was merged into mainline or a subsystem tree

------------------------------------------------------
From: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Subject: kasan: make kasan_cache_create() work with 32-bit slab cache sizes

If SLAB doesn't support 4GB+ kmem caches (it never did), KASAN should not
do it as well.

Link: http://lkml.kernel.org/r/20180305200730.15812-20-adobriyan@xxxxxxxxx
Signed-off-by: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/kasan.h |    4 ++--
 mm/kasan/kasan.c      |   12 ++++++------
 mm/slab.c             |    2 +-
 mm/slub.c             |    2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)
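For reference, a minimal userspace sketch of the sizing logic
kasan_cache_create() ends up with after this patch.  It is simplified
(the alloc/free metadata bookkeeping is skipped), and the
KMALLOC_MAX_SIZE value below is assumed for illustration only -- the
real one is config-dependent, but always far below 4GB, which is why
"unsigned int" is wide enough:

#include <stdio.h>

/* Assumed value for illustration; config-dependent in the kernel. */
#define KMALLOC_MAX_SIZE (1U << 22)

/*
 * Adaptive redzone policy as in mm/kasan/kasan.c: larger objects get
 * larger redzones.
 */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	unsigned int object_sizes[] = { 16, 128, 4096, 1 << 16,
					KMALLOC_MAX_SIZE };
	unsigned int i;

	for (i = 0; i < sizeof(object_sizes) / sizeof(object_sizes[0]); i++) {
		unsigned int object_size = object_sizes[i];
		/*
		 * Pad the object with its redzone, then clamp, mirroring
		 * the min_t(unsigned int, KMALLOC_MAX_SIZE, ...) in
		 * kasan_cache_create().
		 */
		unsigned int size = object_size + optimal_redzone(object_size);

		if (size > KMALLOC_MAX_SIZE)
			size = KMALLOC_MAX_SIZE;
		printf("object %8u -> padded cache size %8u\n",
		       object_size, size);
	}
	return 0;
}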
diff -puN include/linux/kasan.h~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes include/linux/kasan.h
--- a/include/linux/kasan.h~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/include/linux/kasan.h
@@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(co
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(str
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
 static inline void kasan_cache_create(struct kmem_cache *cache,
-				      size_t *size,
+				      unsigned int *size,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
diff -puN mm/kasan/kasan.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes mm/kasan/kasan.c
--- a/mm/kasan/kasan.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/mm/kasan/kasan.c
@@ -323,9 +323,9 @@ void kasan_free_pages(struct page *page,
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
  */
-static size_t optimal_redzone(size_t object_size)
+static unsigned int optimal_redzone(unsigned int object_size)
 {
-	int rz =
+	return
 		object_size <= 64        - 16   ? 16 :
 		object_size <= 128       - 32   ? 32 :
 		object_size <= 512       - 64   ? 64 :
@@ -333,14 +333,13 @@ static size_t optimal_redzone(size_t obj
 		object_size <= (1 << 14) - 256  ? 256 :
 		object_size <= (1 << 15) - 512  ? 512 :
 		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
-	return rz;
 }
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags)
 {
+	unsigned int orig_size = *size;
 	int redzone_adjust;
-	int orig_size = *size;
 
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
@@ -358,7 +357,8 @@ void kasan_cache_create(struct kmem_cach
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
 
-	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
+			max(*size, cache->object_size +
 					optimal_redzone(cache->object_size)));
 
 	/*
diff -puN mm/slab.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes mm/slab.c
--- a/mm/slab.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/mm/slab.c
@@ -1994,7 +1994,7 @@ int __kmem_cache_create(struct kmem_cach
 	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;
 	int err;
-	size_t size = cachep->size;
+	unsigned int size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
diff -puN mm/slub.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes mm/slub.c
--- a/mm/slub.c~kasan-make-kasan_cache_create-work-with-32-bit-slab-cache-sizes
+++ a/mm/slub.c
@@ -3458,7 +3458,7 @@ static void set_cpu_partial(struct kmem_
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	slab_flags_t flags = s->flags;
-	size_t size = s->object_size;
+	unsigned int size = s->object_size;
 	int order;
 
 	/*
_

Patches currently in -mm which might be from adobriyan@xxxxxxxxx are

proc-do-less-stuff-under-pde_unload_lock.patch
proc-move-proc-sysvipc-creation-to-where-it-belongs.patch
proc-faster-open-close-of-files-without-release-hook.patch
proc-randomize-struct-pde_opener.patch
proc-move-struct-pde_opener-to-kmem-cache.patch
proc-account-struct-pde_opener.patch
proc-check-permissions-earlier-for-proc-wchan.patch
proc-use-set_puts-at-proc-wchan.patch
proc-test-proc-self-wchan.patch
proc-test-proc-self-syscall.patch
proc-move-struct-proc_dir_entry-into-kmem-cache.patch
proc-fix-proc-map_files-lookup-some-more.patch
proc-register-filesystem-last.patch
proc-faster-proc-cmdline.patch
proc-do-mmput-asap-for-proc-map_files.patch
proc-revalidate-misc-dentries.patch
proc-test-last-field-of-proc-loadavg.patch
proc-reject-and-as-filenames.patch
proc-switch-struct-proc_dir_entry-count-to-refcount.patch
proc-shotgun-test-read-readdir-readlink-a-little-write.patch
proc-shotgun-test-read-readdir-readlink-a-little-write-fix.patch
proc-shotgun-test-read-readdir-readlink-a-little-write-fix-2.patch
proc-use-slower-rb_first.patch
proc-test-proc-uptime.patch
uts-create-struct-uts_namespace-from-kmem_cache.patch
seq_file-allocate-seq_file-from-kmem_cache.patch
seq_file-account-everything.patch
seq_file-delete-small-value-optimization.patch