Since GCC cannot apply the __alloc_size attributes to inlines[1], all
allocator inlines need to explicitly call into extern functions that
contain a size argument. Provide these wrappers, which simply ignore
the size argument for the actual allocation. This allows
CONFIG_FORTIFY_SOURCE=y to see the various dynamic allocation sizes
under GCC 12+ and all supported Clang versions.

[1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96503

Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Roman Gushchin <roman.gushchin@xxxxxxxxx>
Cc: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
---
 include/linux/slab.h |  8 ++++++--
 mm/slab_common.c     | 14 ++++++++++++++
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 970e9504949e..051d86ca31a8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -442,6 +442,8 @@ static_assert(PAGE_SHIFT <= 20);
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
+void *kmem_cache_alloc_sized(struct kmem_cache *s, gfp_t flags, size_t size)
+		__assume_slab_alignment __alloc_size(3);
 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 			   gfp_t gfpflags) __assume_slab_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *s, void *objp);
@@ -469,6 +471,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignm
 						  __alloc_size(1);
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
 									  __malloc;
+void *kmem_cache_alloc_node_sized(struct kmem_cache *s, gfp_t flags, int node, size_t size)
+		__assume_slab_alignment __alloc_size(4);
 
 #ifdef CONFIG_TRACING
 void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
@@ -482,7 +486,7 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 static __always_inline __alloc_size(3)
 void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 {
-	void *ret = kmem_cache_alloc(s, flags);
+	void *ret = kmem_cache_alloc_sized(s, flags, size);
 
 	ret = kasan_kmalloc(s, ret, size, flags);
 	return ret;
@@ -492,7 +496,7 @@ static __always_inline __alloc_size(4)
 void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 		int node, size_t size)
 {
-	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
+	void *ret = kmem_cache_alloc_node_sized(s, gfpflags, node, size);
 
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 33b1886b06eb..5fa547539a6a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1457,6 +1457,20 @@ size_t ksize(const void *objp)
 }
 EXPORT_SYMBOL(ksize);
 
+/* Wrapper so __alloc_size() can see the actual allocation size. */
+void *kmem_cache_alloc_sized(struct kmem_cache *s, gfp_t flags, size_t size)
+{
+	return kmem_cache_alloc(s, flags);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_sized);
+
+/* Wrapper so __alloc_size() can see the actual allocation size. */
+void *kmem_cache_alloc_node_sized(struct kmem_cache *s, gfp_t flags, int node, size_t size)
+{
+	return kmem_cache_alloc_node(s, flags, node);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_sized);
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
-- 
2.34.1
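
For anyone wanting to see the pattern outside the kernel, below is a
minimal userspace sketch (not kernel code) of why routing an inline
through an extern function with an explicit size parameter helps: the
alloc_size attribute on the extern wrapper is what lets the compiler
(and hence FORTIFY_SOURCE-style checks) track the allocation size at
each call site. The names cache_alloc_sized() and my_alloc() are
invented for illustration; build with GCC 12+ or a recent Clang at -O2
so __builtin_dynamic_object_size() is actually computed.

/* sketch.c: standalone illustration of the sized-wrapper pattern.
 * Build with: gcc -O2 -Wall sketch.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Extern wrapper: the size parameter exists only so that
 * __attribute__((alloc_size(3))) can tell the compiler how big the
 * returned object is; the wrapper itself just forwards the allocation.
 */
void *cache_alloc_sized(void *cache, int flags, size_t size)
	__attribute__((alloc_size(3)));

void *cache_alloc_sized(void *cache, int flags, size_t size)
{
	(void)cache;
	(void)flags;
	return malloc(size);		/* stand-in for kmem_cache_alloc() */
}

/*
 * Inline helper, analogous to kmalloc_trace(): it cannot usefully carry
 * __alloc_size itself (GCC PR 96503), so it routes through the wrapper.
 */
static inline __attribute__((always_inline))
void *my_alloc(void *cache, int flags, size_t size)
{
	return cache_alloc_sized(cache, flags, size);
}

int main(void)
{
	char *p = my_alloc(NULL, 0, 16);

	if (!p)
		return 1;

	/* The compiler now knows the 16-byte bound at this call site. */
	printf("known object size: %zu\n",
	       __builtin_dynamic_object_size(p, 0));

	memcpy(p, "hello", 6);	/* a fortified memcpy() could check this */
	free(p);
	return 0;
}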