For slab allocations, record whether the call site uses a fixed size
(i.e., a compile-time constant) or a dynamic size. Report the results
in /proc/allocinfo.

Improvements needed:
- examine realloc routines for needed coverage

Signed-off-by: Kees Cook <kees@xxxxxxxxxx>
---
Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Cc: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Roman Gushchin <roman.gushchin@xxxxxxxxx>
Cc: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
---
 include/linux/alloc_tag.h | 30 ++++++++++++++++++++++++++----
 include/linux/slab.h      | 16 ++++++++--------
 lib/alloc_tag.c           |  7 +++++++
 mm/Kconfig                |  9 +++++++++
 4 files changed, 50 insertions(+), 12 deletions(-)
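
Reviewer note (commentary below the "---" cut, not part of the commit):
a minimal sketch of how ALLOC_META_INIT() classifies call sites,
assuming the hunks below. A compile-time-constant size is recorded
as-is; everything else collapses to SIZE_MAX ("dynamic"). The names
"struct foo" and "len" are invented for illustration:

	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL); /* meta.sized == sizeof(*f): "fixed" */
	char *buf = kmalloc(len, GFP_KERNEL);            /* meta.sized == SIZE_MAX: "dynamic" */

With CONFIG_SLAB_PER_SITE=y, each /proc/allocinfo record then carries a
size column; hypothetical output (paths, line numbers, and counts made
up):

	        4096        8 fs/file.c:123 func:alloc_fdtable size:fixed(512)
	        8192        2 fs/file.c:145 func:expand_fdtable size:dynamic(0)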

diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 8c61ccd161ba..f5d8c5849b82 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -20,6 +20,19 @@ struct alloc_tag_counters {
 	u64 calls;
 };
 
+#ifdef CONFIG_SLAB_PER_SITE
+struct alloc_meta {
+	/* 0 means non-slab, SIZE_MAX means dynamic, and everything else is fixed-size. */
+	size_t sized;
+};
+#define ALLOC_META_INIT(_size)	{					\
+		.sized = (__builtin_constant_p(_size) ? (_size) : SIZE_MAX), \
+	}
+#else
+struct alloc_meta { };
+#define ALLOC_META_INIT(_size)	{ }
+#endif
+
 /*
  * An instance of this structure is created in a special ELF section at every
  * allocation callsite. At runtime, the special section is treated as
@@ -27,6 +40,7 @@ struct alloc_tag_counters {
  */
 struct alloc_tag {
 	struct codetag ct;
+	struct alloc_meta meta;
 	struct alloc_tag_counters __percpu *counters;
 } __aligned(8);
 
@@ -74,19 +88,21 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
  */
 DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
 
-#define DEFINE_ALLOC_TAG(_alloc_tag)					\
+#define DEFINE_ALLOC_TAG(_alloc_tag, _meta_init)			\
 	static struct alloc_tag _alloc_tag __used __aligned(8)		\
 	__section("alloc_tags") = {					\
 		.ct = CODE_TAG_INIT,					\
+		.meta = _meta_init,					\
 		.counters = &_shared_alloc_tag };
 
 #else /* ARCH_NEEDS_WEAK_PER_CPU */
 
-#define DEFINE_ALLOC_TAG(_alloc_tag)					\
+#define DEFINE_ALLOC_TAG(_alloc_tag, _meta_init)			\
 	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
 	static struct alloc_tag _alloc_tag __used __aligned(8)		\
 	__section("alloc_tags") = {					\
 		.ct = CODE_TAG_INIT,					\
+		.meta = _meta_init,					\
 		.counters = &_alloc_tag_cntr };
 
 #endif /* ARCH_NEEDS_WEAK_PER_CPU */
@@ -191,7 +207,7 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
 
 #else /* CONFIG_MEM_ALLOC_PROFILING */
 
-#define DEFINE_ALLOC_TAG(_alloc_tag)
+#define DEFINE_ALLOC_TAG(_alloc_tag, _meta_init)
 static inline bool mem_alloc_profiling_enabled(void) { return false; }
 static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
 				 size_t bytes) {}
@@ -210,8 +226,14 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
 
 #define alloc_hooks(_do_alloc)						\
 ({									\
-	DEFINE_ALLOC_TAG(_alloc_tag);					\
+	DEFINE_ALLOC_TAG(_alloc_tag, { });				\
 	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
 })
 
+#define alloc_sized_hooks(_do_alloc, _size, ...)			\
+({									\
+	DEFINE_ALLOC_TAG(_alloc_tag, ALLOC_META_INIT(_size));		\
+	alloc_hooks_tag(&_alloc_tag, _do_alloc(_size, __VA_ARGS__));	\
+})
+
 #endif /* _LINUX_ALLOC_TAG_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 86cb61a0102c..314d24c79e05 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -541,7 +541,7 @@ static_assert(PAGE_SHIFT <= 20);
  */
 void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
 			      gfp_t flags) __assume_slab_alignment __malloc;
-#define kmem_cache_alloc(...)			alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
+#define kmem_cache_alloc(...)		alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
 
 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
 			    gfp_t gfpflags) __assume_slab_alignment __malloc;
@@ -685,7 +685,7 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
 	}
 	return __kmalloc_noprof(size, flags);
 }
-#define kmalloc(...)				alloc_hooks(kmalloc_noprof(__VA_ARGS__))
+#define kmalloc(size, ...)		alloc_sized_hooks(kmalloc_noprof, size, __VA_ARGS__)
 
 #define kmem_buckets_alloc(_b, _size, _flags)	\
 	alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
@@ -708,7 +708,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gf
 	}
 	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
 }
-#define kmalloc_node(...)			alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
+#define kmalloc_node(size, ...)		alloc_sized_hooks(kmalloc_node_noprof, size, __VA_ARGS__)
 
 /**
  * kmalloc_array - allocate memory for an array.
@@ -726,7 +726,7 @@ static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t siz
 		return kmalloc_noprof(bytes, flags);
 	return kmalloc_noprof(bytes, flags);
 }
-#define kmalloc_array(...)			alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
+#define kmalloc_array(...)		alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
 
 /**
  * krealloc_array - reallocate memory for an array.
@@ -761,8 +761,8 @@ void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flag
 						 unsigned long caller) __alloc_size(1);
 #define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
 	__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
-#define kmalloc_node_track_caller(...)		\
-	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
+#define kmalloc_node_track_caller(size, ...)	\
+	alloc_sized_hooks(kmalloc_node_track_caller_noprof, size, __VA_ARGS__, _RET_IP_)
 
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
@@ -807,13 +807,13 @@ static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
 {
 	return kmalloc_noprof(size, flags | __GFP_ZERO);
 }
-#define kzalloc(...)				alloc_hooks(kzalloc_noprof(__VA_ARGS__))
+#define kzalloc(size, ...)		alloc_sized_hooks(kzalloc_noprof, size, __VA_ARGS__)
 #define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
 
 void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
 #define kvmalloc_node_noprof(size, flags, node)	\
 	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node)
-#define kvmalloc_node(...)			alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
+#define kvmalloc_node(size, ...)	alloc_sized_hooks(kvmalloc_node_noprof, size, __VA_ARGS__)
 
 #define kvmalloc(_size, _flags)		kvmalloc_node(_size, _flags, NUMA_NO_NODE)
 #define kvmalloc_noprof(_size, _flags)	kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
 
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 81e5f9a70f22..6d2cb72bf269 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -78,6 +78,13 @@ static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
 
 	seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
 	codetag_to_text(out, ct);
+#ifdef CONFIG_SLAB_PER_SITE
+	seq_buf_putc(out, ' ');
+	seq_buf_printf(out, "size:%s(%zu)",
+		       tag->meta.sized == 0 ? "non-slab" :
+		       tag->meta.sized == SIZE_MAX ? "dynamic" : "fixed",
+		       tag->meta.sized == SIZE_MAX ? 0 : tag->meta.sized);
+#endif
 	seq_buf_putc(out, ' ');
 	seq_buf_putc(out, '\n');
 }
diff --git a/mm/Kconfig b/mm/Kconfig
index b72e7d040f78..855c63c3270d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -296,6 +296,15 @@ config SLAB_BUCKETS
 
 	  If unsure, say Y.
 
+config SLAB_PER_SITE
+	bool "Separate slab allocations by call site"
+	depends on !SLUB_TINY
+	default SLAB_FREELIST_HARDENED
+	select SLAB_BUCKETS
+	help
+	  Track whether each kmalloc() call site uses a fixed or dynamic
+	  allocation size, and report the results in /proc/allocinfo.
+
 config SLUB_STATS
 	default n
 	bool "Enable performance statistics"
-- 
2.34.1