From: Kent Overstreet <kent.overstreet@xxxxxxxxx>

This gives better memory allocation profiling results; rhashtable
allocations will be accounted to the code that initialized the
rhashtable.

Signed-off-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
---
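(Reviewer note below the cut line; git am drops everything between here
and the diff.)

A minimal usage sketch of the accounting this enables; the my_obj,
my_table, and my_init names are hypothetical, not from this series.
After this change rhashtable_init() expands to
alloc_hooks(rhashtable_init_noprof(...)), so the bucket table allocated
at init time, and every later grow/shrink or nested-table allocation,
is charged to the caller's line instead of to lib/rhashtable.c:

#include <linux/rhashtable.h>

struct my_obj {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params my_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_obj, key),
	.head_offset	= offsetof(struct my_obj, node),
};

static struct rhashtable my_table;

static int __init my_init(void)
{
	/*
	 * With CONFIG_MEM_ALLOC_PROFILING=y, the alloc_tag of this
	 * call site is saved in my_table.alloc_tag and re-applied
	 * around each bucket_table_alloc(), so resize allocations
	 * are accounted here as well.
	 */
	return rhashtable_init(&my_table, &my_params);
}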
 include/linux/rhashtable-types.h | 11 +++++--
 lib/rhashtable.c                 | 52 +++++++++++++++++++++++++-------
 2 files changed, 50 insertions(+), 13 deletions(-)

diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
index b6f3797277ff..015c8298bebc 100644
--- a/include/linux/rhashtable-types.h
+++ b/include/linux/rhashtable-types.h
@@ -9,6 +9,7 @@
 #ifndef _LINUX_RHASHTABLE_TYPES_H
 #define _LINUX_RHASHTABLE_TYPES_H
 
+#include <linux/alloc_tag.h>
 #include <linux/atomic.h>
 #include <linux/compiler.h>
 #include <linux/mutex.h>
@@ -88,6 +89,9 @@ struct rhashtable {
 	struct mutex			mutex;
 	spinlock_t			lock;
 	atomic_t			nelems;
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+	struct alloc_tag		*alloc_tag;
+#endif
 };
 
 /**
@@ -127,9 +131,12 @@ struct rhashtable_iter {
 	bool end_of_table;
 };
 
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
 		    const struct rhashtable_params *params);
-int rhltable_init(struct rhltable *hlt,
+#define rhashtable_init(...)	alloc_hooks(rhashtable_init_noprof(__VA_ARGS__))
+
+int rhltable_init_noprof(struct rhltable *hlt,
 		  const struct rhashtable_params *params);
+#define rhltable_init(...)	alloc_hooks(rhltable_init_noprof(__VA_ARGS__))
 
 #endif /* _LINUX_RHASHTABLE_TYPES_H */
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6ae2ba8e06a2..b62116f332b8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -63,6 +63,27 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #define ASSERT_RHT_MUTEX(HT)
 #endif
 
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+static inline void rhashtable_alloc_tag_init(struct rhashtable *ht)
+{
+	ht->alloc_tag = current->alloc_tag;
+}
+
+static inline struct alloc_tag *rhashtable_alloc_tag_save(struct rhashtable *ht)
+{
+	return alloc_tag_save(ht->alloc_tag);
+}
+
+static inline void rhashtable_alloc_tag_restore(struct rhashtable *ht, struct alloc_tag *old)
+{
+	alloc_tag_restore(ht->alloc_tag, old);
+}
+#else
+#define rhashtable_alloc_tag_init(ht)
+static inline struct alloc_tag *rhashtable_alloc_tag_save(struct rhashtable *ht) { return NULL; }
+#define rhashtable_alloc_tag_restore(ht, old)
+#endif
+
 static inline union nested_table *nested_table_top(
 	const struct bucket_table *tbl)
 {
@@ -130,7 +151,7 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 	if (ntbl)
 		return ntbl;
 
-	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+	ntbl = kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO);
 
 	if (ntbl && leaf) {
 		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
@@ -157,7 +178,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
 
 	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
 
-	tbl = kzalloc(size, gfp);
+	tbl = kmalloc_noprof(size, gfp|__GFP_ZERO);
 	if (!tbl)
 		return NULL;
 
@@ -180,8 +201,10 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	size_t size;
 	int i;
 	static struct lock_class_key __key;
+	struct alloc_tag * __maybe_unused old = rhashtable_alloc_tag_save(ht);
 
-	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
+	tbl = kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
+				   gfp|__GFP_ZERO, NUMA_NO_NODE);
 
 	size = nbuckets;
 
@@ -190,6 +213,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 		nbuckets = 0;
 	}
 
+	rhashtable_alloc_tag_restore(ht, old);
+
 	if (tbl == NULL)
 		return NULL;
 
@@ -975,7 +1000,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
 }
 
 /**
- * rhashtable_init - initialize a new hash table
+ * rhashtable_init_noprof - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
@@ -1016,7 +1041,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
 *	.obj_hashfn = my_hash_fn,
 * };
 */
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
 		    const struct rhashtable_params *params)
 {
 	struct bucket_table *tbl;
@@ -1031,6 +1056,8 @@ int rhashtable_init(struct rhashtable *ht,
 	spin_lock_init(&ht->lock);
 	memcpy(&ht->p, params, sizeof(*params));
 
+	rhashtable_alloc_tag_init(ht);
+
 	if (params->min_size)
 		ht->p.min_size = roundup_pow_of_two(params->min_size);
 
@@ -1076,26 +1103,26 @@ int rhashtable_init(struct rhashtable *ht,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_init);
+EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
 
 /**
- * rhltable_init - initialize a new hash list table
+ * rhltable_init_noprof - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
- * See documentation for rhashtable_init.
+ * See documentation for rhashtable_init_noprof.
 */
-int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
 {
 	int err;
 
-	err = rhashtable_init(&hlt->ht, params);
+	err = rhashtable_init_noprof(&hlt->ht, params);
 	hlt->ht.rhlist = true;
 	return err;
 }
-EXPORT_SYMBOL_GPL(rhltable_init);
+EXPORT_SYMBOL_GPL(rhltable_init_noprof);
 
 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
 				void (*free_fn)(void *ptr, void *arg),
@@ -1222,6 +1249,7 @@ struct rhash_lock_head __rcu **rht_bucket_nested_insert(
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
 	unsigned int size = tbl->size >> tbl->nest;
 	union nested_table *ntbl;
+	struct alloc_tag * __maybe_unused old = rhashtable_alloc_tag_save(ht);
 
 	ntbl = nested_table_top(tbl);
 	hash >>= tbl->nest;
@@ -1236,6 +1264,8 @@ struct rhash_lock_head __rcu **rht_bucket_nested_insert(
 				  size <= (1 << shift));
 	}
 
+	rhashtable_alloc_tag_restore(ht, old);
+
 	if (!ntbl)
 		return NULL;
 
-- 
2.43.0.687.g38aa6559b0-goog