Introduce a new helper, bpf_map_kvcalloc(), for the bucket allocation in
bpf_local_storage_map_alloc(), so that it is charged to the map's memcg
like the other bpf map allocations.

Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
---
 include/linux/bpf.h            |  8 ++++++++
 kernel/bpf/bpf_local_storage.c |  4 ++--
 kernel/bpf/syscall.c           | 15 +++++++++++++++
 3 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d7485b7..ebb6ed4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1656,6 +1656,8 @@ int generic_map_delete_batch(struct bpf_map *map,
 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
 			   int node);
 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+		       gfp_t flags);
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 				    size_t align, gfp_t flags);
 #else
@@ -1672,6 +1674,12 @@ void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 	return kzalloc(size, flags);
 }
 
+static inline void *
+bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
+{
+	return kvcalloc(n, size, flags);
+}
+
 static inline void __percpu *
 bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
 		     gfp_t flags)
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 67ab249..71d5bf1 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -620,8 +620,8 @@ struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
 	nbuckets = max_t(u32, 2, nbuckets);
 	smap->bucket_log = ilog2(nbuckets);
 
-	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
-				 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
+	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
+					 nbuckets, GFP_USER | __GFP_NOWARN);
 	if (!smap->buckets) {
 		bpf_map_area_free(smap, &smap->map);
 		return ERR_PTR(-ENOMEM);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 02ce7e9..2bd3bcf 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -489,6 +489,21 @@ void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
 	return ptr;
 }
 
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+		       gfp_t flags)
+{
+	struct mem_cgroup *memcg, *old_memcg;
+	void *ptr;
+
+	memcg = bpf_map_get_memcg(map);
+	old_memcg = set_active_memcg(memcg);
+	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
+	set_active_memcg(old_memcg);
+	bpf_map_put_memcg(memcg);
+
+	return ptr;
+}
+
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 				    size_t align, gfp_t flags)
 {
-- 
1.8.3.1
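
A brief usage sketch (illustrative only, not part of this patch): a map
implementation that sizes a bucket array from its attributes can switch from an
open-coded kvcalloc(..., __GFP_ACCOUNT) to the new helper and let it handle the
memcg charging. The struct example_bucket type and the example_alloc_buckets()
and example_free_buckets() names below are hypothetical.

#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

/* Hypothetical bucket type for a hash-style map. */
struct example_bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

static struct example_bucket *example_alloc_buckets(struct bpf_map *map,
						    u32 nbuckets)
{
	/* __GFP_ACCOUNT is added by the helper, so it is not passed here;
	 * the allocation is charged to the memcg that created the map.
	 */
	return bpf_map_kvcalloc(map, nbuckets, sizeof(struct example_bucket),
				GFP_USER | __GFP_NOWARN);
}

static void example_free_buckets(struct example_bucket *buckets)
{
	/* kvcalloc() may fall back to vmalloc, so free with kvfree(). */
	kvfree(buckets);
}

The helper briefly makes the map's memcg the active memcg around kvcalloc(), so
the charge lands on the cgroup that created the map rather than on whichever
task happens to trigger the allocation; when memcg accounting is not configured,
the static inline fallback is a plain kvcalloc().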