[PATCH bpf-next 6/7] bpf: introduce bpf_mem_alloc_size()

Introduce helpers to get the memory usage of a bpf_mem_alloc: both the
size of the bpf_mem_alloc pool itself and the size of an in-use element.
Note that for the pool we only count the free lists and ignore the other
lists, because there won't be many elements on the other lists and
ignoring them keeps the code simple.
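
For illustration, a hash map that allocates its elements from a
bpf_mem_alloc could combine the new helper with its own in-use
accounting roughly as below. This is only a sketch: htab_mem_usage()
is a hypothetical callback and not part of this patch, though the
fields mirror struct bpf_htab.

	/* Hypothetical memory-usage callback: allocator pool size plus an
	 * estimate of the elements currently in use.
	 */
	static unsigned long htab_mem_usage(struct bpf_map *map)
	{
		struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
		unsigned long size;

		/* memory sitting in the allocator's free lists */
		size = bpf_mem_alloc_size(&htab->ma);
		/* plus the elements currently handed out to the map */
		size += (unsigned long)atomic_read(&htab->count) * htab->elem_size;
		return size;
	}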

Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
---
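Illustrative usage of the element-size helper (a sketch only: 'usage'
is a hypothetical atomic_long_t counter, not something this patch
adds). A caller that wants a running in-use byte counter could pair it
with alloc/free like this:

	void *obj;

	/* charge the counter when an element is handed out ... */
	obj = bpf_mem_cache_alloc(&ma);
	if (obj)
		atomic_long_add(bpf_mem_cache_elem_size(&ma, obj), &usage);

	/* ... and uncharge it again before the element is freed */
	atomic_long_sub(bpf_mem_cache_elem_size(&ma, obj), &usage);
	bpf_mem_cache_free(&ma, obj);
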
 include/linux/bpf_mem_alloc.h |  2 ++
 kernel/bpf/memalloc.c         | 70 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 72 insertions(+)

diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
index 3e164b8..86d8dcf 100644
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -24,5 +24,7 @@ struct bpf_mem_alloc {
 /* kmem_cache_alloc/free equivalent: */
 void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
 void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
+unsigned long bpf_mem_alloc_size(struct bpf_mem_alloc *ma);
+unsigned long bpf_mem_cache_elem_size(struct bpf_mem_alloc *ma, void *ptr);
 
 #endif /* _BPF_MEM_ALLOC_H */
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index ebcc3dd..ebf8964 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -224,6 +224,22 @@ static void free_one(struct bpf_mem_cache *c, void *obj)
 	kfree(obj);
 }
 
+static unsigned long bpf_mem_cache_size(struct bpf_mem_cache *c, void *obj)
+{
+	unsigned long size;
+
+	if (!obj)
+		return 0;
+
+	if (c->percpu_size) {
+		size = percpu_size(((void **)obj)[1]);
+		size += ksize(obj);
+		return size;
+	}
+
+	return ksize(obj);
+}
+
 static void __free_rcu(struct rcu_head *head)
 {
 	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
@@ -559,6 +575,41 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 	}
 }
 
+/* We only account for the elements on the free list */
+static unsigned long bpf_mem_cache_free_size(struct bpf_mem_cache *c)
+{
+	return c->unit_size * c->free_cnt;
+}
+
+/* Get the size of a bpf_mem_alloc pool: the free lists plus the per-CPU cache structs. */
+unsigned long bpf_mem_alloc_size(struct bpf_mem_alloc *ma)
+{
+	struct bpf_mem_caches *cc;
+	struct bpf_mem_cache *c;
+	unsigned long size = 0;
+	int cpu, i;
+
+	if (ma->cache) {
+		for_each_possible_cpu(cpu) {
+			c = per_cpu_ptr(ma->cache, cpu);
+			size += bpf_mem_cache_free_size(c);
+		}
+		size += percpu_size(ma->cache);
+	}
+	if (ma->caches) {
+		for_each_possible_cpu(cpu) {
+			cc = per_cpu_ptr(ma->caches, cpu);
+			for (i = 0; i < NUM_CACHES; i++) {
+				c = &cc->cache[i];
+				size += bpf_mem_cache_free_size(c);
+			}
+		}
+		size += percpu_size(ma->caches);
+	}
+
+	return size;
+}
+
 /* notrace is necessary here and in other functions to make sure
  * bpf programs cannot attach to them and cause llist corruptions.
  */
@@ -675,3 +726,22 @@ void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
 
 	unit_free(this_cpu_ptr(ma->cache), ptr);
 }
+
+/* Get the element size from the element pointer @ptr */
+unsigned long notrace bpf_mem_cache_elem_size(struct bpf_mem_alloc *ma, void *ptr)
+{
+	struct llist_node *llnode;
+	struct bpf_mem_cache *c;
+	unsigned long size;
+
+	if (!ptr)
+		return 0;
+
+	llnode = ptr - LLIST_NODE_SZ;
+	migrate_disable();
+	c = this_cpu_ptr(ma->cache);
+	size = bpf_mem_cache_size(c, llnode);
+	migrate_enable();
+
+	return size;
+}
-- 
1.8.3.1



