[PATCH bpf-next 1/4] bpf: Add map_num_entries map op

Extend the map operations with a map_num_entries callback that returns
the number of entries currently present in a map. Provide
implementations of the callback for map types that already track the
number of elements they hold; map flavors that do not track it (such as
BPF_MAP_TYPE_DEVMAP, as opposed to BPF_MAP_TYPE_DEVMAP_HASH) return
-EOPNOTSUPP.

Co-developed-by: Nick Zavaritsky <mejedi@xxxxxxxxx>
Signed-off-by: Nick Zavaritsky <mejedi@xxxxxxxxx>
Signed-off-by: Charalampos Stylianopoulos <charalampos.stylianopoulos@xxxxxxxxx>
---
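Notes (below the fold, not part of the commit message):

The op returns s64 rather than u64 so that map types which cannot
report a count can return a negative errno (BPF_MAP_TYPE_DEVMAP
returns -EOPNOTSUPP below). The in-kernel consumer of the op is not
part of this patch; a hypothetical caller would presumably gate on the
op being implemented, along these lines (sketch only, the name is
illustrative and the real consumer comes later in the series):

	static s64 bpf_map_num_entries(const struct bpf_map *map)
	{
		if (!map->ops->map_num_entries)
			return -EOPNOTSUPP;
		return map->ops->map_num_entries(map);
	}
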
 include/linux/bpf.h               |  3 +++
 include/linux/bpf_local_storage.h |  1 +
 kernel/bpf/devmap.c               | 12 ++++++++++++
 kernel/bpf/hashtab.c              | 10 ++++++++++
 kernel/bpf/lpm_trie.c             |  8 ++++++++
 kernel/bpf/queue_stack_maps.c     | 16 +++++++++++++++-
 6 files changed, 49 insertions(+), 1 deletion(-)
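
For reference, bpf_map_sum_elem_count(), which the hashtab callback
reuses, is already available as a kfunc (implemented in
kernel/bpf/map_iter.c) and folds the per-CPU map->elem_count counters
into one total, roughly:

	s64 ret = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		ret += READ_ONCE(*per_cpu_ptr(map->elem_count, cpu));

Because the per-CPU counters are read without synchronization, the
count is only approximate while concurrent updates are in flight.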

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index feda0ce90f5a..217260a8f5f4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -175,6 +175,7 @@ struct bpf_map_ops {
 				     void *callback_ctx, u64 flags);
 
 	u64 (*map_mem_usage)(const struct bpf_map *map);
+	s64 (*map_num_entries)(const struct bpf_map *map);
 
 	/* BTF id of struct allocated by map_alloc */
 	int *map_btf_id;
@@ -2402,6 +2403,8 @@ static inline void bpf_map_dec_elem_count(struct bpf_map *map)
 	this_cpu_dec(*map->elem_count);
 }
 
+s64 bpf_map_sum_elem_count(const struct bpf_map *map);
+
 extern int sysctl_unprivileged_bpf_disabled;
 
 bool bpf_token_capable(const struct bpf_token *token, int cap);
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index ab7244d8108f..3a9e69e44c1d 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -204,5 +204,6 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
 			 void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);
 
 u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
+s64 bpf_local_storage_map_num_entries(const struct bpf_map *map);
 
 #endif /* _BPF_LOCAL_STORAGE_H */
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 3aa002a47a96..f43a58389f8f 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -1041,6 +1041,16 @@ static u64 dev_map_mem_usage(const struct bpf_map *map)
 	return usage;
 }
 
+static s64 dev_map_num_entries(const struct bpf_map *map)
+{
+	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+
+	if (map->map_type != BPF_MAP_TYPE_DEVMAP_HASH)
+		return -EOPNOTSUPP;
+
+	return atomic_read((atomic_t *)&dtab->items);
+}
+
 BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
 const struct bpf_map_ops dev_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -1053,6 +1063,7 @@ const struct bpf_map_ops dev_map_ops = {
 	.map_delete_elem = dev_map_delete_elem,
 	.map_check_btf = map_check_no_btf,
 	.map_mem_usage = dev_map_mem_usage,
+	.map_num_entries = dev_map_num_entries,
 	.map_btf_id = &dev_map_btf_ids[0],
 	.map_redirect = dev_map_redirect,
 };
@@ -1068,6 +1079,7 @@ const struct bpf_map_ops dev_map_hash_ops = {
 	.map_delete_elem = dev_map_hash_delete_elem,
 	.map_check_btf = map_check_no_btf,
 	.map_mem_usage = dev_map_mem_usage,
+	.map_num_entries = dev_map_num_entries,
 	.map_btf_id = &dev_map_btf_ids[0],
 	.map_redirect = dev_hash_map_redirect,
 };
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ec941a0ea41..769a4c33c81f 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -2287,6 +2287,11 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
 	return usage;
 }
 
+static s64 htab_map_num_entries(const struct bpf_map *map)
+{
+	return bpf_map_sum_elem_count(map);
+}
+
 BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
 const struct bpf_map_ops htab_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -2304,6 +2309,7 @@ const struct bpf_map_ops htab_map_ops = {
 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 	.map_for_each_callback = bpf_for_each_hash_elem,
 	.map_mem_usage = htab_map_mem_usage,
+	.map_num_entries = htab_map_num_entries,
 	BATCH_OPS(htab),
 	.map_btf_id = &htab_map_btf_ids[0],
 	.iter_seq_info = &iter_seq_info,
@@ -2326,6 +2332,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 	.map_for_each_callback = bpf_for_each_hash_elem,
 	.map_mem_usage = htab_map_mem_usage,
+	.map_num_entries = htab_map_num_entries,
 	BATCH_OPS(htab_lru),
 	.map_btf_id = &htab_map_btf_ids[0],
 	.iter_seq_info = &iter_seq_info,
@@ -2499,6 +2506,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 	.map_for_each_callback = bpf_for_each_hash_elem,
 	.map_mem_usage = htab_map_mem_usage,
+	.map_num_entries = htab_map_num_entries,
 	BATCH_OPS(htab_percpu),
 	.map_btf_id = &htab_map_btf_ids[0],
 	.iter_seq_info = &iter_seq_info,
@@ -2519,6 +2527,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 	.map_for_each_callback = bpf_for_each_hash_elem,
 	.map_mem_usage = htab_map_mem_usage,
+	.map_num_entries = htab_map_num_entries,
 	BATCH_OPS(htab_lru_percpu),
 	.map_btf_id = &htab_map_btf_ids[0],
 	.iter_seq_info = &iter_seq_info,
@@ -2663,6 +2672,7 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
 	.map_gen_lookup = htab_of_map_gen_lookup,
 	.map_check_btf = map_check_no_btf,
 	.map_mem_usage = htab_map_mem_usage,
+	.map_num_entries = htab_map_num_entries,
 	BATCH_OPS(htab),
 	.map_btf_id = &htab_map_btf_ids[0],
 };
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index f8bc1e096182..5297eb2e8e97 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -780,6 +780,13 @@ static u64 trie_mem_usage(const struct bpf_map *map)
 	return elem_size * READ_ONCE(trie->n_entries);
 }
 
+static s64 trie_num_entries(const struct bpf_map *map)
+{
+	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+
+	return READ_ONCE(trie->n_entries);
+}
+
 BTF_ID_LIST_SINGLE(trie_map_btf_ids, struct, lpm_trie)
 const struct bpf_map_ops trie_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -794,5 +801,6 @@ const struct bpf_map_ops trie_map_ops = {
 	.map_delete_batch = generic_map_delete_batch,
 	.map_check_btf = trie_check_btf,
 	.map_mem_usage = trie_mem_usage,
+	.map_num_entries = trie_num_entries,
 	.map_btf_id = &trie_map_btf_ids[0],
 };
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index d869f51ea93a..f66aa31248e7 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -22,7 +22,7 @@ struct bpf_queue_stack {
 	char elements[] __aligned(8);
 };
 
-static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
+static struct bpf_queue_stack *bpf_queue_stack(const struct bpf_map *map)
 {
 	return container_of(map, struct bpf_queue_stack, map);
 }
@@ -265,6 +265,18 @@ static u64 queue_stack_map_mem_usage(const struct bpf_map *map)
 	return usage;
 }
 
+static s64 queue_stack_map_num_entries(const struct bpf_map *map)
+{
+	struct bpf_queue_stack *qs = bpf_queue_stack(map);
+	u32 head = READ_ONCE(qs->head);
+	u32 tail = READ_ONCE(qs->tail);
+
+	/* head and tail wrap around at qs->size, so normalize their
+	 * distance modulo the ring size; a plain head - tail underflows
+	 * once head has wrapped past tail.
+	 */
+	return ((u64)head + qs->size - tail) % qs->size;
+}
+
 BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
 const struct bpf_map_ops queue_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -279,6 +291,7 @@ const struct bpf_map_ops queue_map_ops = {
 	.map_peek_elem = queue_map_peek_elem,
 	.map_get_next_key = queue_stack_map_get_next_key,
 	.map_mem_usage = queue_stack_map_mem_usage,
+	.map_num_entries = queue_stack_map_num_entries,
 	.map_btf_id = &queue_map_btf_ids[0],
 };
 
@@ -295,5 +308,6 @@ const struct bpf_map_ops stack_map_ops = {
 	.map_peek_elem = stack_map_peek_elem,
 	.map_get_next_key = queue_stack_map_get_next_key,
 	.map_mem_usage = queue_stack_map_mem_usage,
+	.map_num_entries = queue_stack_map_num_entries,
 	.map_btf_id = &queue_map_btf_ids[0],
 };
-- 
2.43.0
