Re: [RFC PATCH bpf-next 1/3] bpf: factor out helpers for htab bucket and element lookup

On 12/18/21 9:22 PM, Hou Tao wrote:
Calls to htab_map_hash() and element lookups (e.g. lookup_elem_raw())
are scattered all over the place. In order to make changing the hash
algorithm and the element comparison logic easy, factor out three
helpers corresponding to the three lookup patterns in the htab
implementation (a rough usage sketch follows the list):

1) look up an element locklessly by key (e.g. htab_map_lookup_elem):
   nolock_lookup_elem()
2) look up an element with the bucket locked (e.g. htab_map_delete_elem):
   lock_lookup_elem()
3) look up a bucket and lock it later (e.g. htab_map_update_elem):
   lookup_bucket()
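As a rough illustration only (a hypothetical caller shape, not code
from this patch), pattern 1) turns a lookup call site into:

	struct nolock_lookup_elem_result e;

	nolock_lookup_elem(htab, key, &e);
	/* e.elem is NULL on miss; e.hash stays available to the caller */
	return e.elem;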

For performance reasons, mark these three helpers as __always_inline.

Also factor out two helpers, next_elem() and first_elem(), to make
the iteration of the element list more concise.
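For illustration, with these helpers an iteration over a bucket's
element list looks roughly like the following (hypothetical loop, not
part of this patch; "head" is the bucket's hlist_nulls_head):

	struct htab_elem *l;

	/* both helpers return NULL once the nulls end marker is hit */
	for (l = first_elem(head); l; l = next_elem(l)) {
		/* ... compare l->hash and l->key here ... */
	}

instead of open-coding the rcu_dereference_raw() plus
hlist_nulls_entry_safe() pair at every call site.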

Signed-off-by: Hou Tao <houtao1@xxxxxxxxxx>
---
  kernel/bpf/hashtab.c | 350 ++++++++++++++++++++++---------------------
  1 file changed, 183 insertions(+), 167 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index d29af9988f37..e21e27162e08 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -127,6 +127,23 @@ struct htab_elem {
  	char key[] __aligned(8);
  };
+struct nolock_lookup_elem_result {
+	struct htab_elem *elem;
+	u32 hash;
+};
+
+struct lock_lookup_elem_result {
+	struct bucket *bucket;
+	unsigned long flags;
+	struct htab_elem *elem;
+	u32 hash;
+};
+
+struct lookup_bucket_result {
+	struct bucket *bucket;
+	u32 hash;
+};
+
  static inline bool htab_is_prealloc(const struct bpf_htab *htab)
  {
  	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
@@ -233,6 +250,22 @@ static bool htab_has_extra_elems(struct bpf_htab *htab)
  	return !htab_is_percpu(htab) && !htab_is_lru(htab);
  }
+static inline struct htab_elem *next_elem(const struct htab_elem *e)
+{
+	struct hlist_nulls_node *n =
+		rcu_dereference_raw(hlist_nulls_next_rcu(&e->hash_node));
+
+	return hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
+}
+
+static inline struct htab_elem *first_elem(const struct hlist_nulls_head *head)
+{
+	struct hlist_nulls_node *n =
+		rcu_dereference_raw(hlist_nulls_first_rcu(head));
+
+	return hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
+}
+
  static void htab_free_prealloced_timers(struct bpf_htab *htab)
  {
  	u32 num_entries = htab->map.max_entries;
@@ -614,6 +647,59 @@ static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
  	return NULL;
  }
+static __always_inline void
+nolock_lookup_elem(struct bpf_htab *htab, void *key,
+		   struct nolock_lookup_elem_result *e)

There is no need to mark such (non-trivial) functions as __always_inline. Let the compiler decide whether
inlining should be done or not.
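
Concretely, the suggestion amounts to dropping the attribute, nothing
else (sketch of the suggested declaration):

	static void
	nolock_lookup_elem(struct bpf_htab *htab, void *key,
			   struct nolock_lookup_elem_result *e)

A static function with only a few call sites is a likely candidate for
automatic inlining anyway, so the attribute mainly overrides the
compiler's cost model.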


+{
+	struct hlist_nulls_head *head;
+	u32 key_size, hash;
+
+	key_size = htab->map.key_size;
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
+	head = select_bucket(htab, hash);
+
+	e->elem = lookup_nulls_elem_raw(head, hash, key, key_size,
+					htab->n_buckets);
+	e->hash = hash;
+}
+
+static __always_inline void
+lock_lookup_elem(struct bpf_htab *htab, void *key,
+		 struct lock_lookup_elem_result *e)
[...]


