[PATCH bpf-next 5/7] bpf: Factor out the element allocation for pre-allocated htab

From: Hou Tao <houtao1@xxxxxxxxxx>

The element allocation for a pre-allocated htab consists of two cases:
1) when there is an old_element, directly reuse the per-cpu extra_elems
   and stash the old_element as the next per-cpu extra_elems
2) when there is no old_element, allocate from the per-cpu free list

The reuse and stash of per-cpu extra_elems will be split into two
independent steps. After the split, per-cpu extra_elems may be NULL when
trying to reuse it, and the allocation needs to fall back to the per-cpu
free list when that happens (see the sketch below).
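
For illustration only, not part of this patch: a rough sketch of how the
helper could handle the NULL case once the split is in place, assuming the
reuse step simply consumes the per-cpu slot and the stash of old_elem is
done elsewhere. The actual follow-up change may look different.

  /* Illustrative sketch only: once reuse and stash are independent steps,
   * the per-cpu extra_elems slot may already be empty (NULL), so the
   * helper falls back to the per-cpu free list in that case.
   */
  static struct htab_elem *alloc_preallocated_htab_elem(struct bpf_htab *htab,
                                                        struct htab_elem *old_elem)
  {
          struct pcpu_freelist_node *l;
          struct htab_elem *l_new;

          if (old_elem) {
                  struct htab_elem **pl_new = this_cpu_ptr(htab->extra_elems);

                  l_new = *pl_new;
                  if (l_new) {
                          /* reuse the stashed extra element; the stash of
                           * old_elem is assumed to happen in a separate step
                           */
                          *pl_new = NULL;
                          return l_new;
                  }
                  /* slot is empty: fall through to the free list */
          }

          l = __pcpu_freelist_pop(&htab->freelist);
          if (!l)
                  return ERR_PTR(-E2BIG);

          bpf_map_inc_elem_count(&htab->map);
          return container_of(l, struct htab_elem, fnode);
  }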

Therefore, factor out the element allocation into a helper to make the
following change straightforward.

Signed-off-by: Hou Tao <houtao1@xxxxxxxxxx>
---
 kernel/bpf/hashtab.c | 49 +++++++++++++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 17 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3c6eebabb492..9211df2adda4 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1021,6 +1021,34 @@ static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
 	       BITS_PER_LONG == 64;
 }
 
+static struct htab_elem *alloc_preallocated_htab_elem(struct bpf_htab *htab,
+						      struct htab_elem *old_elem)
+{
+	struct pcpu_freelist_node *l;
+	struct htab_elem *l_new;
+
+	if (old_elem) {
+		struct htab_elem **pl_new;
+
+		/* if we're updating the existing element,
+		 * use per-cpu extra elems to avoid freelist_pop/push
+		 */
+		pl_new = this_cpu_ptr(htab->extra_elems);
+		l_new = *pl_new;
+		*pl_new = old_elem;
+		return l_new;
+	}
+
+	l = __pcpu_freelist_pop(&htab->freelist);
+	if (!l)
+		return ERR_PTR(-E2BIG);
+
+	l_new = container_of(l, struct htab_elem, fnode);
+	bpf_map_inc_elem_count(&htab->map);
+
+	return l_new;
+}
+
 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 					 void *value, u32 key_size, u32 hash,
 					 bool percpu, bool onallcpus,
@@ -1028,26 +1056,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 {
 	u32 size = htab->map.value_size;
 	bool prealloc = htab_is_prealloc(htab);
-	struct htab_elem *l_new, **pl_new;
+	struct htab_elem *l_new;
 	void __percpu *pptr;
 
 	if (prealloc) {
-		if (old_elem) {
-			/* if we're updating the existing element,
-			 * use per-cpu extra elems to avoid freelist_pop/push
-			 */
-			pl_new = this_cpu_ptr(htab->extra_elems);
-			l_new = *pl_new;
-			*pl_new = old_elem;
-		} else {
-			struct pcpu_freelist_node *l;
-
-			l = __pcpu_freelist_pop(&htab->freelist);
-			if (!l)
-				return ERR_PTR(-E2BIG);
-			l_new = container_of(l, struct htab_elem, fnode);
-			bpf_map_inc_elem_count(&htab->map);
-		}
+		l_new = alloc_preallocated_htab_elem(htab, old_elem);
+		if (IS_ERR(l_new))
+			return l_new;
 	} else {
 		if (is_map_full(htab))
 			if (!old_elem)
-- 
2.29.2




