We only use the count for kmalloc hashtabs, not for prealloc hashtabs, because __pcpu_freelist_pop() returns NULL when there are no more elements in the pcpu freelist. The problem is that __pcpu_freelist_pop() traverses all CPUs and takes the spinlock of each CPU's freelist, only to find out at the end that no element is left. We hit a bad case on a big system with 96 CPUs where alloc_htab_elem() took as long as 1ms.

This patch uses the count for prealloc hashtabs too, avoiding the traversal and the per-CPU spin_lock in this case.

Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
---
 kernel/bpf/hashtab.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 32471ba02708..0c432a23aa00 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -855,12 +855,12 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
 	htab_put_fd_value(htab, l);
+	atomic_dec(&htab->count);
 
 	if (htab_is_prealloc(htab)) {
 		check_and_free_timer(htab, l);
 		__pcpu_freelist_push(&htab->freelist, &l->fnode);
 	} else {
-		atomic_dec(&htab->count);
 		l->htab = htab;
 		call_rcu(&l->rcu, htab_elem_free_rcu);
 	}
@@ -938,6 +938,11 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 	} else {
 		struct pcpu_freelist_node *l;
 
+		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
+			l_new = ERR_PTR(-E2BIG);
+			goto dec_count;
+		}
+
 		l = __pcpu_freelist_pop(&htab->freelist);
 		if (!l)
 			return ERR_PTR(-E2BIG);
-- 
2.11.0
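
For reference, below is a minimal userspace sketch of the pattern the patch relies on. It is illustration only: NR_CPUS, max_entries and the pthread spinlocks are stand-ins for the kernel's per-CPU raw-spinlock freelist, not the real __pcpu_freelist_pop() implementation. It shows why an empty pop is expensive on a many-CPU machine and how checking an atomic count first avoids touching any per-CPU lock when the table is already full.

/*
 * Userspace model of the pattern in this patch (illustration only, not
 * kernel code): a per-CPU freelist whose pop path locks every CPU's list
 * before concluding nothing is left, and an atomic element count that
 * lets the allocator fail fast before paying for that scan.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 96

struct node { struct node *next; };

struct freelist_head {
	struct node *first;
	pthread_spinlock_t lock;
};

static struct freelist_head freelist[NR_CPUS];
static atomic_int count;			/* models htab->count           */
static const int max_entries = 1024;		/* models htab->map.max_entries */

/* Models __pcpu_freelist_pop(): locks each CPU's list until it finds a node. */
static struct node *freelist_pop(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_spin_lock(&freelist[cpu].lock);
		struct node *n = freelist[cpu].first;
		if (n)
			freelist[cpu].first = n->next;
		pthread_spin_unlock(&freelist[cpu].lock);
		if (n)
			return n;
	}
	return NULL;	/* empty: we paid NR_CPUS lock/unlock pairs to learn this */
}

/* Models the patched alloc path: bail out on the counter before popping. */
static struct node *alloc_elem(void)
{
	if (atomic_fetch_add(&count, 1) + 1 > max_entries) {
		atomic_fetch_sub(&count, 1);	/* mirrors "goto dec_count" */
		return NULL;			/* -E2BIG, no lock touched  */
	}
	return freelist_pop();
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_spin_init(&freelist[cpu].lock, PTHREAD_PROCESS_PRIVATE);

	/* Pretend every preallocated element is already in use. */
	atomic_store(&count, max_entries);

	/* Rejected by the counter alone; the 96 per-CPU locks are never taken. */
	printf("alloc on full table: %p\n", (void *)alloc_elem());
	return 0;
}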