This patch fixes an endless loop condition that can occur in
bpf_for_each_hash_elem() and soft-lock the core running the iteration.
My understanding is that deleting and re-inserting an element from
within the callback places the new element after the iteration cursor
in the RCU list, so there is a chance that the RCU reader picks up the
re-inserted element and iterates over it again, indefinitely. The patch
switches to the _safe variant of the macro, which fetches the next
element before executing the loop body for the current element, so a
re-inserted element can never be revisited by the same traversal.

The following simple BPF program can be used to reproduce the issue:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_experimental.h" /* selftests header providing bpf_for() */

#define N (64)

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, N);
	__type(key, __u64);
	__type(value, __u64);
} map SEC(".maps");

static int cb(struct bpf_map *map, __u64 *key, __u64 *value, void *arg)
{
	/* Delete the current element and re-insert it; the fresh copy
	 * can land after the iteration cursor in the bucket list.
	 */
	bpf_map_delete_elem(map, key);
	bpf_map_update_elem(map, key, value, 0);
	return 0;
}

SEC("uprobe//proc/self/exe:test")
int BPF_PROG(test)
{
	__u64 i;

	bpf_for(i, 0, N) {
		bpf_map_update_elem(&map, &i, &i, 0);
	}
	bpf_for_each_map_elem(&map, cb, NULL, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Signed-off-by: Brandon Kammerdiener <brandon.kammerdiener@xxxxxxxxx>
---
 kernel/bpf/hashtab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 4a9eeb7aef85..43574b0495c3 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -2224,7 +2224,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 		b = &htab->buckets[i];
 		rcu_read_lock();
 		head = &b->head;
-		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
+		hlist_nulls_for_each_entry_safe(elem, n, head, hash_node) {
 			key = elem->key;
 			if (is_percpu) {
 				/* current cpu value for percpu map */
--
2.48.1
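
For reviewers who want the intuition without digging through the
list_nulls headers: the difference between the two macros boils down to
when the next pointer is read. Below is a minimal user-space sketch (my
own simplified macros, not the kernel's definitions, which additionally
handle nulls markers and RCU dereferencing) showing why caching the
next pointer before the body runs makes the revisit impossible:

#include <stdio.h>

struct node { int key; struct node *next; };

/* Models the _rcu flavour: the next pointer is read in the increment
 * expression, i.e. after the body has run, so a body that deletes pos
 * and re-inserts it behind the cursor is followed onto the new copy.
 */
#define for_each_entry(pos, head) \
	for ((pos) = (head); (pos); (pos) = (pos)->next)

/* Models the _safe flavour: the next pointer is cached in n before the
 * body runs, so whatever the body does to pos cannot steer the walk.
 */
#define for_each_entry_safe(pos, n, head) \
	for ((pos) = (head); (pos) && ((n) = (pos)->next, 1); (pos) = (n))

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *pos, *n;

	for_each_entry_safe(pos, n, &a) {
		/* Even if this body unlinked pos and re-inserted it
		 * after the cursor, the walk would continue at the
		 * cached n and terminate normally.
		 */
		printf("%d\n", pos->key);
	}
	return 0;
}

The trade-off is the usual one for _safe iteration: an element inserted
after the cursor during the walk may be skipped, which seems acceptable
here since concurrent updates already race with the iteration.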