The BPF program calling these helpers must hold the spinlock associated
with the rbtree map when doing so. Otherwise, a concurrent add/remove
operation could corrupt the tree while {add,remove,find} are walking it
via a callback or rebalancing it after an update.

Signed-off-by: Dave Marchevsky <davemarchevsky@xxxxxx>
---
 kernel/bpf/rbtree.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/kernel/bpf/rbtree.c b/kernel/bpf/rbtree.c
index bf2e30af82ec..5b1ab73e164f 100644
--- a/kernel/bpf/rbtree.c
+++ b/kernel/bpf/rbtree.c
@@ -14,6 +14,11 @@ struct bpf_rbtree {
 
 BTF_ID_LIST_SINGLE(bpf_rbtree_btf_ids, struct, rb_node);
 
+static bool __rbtree_lock_held(struct bpf_rbtree *tree)
+{
+	return spin_is_locked((spinlock_t *)tree->lock);
+}
+
 static int rbtree_map_alloc_check(union bpf_attr *attr)
 {
 	if (attr->max_entries || !attr->btf_value_type_id)
@@ -93,6 +98,9 @@ BPF_CALL_3(bpf_rbtree_add, struct bpf_map *, map, void *, value, void *, cb)
 	struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
 	struct rb_node *node = (struct rb_node *)value;
 
+	if (!__rbtree_lock_held(tree))
+		return (u64)NULL;
+
 	if (WARN_ON_ONCE(!RB_EMPTY_NODE(node)))
 		return (u64)NULL;
 
@@ -114,6 +122,9 @@ BPF_CALL_3(bpf_rbtree_find, struct bpf_map *, map, void *, key, void *, cb)
 {
 	struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
 
+	if (!__rbtree_lock_held(tree))
+		return (u64)NULL;
+
 	return (u64)rb_find(key, &tree->root.rb_root,
 			    (int (*)(const void *key,
 				     const struct rb_node *))cb);
@@ -206,6 +217,9 @@ BPF_CALL_2(bpf_rbtree_remove, struct bpf_map *, map, void *, value)
 	struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
 	struct rb_node *node = (struct rb_node *)value;
 
+	if (!__rbtree_lock_held(tree))
+		return (u64)NULL;
+
 	if (WARN_ON_ONCE(RB_EMPTY_NODE(node)))
 		return (u64)NULL;
 
-- 
2.30.2
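
For illustration only, a rough sketch of what a BPF program calling these
helpers under the map's lock might look like. Only bpf_rbtree_{add,find,remove}
come from this patch; the map type name, bpf_rbtree_alloc_node,
bpf_rbtree_free_node, bpf_rbtree_get_lock, bpf_rbtree_lock and
bpf_rbtree_unlock are assumptions based on the rest of this series and may
not match the final API, and verifier details are ignored here.

/* Illustrative sketch; names other than bpf_rbtree_{add,find,remove} are
 * assumptions, not part of this patch.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct node_data {
	struct rb_node node;	/* embedded rb_node, first member */
	__u32 key;
	__u32 val;
};

struct {
	__uint(type, BPF_MAP_TYPE_RBTREE);	/* assumed map type name */
	__type(value, struct node_data);
} rbtree SEC(".maps");

/* Ordering callback passed to bpf_rbtree_add(); assumed to follow the
 * rb_add()-style bool less(a, b) signature. Casting is safe because
 * struct rb_node is the first member of struct node_data.
 */
static bool less(struct rb_node *a, const struct rb_node *b)
{
	struct node_data *node_a = (struct node_data *)a;
	struct node_data *node_b = (struct node_data *)b;

	return node_a->key < node_b->key;
}

SEC("tc")
int add_node(struct __sk_buff *skb)
{
	struct node_data *node;

	/* assumed node allocation helper from elsewhere in the series */
	node = bpf_rbtree_alloc_node(&rbtree, sizeof(*node));
	if (!node)
		return 0;
	node->key = 42;
	node->val = 1;

	/* With this patch, bpf_rbtree_add() returns NULL unless the map's
	 * lock is held, so take it around the tree operation.
	 */
	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
	if (!bpf_rbtree_add(&rbtree, node, less))
		bpf_rbtree_free_node(&rbtree, node);	/* assumed free helper */
	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
	return 0;
}

char _license[] SEC("license") = "GPL";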