Re: [RFC PATCH bpf-next 04/11] bpf: Add rbtree map

On 7/22/22 11:34 AM, Dave Marchevsky wrote:
+
+static struct rb_node *rbtree_map_alloc_node(struct bpf_map *map, size_t sz)
+{
+	struct rb_node *node;
+
+	node = bpf_map_kmalloc_node(map, sz, GFP_KERNEL, map->numa_node);

As Yonghong pointed out, this should be GFP_NOWAIT for now.
Later we can convert this to bpf_mem_alloc to make sure it's safe from any context.
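I.e. something like this for now (whether to also pass __GFP_NOWARN, as
other map allocation paths do, is a judgment call):

	node = bpf_map_kmalloc_node(map, sz, GFP_NOWAIT | __GFP_NOWARN,
				    map->numa_node);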

+	if (!node)
+		return NULL;
+	RB_CLEAR_NODE(node);
+	return node;
+}
+
+BPF_CALL_2(bpf_rbtree_alloc_node, struct bpf_map *, map, u32, sz)
+{
+	struct rb_node *node;
+
+	if (map->map_type != BPF_MAP_TYPE_RBTREE)
+		return (u64)NULL;
+
+	if (sz < sizeof(*node))
+		return (u64)NULL;
+
+	node = rbtree_map_alloc_node(map, (size_t)sz);
+	if (!node)
+		return (u64)NULL;
+
+	return (u64)node;
+}
+
+const struct bpf_func_proto bpf_rbtree_alloc_node_proto = {
+	.func = bpf_rbtree_alloc_node,
+	.gpl_only = true,
+	.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
+	.ret_btf_id = &bpf_rbtree_btf_ids[0],

Since the btf_id is unused, please use
.ret_btf_id   = BPF_PTR_POISON
as bpf_kptr_xchg_proto() does.
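I.e., roughly:

	const struct bpf_func_proto bpf_rbtree_alloc_node_proto = {
		.func		= bpf_rbtree_alloc_node,
		.gpl_only	= true,
		.ret_type	= RET_PTR_TO_BTF_ID_OR_NULL,
		/* real btf_id is picked from the map during verification */
		.ret_btf_id	= BPF_PTR_POISON,
		...
	};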

+
+BPF_CALL_2(bpf_rbtree_remove, struct bpf_map *, map, void *, value)
+{
+	struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
+	struct rb_node *node = (struct rb_node *)value;
+
+	if (WARN_ON_ONCE(RB_EMPTY_NODE(node)))
+		return (u64)NULL;
+
+	rb_erase_cached(node, &tree->root);
+	RB_CLEAR_NODE(node);
+	return (u64)node;
+}
+
+const struct bpf_func_proto bpf_rbtree_remove_proto = {
+	.func = bpf_rbtree_remove,
+	.gpl_only = true,
+	.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
+	.ret_btf_id = &bpf_rbtree_btf_ids[0],
+	.arg1_type = ARG_CONST_MAP_PTR,
+	.arg2_type = ARG_PTR_TO_BTF_ID,
+	.arg2_btf_id = &bpf_rbtree_btf_ids[0],

Same for the args.
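I.e., in bpf_rbtree_remove_proto, roughly:

	.ret_btf_id	= BPF_PTR_POISON,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID,
	.arg2_btf_id	= BPF_PTR_POISON,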

+
+BTF_ID_LIST_SINGLE(bpf_rbtree_map_btf_ids, struct, bpf_rbtree)

Can this be removed?

+const struct bpf_map_ops rbtree_map_ops = {
+	.map_meta_equal = bpf_map_meta_equal,
+	.map_alloc_check = rbtree_map_alloc_check,
+	.map_alloc = rbtree_map_alloc,
+	.map_free = rbtree_map_free,
+	.map_get_next_key = rbtree_map_get_next_key,
+	.map_push_elem = rbtree_map_push_elem,
+	.map_peek_elem = rbtree_map_peek_elem,
+	.map_pop_elem = rbtree_map_pop_elem,
+	.map_lookup_elem = rbtree_map_lookup_elem,
+	.map_update_elem = rbtree_map_update_elem,
+	.map_delete_elem = rbtree_map_delete_elem,
+	.map_check_btf = rbtree_map_check_btf,
+	.map_btf_id = &bpf_rbtree_map_btf_ids[0],
+};
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 1f50becce141..535f673882cd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -481,7 +481,9 @@ static bool is_acquire_function(enum bpf_func_id func_id,
  	    func_id == BPF_FUNC_sk_lookup_udp ||
  	    func_id == BPF_FUNC_skc_lookup_tcp ||
  	    func_id == BPF_FUNC_ringbuf_reserve ||
-	    func_id == BPF_FUNC_kptr_xchg)
+	    func_id == BPF_FUNC_kptr_xchg ||
+	    func_id == BPF_FUNC_rbtree_alloc_node ||
+	    func_id == BPF_FUNC_rbtree_remove)
  		return true;

  	if (func_id == BPF_FUNC_map_lookup_elem &&
@@ -531,6 +533,20 @@ static bool is_cmpxchg_insn(const struct bpf_insn *insn)
  	       insn->imm == BPF_CMPXCHG;
  }
+static bool function_manipulates_rbtree_node(enum bpf_func_id func_id)
+{
+	return func_id == BPF_FUNC_rbtree_add ||
+		func_id == BPF_FUNC_rbtree_remove ||
+		func_id == BPF_FUNC_rbtree_free_node;
+}
+
+static bool function_returns_rbtree_node(enum bpf_func_id func_id)
+{
+	return func_id == BPF_FUNC_rbtree_alloc_node ||
+		func_id == BPF_FUNC_rbtree_add ||
+		func_id == BPF_FUNC_rbtree_remove;
+}
+
  /* string representation of 'enum bpf_reg_type'
   *
   * Note that reg_type_str() can not appear more than once in a single verbose()
@@ -3784,6 +3800,13 @@ static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
  	return 0;
  }


>    * [ TODO: Existing logic prevents any writes to PTR_TO_BTF_ID. This is
>      broadly turned off in this patch and replaced with "no writes to
>      struct rb_node if PTR_TO_BTF_ID struct has one". This is a hack and
>      needs to be replaced. ]

..

+static bool access_may_touch_field(u32 access_off, size_t access_sz,

can_write is more accurate.
There is no ambiguity here: atype == BPF_WRITE.
(A renamed sketch follows the quoted helper below.)

+				   u32 field_off, size_t field_sz)
+{
+	return access_off < field_off + field_sz &&
+		field_off < access_off + access_sz;
+}
+
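I.e., just the rename (exact name up to you); the body is a plain
[x1, x2)/[y1, y2) interval-overlap test and stays the same:

	static bool access_can_write_field(u32 access_off, size_t access_sz,
					   u32 field_off, size_t field_sz)
	{
		return access_off < field_off + field_sz &&
			field_off < access_off + access_sz;
	}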
  /* if any part of struct field can be touched by
   * load/store reject this program.
   * To check that [x1, x2) overlaps with [y1, y2)
@@ -4490,7 +4513,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
  	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
  	enum bpf_type_flag flag = 0;
  	u32 btf_id;
-	int ret;
+	int ret, rb_node_off;

  	if (off < 0) {
  		verbose(env,
@@ -4527,8 +4550,13 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
  						  off, size, atype, &btf_id, &flag);
  	} else {
  		if (atype != BPF_READ) {
-			verbose(env, "only read is supported\n");
-			return -EACCES;
+			rb_node_off = btf_find_rb_node(reg->btf, t);
+			if (rb_node_off < 0 ||
+			    access_may_touch_field(off, size, rb_node_off,
+						   sizeof(struct rb_node))) {
+				verbose(env, "only read is supported\n");
+				return -EACCES;
+			}

Allowing writes into ptr_to_btf_id should probably be a separate patch.
It's a big change.
btf_find_rb_node() alone is not enough; otherwise bpf progs will be able
to write into any struct that has an 'rb_node'.
Maybe check that reg->btf == this prog's btf?
Also allow writes into scalars only?
All pointers in the prog's struct should be __kptr anyway to be safe.
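A minimal sketch of that tightening, assuming the prog's own BTF is
reachable via env->prog->aux->btf:

	if (atype != BPF_READ) {
		/* only allow writes into types from this prog's BTF */
		if (reg->btf != env->prog->aux->btf) {
			verbose(env, "only read is supported\n");
			return -EACCES;
		}
		/* ... plus scalar-only and rb_node overlap checks ... */
	}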


  		}

  	ret = btf_struct_access(&env->log, reg->btf, t, off, size,
@@ -5764,6 +5792,17 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
  		if (meta->func_id == BPF_FUNC_kptr_xchg) {
  			if (map_kptr_match_type(env, meta->kptr_off_desc, reg, regno))
  				return -EACCES;
+		} else if (function_manipulates_rbtree_node(meta->func_id)) {
+			if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
+						  meta->map_ptr->btf,
+						  meta->map_ptr->btf_value_type_id,
+						  strict_type_match)) {
+				verbose(env, "rbtree: R%d is of type %s but %s is expected\n",
+					regno, kernel_type_name(reg->btf, reg->btf_id),
+					kernel_type_name(meta->map_ptr->btf,
+							 meta->map_ptr->btf_value_type_id));
+				return -EACCES;
+			}
  		} else if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
  						 btf_vmlinux, *arg_btf_id,
  						 strict_type_match)) {
@@ -6369,10 +6408,17 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
  		break;
  	case BPF_FUNC_map_pop_elem:
  		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
+		    map->map_type != BPF_MAP_TYPE_RBTREE &&
  		    map->map_type != BPF_MAP_TYPE_STACK)
  			goto error;
  		break;
  	case BPF_FUNC_map_peek_elem:
+		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
+		    map->map_type != BPF_MAP_TYPE_STACK &&
+		    map->map_type != BPF_MAP_TYPE_RBTREE &&
+		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
+			goto error;
+		break;
  	case BPF_FUNC_map_push_elem:
  		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
  		    map->map_type != BPF_MAP_TYPE_STACK &&
@@ -6828,6 +6874,57 @@ static int set_loop_callback_state(struct bpf_verifier_env *env,
  	return 0;
  }
+static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
+					 struct bpf_func_state *caller,
+					 struct bpf_func_state *callee,
+					 int insn_idx)
+{
+	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
+
+	/* bpf_rbtree_add(struct bpf_map *map, void *value, void *cb)
+	 * cb(struct rb_node *a, const struct rb_node *b);
+	 */
+	callee->regs[BPF_REG_1].type = PTR_TO_MAP_VALUE;
+	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
+	callee->regs[BPF_REG_1].map_ptr = map_ptr;
+
+	callee->regs[BPF_REG_2].type = PTR_TO_MAP_VALUE;
+	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
+	callee->regs[BPF_REG_2].map_ptr = map_ptr;
+
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
+	callee->in_callback_fn = true;
+	return 0;
+}
+
+static int set_rbtree_find_callback_state(struct bpf_verifier_env *env,
+					  struct bpf_func_state *caller,
+					  struct bpf_func_state *callee,
+					  int insn_idx)
+{
+	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
+
+	/* bpf_rbtree_find(struct bpf_map *map, void *key, void *cb)
+	 * cb(void *key, const struct rb_node *b);
+	 */
+	callee->regs[BPF_REG_1].type = PTR_TO_MAP_VALUE;
+	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
+	callee->regs[BPF_REG_1].map_ptr = map_ptr;
+
+	callee->regs[BPF_REG_2].type = PTR_TO_MAP_VALUE;
+	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
+	callee->regs[BPF_REG_2].map_ptr = map_ptr;
+
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
+	callee->in_callback_fn = true;

add and find look the same up to this point.
Reuse set_rbtree_add_callback_state() here?
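A sketch of that reuse; register setup is delegated and only the allowed
ret range differs:

	static int set_rbtree_find_callback_state(struct bpf_verifier_env *env,
						  struct bpf_func_state *caller,
						  struct bpf_func_state *callee,
						  int insn_idx)
	{
		int err;

		err = set_rbtree_add_callback_state(env, caller, callee,
						    insn_idx);
		if (err)
			return err;

		callee->callback_ret_range = tnum_range(0, U64_MAX);
		return 0;
	}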

+	callee->callback_ret_range = tnum_range(0, U64_MAX);

Is that to enforce that add's cb can only return 0 or 1?
But that would require the bpf prog to have different cb-s for add and find.
Is this ok?
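To illustrate the concern, a hypothetical prog-side sketch (struct and
callback names are made up; assumes a container_of macro is available to
the prog):

	struct node_data {
		struct rb_node node;
		__u32 key;
	};

	/* add's cb is a "less than" predicate, so a 0/1 range fits */
	static long node_less(struct rb_node *a, const struct rb_node *b)
	{
		return container_of(a, struct node_data, node)->key <
		       container_of(b, struct node_data, node)->key;
	}

	/* find's cb is a three-way compare, hence the wider range */
	static long node_cmp(void *key, const struct rb_node *b)
	{
		__u32 bkey = container_of(b, struct node_data, node)->key;
		__u32 akey = *(__u32 *)key;

		if (akey < bkey)
			return -1;
		return akey > bkey;
	}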

+	return 0;
+}
+
  static int set_timer_callback_state(struct bpf_verifier_env *env,
  				    struct bpf_func_state *caller,
  				    struct bpf_func_state *callee,
@@ -7310,6 +7407,14 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
  		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
  					set_loop_callback_state);
  		break;
+	case BPF_FUNC_rbtree_add:
+		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+					set_rbtree_add_callback_state);
+		break;
+	case BPF_FUNC_rbtree_find:
+		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+					set_rbtree_find_callback_state);
+		break;
  	case BPF_FUNC_dynptr_from_mem:
  		if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
  			verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
@@ -7424,6 +7529,9 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
  		if (func_id == BPF_FUNC_kptr_xchg) {
  			ret_btf = meta.kptr_off_desc->kptr.btf;
  			ret_btf_id = meta.kptr_off_desc->kptr.btf_id;
+		} else if (function_returns_rbtree_node(func_id)) {
+			ret_btf = meta.map_ptr->btf;
+			ret_btf_id = meta.map_ptr->btf_value_type_id;
  		} else {
  			ret_btf = btf_vmlinux;
  			ret_btf_id = *fn->ret_btf_id;
@@ -13462,8 +13570,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
  					BPF_SIZE((insn)->code);
  				env->prog->aux->num_exentries++;
  			} else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
+				/*TODO: Not sure what to do here
  				verbose(env, "Writes through BTF pointers are not allowed\n");
  				return -EINVAL;
+				*/

Not sure whether it's worth defining PTR_TO_BTF_ID | PROGS_BTF
for a writable ptr_to_btf_id as the return value from rb_alloc/find/add.
It may help here and earlier?

All ptr_to_btf_id pointers have been kernel or module BTF so far.
With this change the verifier will see the prog's ptr_to_btf_id that
points to the prog's BTF.
PROGS_BTF might be a useful flag to have?

Then instead of
> +		} else if (function_returns_rbtree_node(func_id)) {
> +			ret_btf = meta.map_ptr->btf;
> +			ret_btf_id = meta.map_ptr->btf_value_type_id;

it will check the PROGS_BTF flag in proto->ret_type?
Still not fully generic, since it takes the btf and btf_id from the map,
but a bit cleaner than a switch() by func_id?
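A rough sketch of that direction (the PROGS_BTF name, bit position and
placement are made up, not existing definitions):

	/* in enum bpf_type_flag: pointee type is described by the
	 * program's own BTF instead of kernel/module BTF
	 */
	PROGS_BTF		= BIT(10 + BPF_BASE_TYPE_BITS),

	/* in the helper proto */
	.ret_type	= RET_PTR_TO_BTF_ID_OR_NULL | PROGS_BTF,

	/* in check_helper_call() */
	} else if (fn->ret_type & PROGS_BTF) {
		ret_btf = meta.map_ptr->btf;
		ret_btf_id = meta.map_ptr->btf_value_type_id;
	}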


