[PATCH v2 bpf-next 4/6] bpf: Add bpf_dynptr_clone

Add a new helper, bpf_dynptr_clone, which clones a dynptr.

The cloned dynptr will point to the same underlying data as its parent
dynptr and will have the same type, offset, size, and read-only
attribute.

Any writes through a dynptr will be reflected across all of its
instances (an 'instance' here meaning any dynptr that points to the
same underlying data).
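
For instance (a sketch only; 'ptr' is assumed to be an initialized,
writable dynptr already in scope):

	struct bpf_dynptr clone;
	__u32 val = 42;

	bpf_dynptr_clone(&ptr, &clone, 0);
	bpf_dynptr_write(&clone, 0, &val, sizeof(val), 0);
	/* the bytes just written through 'clone' are now visible
	 * through 'ptr' as well
	 */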

Please note that data slice and dynptr invalidations will affect all
instances as well. For example, if bpf_dynptr_write() is called on an
skb-type dynptr, the data slices of every dynptr instance associated
with that skb will be invalidated (e.g. the data slices of any clones,
parents, grandparents, ...). Similarly, if a ringbuf dynptr is
submitted or discarded, every instance of that dynptr will be
invalidated.
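
To illustrate the ringbuf case (a sketch only, assuming this series is
applied; the map, program section, and sizes below are arbitrary):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_RINGBUF);
		__uint(max_entries, 4096);
	} rb SEC(".maps");

	SEC("tp/syscalls/sys_enter_write")
	int clone_invalidation(void *ctx)
	{
		struct bpf_dynptr ptr, clone;

		bpf_ringbuf_reserve_dynptr(&rb, 64, 0, &ptr);
		bpf_dynptr_clone(&ptr, &clone, 0);

		/* Discarding through the parent releases the ringbuf
		 * sample, so the verifier invalidates 'clone' too; any
		 * subsequent use of 'clone' would be rejected.
		 */
		bpf_ringbuf_discard_dynptr(&ptr, 0);
		return 0;
	}

	char _license[] SEC("license") = "GPL";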

Changing the view of a dynptr (e.g. advancing the offset or trimming
the size) will only affect that dynptr; it will not affect any other
instances.

One example use case where cloning may be helpful is for hashing or
iterating through dynptr data. Cloning will allow the user to maintain
the original view of the dynptr for future use, while also creating
views into smaller subsets of the data after the offset is advanced or
the size is trimmed, as sketched below.
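
A sketch of that pattern (illustrative only; hdr_len, trailer_len, and
hash_view() are hypothetical, and bpf_dynptr_trim() is assumed to
shrink the view from the end by the given number of bytes, per the
helper added earlier in this series):

	static int hash_payload(struct bpf_dynptr *ptr, __u32 hdr_len,
				__u32 trailer_len)
	{
		struct bpf_dynptr view;
		int err;

		/* clone and advance past the header in one call */
		err = bpf_dynptr_clone(ptr, &view, hdr_len);
		if (err)
			return err;
		/* drop the trailer from the clone's view */
		err = bpf_dynptr_trim(&view, trailer_len);
		if (err)
			return err;

		hash_view(&view);
		/* *ptr still covers the full, original view */
		return 0;
	}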

Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx>
---
 include/uapi/linux/bpf.h       |  26 +++++++
 kernel/bpf/helpers.c           |  34 ++++++++
 kernel/bpf/verifier.c          | 138 +++++++++++++++++++++------------
 tools/include/uapi/linux/bpf.h |  26 +++++++
 4 files changed, 173 insertions(+), 51 deletions(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 5ad52d481cde..f9387c5aba2b 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5594,6 +5594,31 @@ union bpf_attr {
  *	Return
  *		The offset of the dynptr on success, -EINVAL if the dynptr is
  *		invalid.
+ *
+ * long bpf_dynptr_clone(struct bpf_dynptr *ptr, struct bpf_dynptr *clone, u32 offset)
+ *	Description
+ *		Clone an initialized dynptr *ptr*. After this call, both *ptr*
+ *		and *clone* will point to the same underlying data. If non-zero,
+ *		*offset* specifies how many bytes to advance the cloned dynptr by.
+ *
+ *		*clone* must be an uninitialized dynptr.
+ *
+ *		Any data slice or dynptr invalidations will apply equally to
+ *		both dynptrs after this call. For example, if ptr1 is a
+ *		ringbuf-type dynptr with multiple data slices and is cloned
+ *		to ptr2, then if ptr2 discards the ringbuf sample, ptr2,
+ *		ptr2's data slices, ptr1, and ptr1's data slices will all
+ *		be invalidated.
+ *
+ *		This is convenient for getting different "views" into the same
+ *		data. For instance, if one wishes to hash only a particular
+ *		section of data, one can clone the dynptr, advance it to a
+ *		specified offset and trim it to a specified size, pass it
+ *		to the hash function, and discard it after hashing, without
+ *		losing access to the original view of the dynptr.
+ *	Return
+ *		0 on success, -EINVAL if the dynptr to clone is invalid, -ERANGE
+ *		if attempting to clone the dynptr at an out-of-range offset.
  */
 #define ___BPF_FUNC_MAPPER(FN, ctx...)			\
 	FN(unspec, 0, ##ctx)				\
@@ -5816,6 +5841,7 @@ union bpf_attr {
 	FN(dynptr_is_rdonly, 217, ##ctx)		\
 	FN(dynptr_get_size, 218, ##ctx)		\
 	FN(dynptr_get_offset, 219, ##ctx)		\
+	FN(dynptr_clone, 220, ##ctx)			\
 	/* */
 
 /* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 0164d7e4b5a6..0c2cfb4ed33c 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1760,6 +1760,38 @@ static const struct bpf_func_proto bpf_dynptr_get_offset_proto = {
 	.arg1_type	= ARG_PTR_TO_DYNPTR,
 };
 
+BPF_CALL_3(bpf_dynptr_clone, struct bpf_dynptr_kern *, ptr,
+	   struct bpf_dynptr_kern *, clone, u32, offset)
+{
+	int err = -EINVAL;
+
+	if (!ptr->data)
+		goto error;
+
+	memcpy(clone, ptr, sizeof(*clone));
+
+	if (offset) {
+		err = bpf_dynptr_adjust(clone, offset, offset);
+		if (err)
+			goto error;
+	}
+
+	return 0;
+
+error:
+	bpf_dynptr_set_null(clone);
+	return err;
+}
+
+static const struct bpf_func_proto bpf_dynptr_clone_proto = {
+	.func		= bpf_dynptr_clone,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_DYNPTR,
+	.arg2_type	= ARG_PTR_TO_DYNPTR | MEM_UNINIT,
+	.arg3_type	= ARG_ANYTHING,
+};
+
 const struct bpf_func_proto bpf_get_current_task_proto __weak;
 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
@@ -1876,6 +1908,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 		return &bpf_dynptr_get_size_proto;
 	case BPF_FUNC_dynptr_get_offset:
 		return &bpf_dynptr_get_offset_proto;
+	case BPF_FUNC_dynptr_clone:
+		return &bpf_dynptr_clone_proto;
 #ifdef CONFIG_CGROUPS
 	case BPF_FUNC_cgrp_storage_get:
 		return &bpf_cgrp_storage_get_proto;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4d81d159254b..3f617f7040b7 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -719,17 +719,53 @@ static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
 	}
 }
 
+static bool arg_type_is_dynptr(enum bpf_arg_type type)
+{
+	return base_type(type) == ARG_PTR_TO_DYNPTR;
+}
+
 static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
 {
 	return type == BPF_DYNPTR_TYPE_RINGBUF;
 }
 
-static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
-				   enum bpf_arg_type arg_type, int insn_idx)
+static struct bpf_reg_state *get_dynptr_arg_reg(const struct bpf_func_proto *fn,
+						struct bpf_reg_state *regs)
+{
+	enum bpf_arg_type t;
+	int i;
+
+	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
+		t = fn->arg_type[i];
+		if (arg_type_is_dynptr(t) && !(t & MEM_UNINIT))
+			return &regs[BPF_REG_1 + i];
+	}
+
+	return NULL;
+}
+
+static enum bpf_dynptr_type stack_slot_get_dynptr_info(struct bpf_verifier_env *env,
+						       struct bpf_reg_state *reg,
+						       int *ref_obj_id)
+{
+	struct bpf_func_state *state = func(env, reg);
+	int spi = get_spi(reg->off);
+
+	if (ref_obj_id)
+		*ref_obj_id = state->stack[spi].spilled_ptr.id;
+
+	return state->stack[spi].spilled_ptr.dynptr.type;
+}
+
+static int mark_stack_slots_dynptr(struct bpf_verifier_env *env,
+				   const struct bpf_func_proto *fn,
+				   struct bpf_reg_state *reg,
+				   enum bpf_arg_type arg_type,
+				   int insn_idx, int func_id)
 {
 	struct bpf_func_state *state = func(env, reg);
 	enum bpf_dynptr_type type;
-	int spi, i, id;
+	int spi, i, id = 0;
 
 	spi = get_spi(reg->off);
 
@@ -741,7 +777,21 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
 		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
 	}
 
-	type = arg_to_dynptr_type(arg_type);
+	if (func_id == BPF_FUNC_dynptr_clone) {
+		/* find the type and id of the dynptr we're cloning and
+		 * assign it to the clone
+		 */
+		struct bpf_reg_state *parent_state = get_dynptr_arg_reg(fn, state->regs);
+
+		if (!parent_state) {
+			verbose(env, "verifier internal error: no parent dynptr in bpf_dynptr_clone()\n");
+			return -EFAULT;
+		}
+		type = stack_slot_get_dynptr_info(env, parent_state, &id);
+	} else {
+		type = arg_to_dynptr_type(arg_type);
+	}
+
 	if (type == BPF_DYNPTR_TYPE_INVALID)
 		return -EINVAL;
 
@@ -751,9 +801,11 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
 
 	if (dynptr_type_refcounted(type)) {
 		/* The id is used to track proper releasing */
-		id = acquire_reference_state(env, insn_idx);
-		if (id < 0)
-			return id;
+		if (!id) {
+			id = acquire_reference_state(env, insn_idx);
+			if (id < 0)
+				return id;
+		}
 
 		state->stack[spi].spilled_ptr.id = id;
 		state->stack[spi - 1].spilled_ptr.id = id;
@@ -762,6 +814,17 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
 	return 0;
 }
 
+static void invalidate_dynptr(struct bpf_func_state *state, int spi)
+{
+	int i;
+
+	state->stack[spi].spilled_ptr.id = 0;
+	for (i = 0; i < BPF_REG_SIZE; i++)
+		state->stack[spi].slot_type[i] = STACK_INVALID;
+	state->stack[spi].spilled_ptr.dynptr.first_slot = false;
+	state->stack[spi].spilled_ptr.dynptr.type = 0;
+}
+
 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 {
 	struct bpf_func_state *state = func(env, reg);
@@ -772,22 +835,25 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
 		return -EINVAL;
 
-	for (i = 0; i < BPF_REG_SIZE; i++) {
-		state->stack[spi].slot_type[i] = STACK_INVALID;
-		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
-	}
-
-	/* Invalidate any slices associated with this dynptr */
 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
+		int id = state->stack[spi].spilled_ptr.id;
+
+		/* If the dynptr is refcounted, we need to invalidate two things:
+		 * 1) any dynptrs with a matching id
+		 * 2) any slices associated with the dynptr id
+		 */
+
 		release_reference(env, state->stack[spi].spilled_ptr.id);
-		state->stack[spi].spilled_ptr.id = 0;
-		state->stack[spi - 1].spilled_ptr.id = 0;
+		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+			if (state->stack[i].slot_type[0] == STACK_DYNPTR &&
+			    state->stack[i].spilled_ptr.id == id)
+				invalidate_dynptr(state, i);
+		}
+	} else {
+		invalidate_dynptr(state, spi);
+		invalidate_dynptr(state, spi - 1);
 	}
 
-	state->stack[spi].spilled_ptr.dynptr.first_slot = false;
-	state->stack[spi].spilled_ptr.dynptr.type = 0;
-	state->stack[spi - 1].spilled_ptr.dynptr.type = 0;
-
 	return 0;
 }
 
@@ -5862,11 +5928,6 @@ static bool arg_type_is_release(enum bpf_arg_type type)
 	return type & OBJ_RELEASE;
 }
 
-static bool arg_type_is_dynptr(enum bpf_arg_type type)
-{
-	return base_type(type) == ARG_PTR_TO_DYNPTR;
-}
-
 static int int_ptr_type_to_size(enum bpf_arg_type type)
 {
 	if (type == ARG_PTR_TO_INT)
@@ -6176,31 +6237,6 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
 	return __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
 }
 
-static struct bpf_reg_state *get_dynptr_arg_reg(const struct bpf_func_proto *fn,
-						struct bpf_reg_state *regs)
-{
-	int i;
-
-	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++)
-		if (arg_type_is_dynptr(fn->arg_type[i]))
-			return &regs[BPF_REG_1 + i];
-
-	return NULL;
-}
-
-static enum bpf_dynptr_type stack_slot_get_dynptr_info(struct bpf_verifier_env *env,
-						       struct bpf_reg_state *reg,
-						       int *ref_obj_id)
-{
-	struct bpf_func_state *state = func(env, reg);
-	int spi = get_spi(reg->off);
-
-	if (ref_obj_id)
-		*ref_obj_id = state->stack[spi].spilled_ptr.id;
-
-	return state->stack[spi].spilled_ptr.dynptr.type;
-}
-
 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 			  struct bpf_call_arg_meta *meta,
 			  const struct bpf_func_proto *fn)
@@ -7697,9 +7733,9 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 				return err;
 		}
 
-		err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
+		err = mark_stack_slots_dynptr(env, fn, &regs[meta.uninit_dynptr_regno],
 					      fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
-					      insn_idx);
+					      insn_idx, func_id);
 		if (err)
 			return err;
 	}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 5ad52d481cde..f9387c5aba2b 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5594,6 +5594,31 @@ union bpf_attr {
  *	Return
  *		The offset of the dynptr on success, -EINVAL if the dynptr is
  *		invalid.
+ *
+ * long bpf_dynptr_clone(struct bpf_dynptr *ptr, struct bpf_dynptr *clone, u32 offset)
+ *	Description
+ *		Clone an initialized dynptr *ptr*. After this call, both *ptr*
+ *		and *clone* will point to the same underlying data. If non-zero,
+ *		*offset* specifies how many bytes to advance the cloned dynptr by.
+ *
+ *		*clone* must be an uninitialized dynptr.
+ *
+ *		Any data slice or dynptr invalidations will apply equally to
+ *		both dynptrs after this call. For example, if ptr1 is a
+ *		ringbuf-type dynptr with multiple data slices and is cloned
+ *		to ptr2, then if ptr2 discards the ringbuf sample, ptr2,
+ *		ptr2's data slices, ptr1, and ptr1's data slices will all
+ *		be invalidated.
+ *
+ *		This is convenient for getting different "views" into the same
+ *		data. For instance, if one wishes to hash only a particular
+ *		section of data, one can clone the dynptr, advance it to a
+ *		specified offset and trim it to a specified size, pass it
+ *		to the hash function, and discard it after hashing, without
+ *		losing access to the original view of the dynptr.
+ *	Return
+ *		0 on success, -EINVAL if the dynptr to clone is invalid, -ERANGE
+ *		if attempting to clone the dynptr at an out-of-range offset.
  */
 #define ___BPF_FUNC_MAPPER(FN, ctx...)			\
 	FN(unspec, 0, ##ctx)				\
@@ -5816,6 +5841,7 @@ union bpf_attr {
 	FN(dynptr_is_rdonly, 217, ##ctx)		\
 	FN(dynptr_get_size, 218, ##ctx)		\
 	FN(dynptr_get_offset, 219, ##ctx)		\
+	FN(dynptr_clone, 220, ##ctx)			\
 	/* */
 
 /* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
-- 
2.30.2



