[PATCH bpf-next v1 7/8] bpf: Add bpf_dynptr_iterator

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Add a new helper function, bpf_dynptr_iterator:

  long bpf_dynptr_iterator(struct bpf_dynptr *ptr, void *callback_fn,
			   void *callback_ctx, u64 flags)

where callback_fn is defined as:

  long (*callback_fn)(struct bpf_dynptr *ptr, void *ctx)

and callback_fn returns the number of bytes to advance the
dynptr by (or a negative error code on failure). The iteration
stops if callback_fn returns 0, returns an error, or tries to
advance the dynptr by more bytes than are available.

Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx>
---
 include/uapi/linux/bpf.h       | 20 ++++++++++++++
 kernel/bpf/helpers.c           | 48 +++++++++++++++++++++++++++++++---
 kernel/bpf/verifier.c          | 27 +++++++++++++++++++
 tools/include/uapi/linux/bpf.h | 20 ++++++++++++++
 4 files changed, 111 insertions(+), 4 deletions(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 16973fa4d073..ff78a94c262a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5531,6 +5531,25 @@ union bpf_attr {
  *		losing access to the original view of the dynptr.
  *	Return
  *		0 on success, -EINVAL if the dynptr to clone is invalid.
+ *
+ * long bpf_dynptr_iterator(struct bpf_dynptr *ptr, void *callback_fn, void *callback_ctx, u64 flags)
+ *	Description
+ *		Iterate through the dynptr data, calling **callback_fn** on each
+ *		iteration with **callback_ctx** as the context parameter.
+ *		The **callback_fn** should be a static function and
+ *		the **callback_ctx** should be a pointer to the stack.
+ *		Currently **flags** is unused and must be 0.
+ *
+ *		long (\*callback_fn)(struct bpf_dynptr \*ptr, void \*ctx);
+ *
+ *		where **callback_fn** returns the number of bytes to advance
+ *		the dynptr by or an error. The iteration will stop if **callback_fn**
+ *		returns 0 or an error or tries to advance by more bytes than the
+ *		size of the dynptr.
+ *	Return
+ *		0 on success, -EINVAL if the dynptr is invalid or **flags** is not 0,
+ *		-ERANGE if attempting to iterate more bytes than available, or other
+ *		negative error if **callback_fn** returns an error.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5752,6 +5771,7 @@ union bpf_attr {
 	FN(dynptr_get_size),		\
 	FN(dynptr_get_offset),		\
 	FN(dynptr_clone),		\
+	FN(dynptr_iterator),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 667f1e213a61..519b3da06d49 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1653,13 +1653,11 @@ static const struct bpf_func_proto bpf_dynptr_data_proto = {
 	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
 };
 
-BPF_CALL_2(bpf_dynptr_advance, struct bpf_dynptr_kern *, ptr, u32, len)
+/* *ptr* should always be a valid dynptr */
+static int __bpf_dynptr_advance(struct bpf_dynptr_kern *ptr, u32 len)
 {
 	u32 size;
 
-	if (!ptr->data)
-		return -EINVAL;
-
 	size = __bpf_dynptr_get_size(ptr);
 
 	if (len > size)
@@ -1672,6 +1670,14 @@ BPF_CALL_2(bpf_dynptr_advance, struct bpf_dynptr_kern *, ptr, u32, len)
 	return 0;
 }
 
+BPF_CALL_2(bpf_dynptr_advance, struct bpf_dynptr_kern *, ptr, u32, len)
+{
+	if (!ptr->data)
+		return -EINVAL;
+
+	return __bpf_dynptr_advance(ptr, len);
+}
+
 static const struct bpf_func_proto bpf_dynptr_advance_proto = {
 	.func		= bpf_dynptr_advance,
 	.gpl_only	= false,
@@ -1783,6 +1789,38 @@ static const struct bpf_func_proto bpf_dynptr_clone_proto = {
 	.arg2_type	= ARG_PTR_TO_DYNPTR | MEM_UNINIT,
 };
 
+BPF_CALL_4(bpf_dynptr_iterator, struct bpf_dynptr_kern *, ptr, void *, callback_fn,
+	   void *, callback_ctx, u64, flags)
+{
+	bpf_callback_t callback = (bpf_callback_t)callback_fn;
+	int nr_bytes, err;
+
+	if (!ptr->data || flags)
+		return -EINVAL;
+
+	while (ptr->size > 0) {
+		nr_bytes = callback((u64)(long)ptr, (u64)(long)callback_ctx, 0, 0, 0);
+		if (nr_bytes <= 0)
+			return nr_bytes;
+
+		err = __bpf_dynptr_advance(ptr, nr_bytes);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_dynptr_iterator_proto = {
+	.func		= bpf_dynptr_iterator,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_DYNPTR,
+	.arg2_type	= ARG_PTR_TO_FUNC,
+	.arg3_type	= ARG_PTR_TO_STACK_OR_NULL,
+	.arg4_type	= ARG_ANYTHING,
+};
+
 const struct bpf_func_proto bpf_get_current_task_proto __weak;
 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
@@ -1869,6 +1907,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 		return &bpf_dynptr_get_offset_proto;
 	case BPF_FUNC_dynptr_clone:
 		return &bpf_dynptr_clone_proto;
+	case BPF_FUNC_dynptr_iterator:
+		return &bpf_dynptr_iterator_proto;
 	default:
 		break;
 	}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2eb2a4410344..76108cd4ed85 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6901,6 +6901,29 @@ static int set_map_elem_callback_state(struct bpf_verifier_env *env,
 	return 0;
 }
 
+static int set_dynptr_iterator_callback_state(struct bpf_verifier_env *env,
+					      struct bpf_func_state *caller,
+					      struct bpf_func_state *callee,
+					      int insn_idx)
+{
+	/* bpf_dynptr_iterator(struct bpf_dynptr *ptr, void *callback_fn,
+	 * void *callback_ctx, u64 flags);
+	 *
+	 * callback_fn(struct bpf_dynptr *ptr, void *callback_ctx);
+	 */
+	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
+	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
+	callee->callback_ret_range = tnum_range(0, U64_MAX);
+
+	/* unused */
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
+
+	callee->in_callback_fn = true;
+	return 0;
+}
+
 static int set_loop_callback_state(struct bpf_verifier_env *env,
 				   struct bpf_func_state *caller,
 				   struct bpf_func_state *callee,
@@ -7472,6 +7495,10 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 
 		break;
 	}
+	case BPF_FUNC_dynptr_iterator:
+		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+					set_dynptr_iterator_callback_state);
+		break;
 	}
 
 	if (err)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 16973fa4d073..ff78a94c262a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5531,6 +5531,25 @@ union bpf_attr {
  *		losing access to the original view of the dynptr.
  *	Return
  *		0 on success, -EINVAL if the dynptr to clone is invalid.
+ *
+ * long bpf_dynptr_iterator(struct bpf_dynptr *ptr, void *callback_fn, void *callback_ctx, u64 flags)
+ *	Description
+ *		Iterate through the dynptr data, calling **callback_fn** on each
+ *		iteration with **callback_ctx** as the context parameter.
+ *		The **callback_fn** should be a static function and
+ *		the **callback_ctx** should be a pointer to the stack.
+ *		Currently **flags** is unused and must be 0.
+ *
+ *		long (\*callback_fn)(struct bpf_dynptr \*ptr, void \*ctx);
+ *
+ *		where **callback_fn** returns the number of bytes to advance
+ *		the dynptr by or an error. The iteration will stop if **callback_fn**
+ *		returns 0 or an error or tries to advance by more bytes than the
+ *		size of the dynptr.
+ *	Return
+ *		0 on success, -EINVAL if the dynptr is invalid or **flags** is not 0,
+ *		-ERANGE if attempting to iterate more bytes than available, or other
+ *		negative error if **callback_fn** returns an error.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5752,6 +5771,7 @@ union bpf_attr {
 	FN(dynptr_get_size),		\
 	FN(dynptr_get_offset),		\
 	FN(dynptr_clone),		\
+	FN(dynptr_iterator),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
-- 
2.30.2




[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux