Re: [PATCH bpf-next v7 3/5] bpf: Inline calls to bpf_loop when callback is known

On 6/13/22 10:50 PM, Eduard Zingerman wrote:
[...]
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index d5d96ceca105..7e8fd49406f6 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -723,9 +723,6 @@ const struct bpf_func_proto bpf_for_each_map_elem_proto = {
  	.arg4_type	= ARG_ANYTHING,
  };
-/* maximum number of loops */
-#define MAX_LOOPS	BIT(23)
-
  BPF_CALL_4(bpf_loop, u32, nr_loops, void *, callback_fn, void *, callback_ctx,
  	   u64, flags)
  {
@@ -733,9 +730,13 @@ BPF_CALL_4(bpf_loop, u32, nr_loops, void *, callback_fn, void *, callback_ctx,
  	u64 ret;
  	u32 i;
+	/* Note: these safety checks are also verified when bpf_loop
+	 * is inlined, be careful to modify this code in sync. See
+	 * function verifier.c:inline_bpf_loop.
+	 */
  	if (flags)
  		return -EINVAL;
-	if (nr_loops > MAX_LOOPS)
+	if (nr_loops > BPF_MAX_LOOPS)
  		return -E2BIG;
  	for (i = 0; i < nr_loops; i++) {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2d2872682278..db854c09b603 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -7103,6 +7103,38 @@ static int check_get_func_ip(struct bpf_verifier_env *env)
  	return -ENOTSUPP;
  }
+static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
+{
+	return &env->insn_aux_data[env->insn_idx];
+}
+
+static bool loop_flag_is_zero(struct bpf_verifier_env *env)
+{
+	struct bpf_reg_state *regs = cur_regs(env);
+	struct bpf_reg_state *reg = &regs[BPF_REG_4];
+
+	return register_is_const(reg) && reg->var_off.value == 0;

I think you might also need to add precision tracking for the flag check:

mark_chain_precision(env, BPF_REG_4);

See also commit cc52d9140aa92 ("bpf: Fix record_func_key to perform backtracking
on r3"). Not too much of an issue at the moment, but it will matter once we
extend the flags.
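
Something along these lines is what I have in mind (just an untested sketch; it
requests precision only once the register is known to be constant zero and
ignores the mark_chain_precision() return value for brevity):

static bool loop_flag_is_zero(struct bpf_verifier_env *env)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[BPF_REG_4];
	bool reg_is_zero = register_is_const(reg) && reg->var_off.value == 0;

	/* Mark r4 as precise so that later state pruning cannot merge in a
	 * path where the flags register was not a known constant, similar
	 * to what record_func_key() does for r3.
	 */
	if (reg_is_zero)
		mark_chain_precision(env, BPF_REG_4);

	return reg_is_zero;
}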

+}
+
+static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
+{
+	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
+
+	if (!state->initialized) {
+		state->initialized = 1;
+		state->fit_for_inline = loop_flag_is_zero(env);
+		state->callback_subprogno = subprogno;
+		return;
+	}
+
+	if (!state->fit_for_inline)
+		return;
+
+	state->fit_for_inline =
+		loop_flag_is_zero(env) &&
+		state->callback_subprogno == subprogno;
+}
+
  static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
  			     int *insn_idx_p)
  {
@@ -7255,6 +7287,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
  		err = check_bpf_snprintf_call(env, regs);
  		break;
  	case BPF_FUNC_loop:
+		update_loop_inline_state(env, meta.subprogno);
  		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
  					set_loop_callback_state);
  		break;


