Re: [PATCH bpf-next v7 3/5] bpf: Inline calls to bpf_loop when callback is known

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



> On Sun, 2022-06-19 at 14:10 -0700, Alexei Starovoitov wrote:
[...]
> yes.
> Just BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0) and ,1) in the other branch
> should do it.
> The 'mov' won't make the register precise.
> 
> So something like below:
> 
> r0 = random_u32
> r6 = random_u32
> if (r0)
>    goto L;
> 
> r4 = 0
> 
> pruning_point:
> if (r6) goto next;
> next:
> /* load callback address to r2 */
> BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 5),
> BPF_RAW_INSN(0, 0, 0, 0, 0),
> BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
> BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
> BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
> BPF_EXIT_INSN(),
> 
> L:
> r4 = 1
> goto pruning_point
> 
> The fallthrough path will proceed with r4=0
> and pruning will trigger is_state_visited() with r4=1
> which regsafe will incorrectly recognize as equivalent.
> 

Actually this was the first thing I tried. Here is the pseudo code
above translated to BPF instructions:

	/* main */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 9),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 0),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 7),
	BPF_RAW_INSN(0, 0, 0, 0, 0),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -10),
	/* callback */
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
	BPF_EXIT_INSN(),	

And here is the verification log for this program:

	#195/p don't inline bpf_loop call, flags non-zero 2 , verifier log:
	func#0 @0
	func#1 @16
	reg type unsupported for arg#0 function main#6
	
	from -1 to 0: R1=ctx(off=0,imm=0) R10=fp0
	0: (85) call bpf_jiffies64#118        ; R0_w=Pscalar()
	1: (bf) r6 = r0                       ; R0_w=Pscalar(id=1) R6_w=Pscalar(id=1)
	2: (85) call bpf_jiffies64#118        ; R0_w=Pscalar()
	3: (bf) r7 = r0                       ; R0_w=Pscalar(id=2) R7_w=Pscalar(id=2)
	4: (55) if r6 != 0x0 goto pc+9        ; R6_w=P0
-->	5: (b7) r4 = 0                        ; R4_w=P0
	6: (55) if r7 != 0x0 goto pc+0        ; R7_w=P0
	7: (b7) r1 = 1                        ; R1_w=P1
	8: (18) r2 = 0x7                      ; R2_w=func(off=0,imm=0)
	10: (b7) r3 = 0                       ; R3_w=P0
	11: (85) call bpf_loop#181
	reg type unsupported for arg#1 function callback#7
	caller:
	 R6=P0 R7=P0 R10=fp0
	callee:
	 frame1: R1=Pscalar() R2_w=P0 R10=fp0 cb
	16: frame1: R1_w=Pscalar() R2_w=P0 cb
	16: (b7) r0 = 1                       ; frame1: R0_w=P1 cb
	17: (95) exit
	returning from callee:
	 frame1: R0_w=P1 R1_w=Pscalar() R2_w=P0 R10=fp0 cb
	to caller at 12:
	 R0_w=Pscalar() R6=P0 R7=P0 R10=fp0
	
	from 17 to 12: R0_w=Pscalar() R6=P0 R7=P0 R10=fp0
	12: (b7) r0 = 0                       ; R0_w=P0
	13: (95) exit
	propagating r4
	
	from 6 to 7: safe
	
	from 4 to 14: R0=Pscalar(id=2) R6=Pscalar(id=1) R7=Pscalar(id=2) R10=fp0
-->	14: (b7) r4 = 1                       ; R4_w=P1
	15: (05) goto pc-10
	6: (55) if r7 != 0x0 goto pc+0        ; R7=P0
	7: (b7) r1 = 1                        ; R1_w=P1
	8: (18) r2 = 0x7                      ; R2_w=func(off=0,imm=0)
	10: (b7) r3 = 0                       ; R3_w=P0
	11: (85) call bpf_loop#181
	[...]

Note that for instructions [5] and [14] R4 is marked as precise. I
actually verified in the debugger that this happens because of the
following processing logic for MOV:

#0  check_alu_op (...) at kernel/bpf/verifier.c:9177
#1  ... in do_check (...) at kernel/bpf/verifier.c:12100
#2  do_check_common (...) at kernel/bpf/verifier.c:14552
#3  ... in do_check_main () at kernel/bpf/verifier.c:14615

The C code for the relevant functions is below. Note that the call to
`__mark_reg_unknown` marks the register as precise whenever
`env->subprog_cnt > 1` — and for this test case `subprog_cnt` is 2, so
the condition holds and R4 is marked precise.

static int do_check(struct bpf_verifier_env *env)
{
	...
		if (class == BPF_ALU || class == BPF_ALU64) {
12100:			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else ...
	...
}

static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	...
	else if (opcode == BPF_MOV) {
		...
		if (BPF_SRC(insn->code) == BPF_X) {
			...
		} else {
			/* case: R = imm
			 * remember the value we stored into this reg
			 */
			/* clear any state __mark_reg_known doesn't set */
			mark_reg_unknown(env, regs, insn->dst_reg);
			regs[insn->dst_reg].type = SCALAR_VALUE;
			if (BPF_CLASS(insn->code) == BPF_ALU64) {
9177:				__mark_reg_known(regs + insn->dst_reg,
						 insn->imm);
			} else {
				__mark_reg_known(regs + insn->dst_reg,
						 (u32)insn->imm);
			}
		}
		...
	}
	...
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg)
{
	...
	reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
	...
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	...
	__mark_reg_unknown(env, regs + regno);
}

Thanks,
Eduard




[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux