On 3/30/23 1:56 AM, Yonghong Song wrote:
Currently, the verifier does not handle '<const> <cond_op> <non_const>' well.
For example,
...
10: (79) r1 = *(u64 *)(r10 -16) ; R1_w=scalar() R10=fp0
11: (b7) r2 = 0 ; R2_w=0
12: (2d) if r2 > r1 goto pc+2
13: (b7) r0 = 0
14: (95) exit
15: (65) if r1 s> 0x1 goto pc+3
16: (0f) r0 += r1
...
At insn 12, the verifier decides that both the true and false branches are
possible, but in fact only the false branch is: r2 is 0 and BPF_JGT is an
unsigned compare, so '0 > r1' can never be true.
Currently, the verifier already supports the pattern '<non_const> <cond_op> <const>'.
Add support for the pattern '<const> <cond_op> <non_const>' in a similar way.
Also fix the selftest 'verifier_bounds_mix_sign_unsign/bounds checks mixing
signed and unsigned, variant 10', which is affected by this change.
Signed-off-by: Yonghong Song <yhs@xxxxxx>
---
kernel/bpf/verifier.c | 12 ++++++++++++
.../bpf/progs/verifier_bounds_mix_sign_unsign.c | 2 +-
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 90bb6d25bc9c..d070943a8ba1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13302,6 +13302,18 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
src_reg->var_off.value,
opcode,
is_jmp32);
+ } else if (dst_reg->type == SCALAR_VALUE &&
+ is_jmp32 && tnum_is_const(tnum_subreg(dst_reg->var_off))) {
+ pred = is_branch_taken(src_reg,
+ tnum_subreg(dst_reg->var_off).value,
+ flip_opcode(opcode),
+ is_jmp32);
+ } else if (dst_reg->type == SCALAR_VALUE &&
+ !is_jmp32 && tnum_is_const(dst_reg->var_off)) {
+ pred = is_branch_taken(src_reg,
+ dst_reg->var_off.value,
+ flip_opcode(opcode),
+ is_jmp32);
} else if (reg_is_pkt_pointer_any(dst_reg) &&
reg_is_pkt_pointer_any(src_reg) &&
!is_jmp32) {
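To spell out what the flip_opcode() calls are doing here: once the constant
sits on the dst side, the comparison has to be mirrored before it can be fed
to is_branch_taken(), which takes the constant as its second operand. A
standalone sketch of that mirroring as I understand it (mirror_cmp_op() and
the demo below are mine, not kernel code; the BPF_J* macros are from the
uapi headers):

#include <linux/bpf.h>
#include <stdio.h>

static unsigned char mirror_cmp_op(unsigned char op)
{
	switch (op) {
	/* symmetric tests are unchanged */
	case BPF_JEQ:  return BPF_JEQ;
	case BPF_JNE:  return BPF_JNE;
	case BPF_JSET: return BPF_JSET;
	/* ordered compares swap direction: "K > r" <=> "r < K" */
	case BPF_JGT:  return BPF_JLT;
	case BPF_JGE:  return BPF_JLE;
	case BPF_JLT:  return BPF_JGT;
	case BPF_JLE:  return BPF_JGE;
	case BPF_JSGT: return BPF_JSLT;
	case BPF_JSGE: return BPF_JSLE;
	case BPF_JSLT: return BPF_JSGT;
	case BPF_JSLE: return BPF_JSGE;
	}
	return 0;
}

int main(void)
{
	/* insn 12 in the log above: "if r2 > r1" with r2 == 0.  Mirrored, it
	 * reads "if r1 < 0" (unsigned), which can never be true, so only the
	 * fall-through branch is reachable.
	 */
	printf("JGT mirrors to JLT: %s\n",
	       mirror_cmp_op(BPF_JGT) == BPF_JLT ? "yes" : "no");
	return 0;
}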
Looking at the two SCALAR_VALUE 'else if's above these added lines, these
additions make sense. Having four separate 'else if' checks for essentially
the same logic makes this hard to read, though, so maybe it's an opportunity
to refactor a bit.
While trying to make sense of the logic here, I attempted to simplify it with
a helper:
@@ -13234,6 +13234,21 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
}));
}
+static int maybe_const_operand_branch(struct tnum maybe_const_op,
+ struct bpf_reg_state *other_op_reg,
+ u8 opcode, bool is_jmp32)
+{
+ struct tnum jmp_tnum = is_jmp32 ? tnum_subreg(maybe_const_op) :
+ maybe_const_op;
+ if (!tnum_is_const(jmp_tnum))
+ return -1;
+
+ return is_branch_taken(other_op_reg,
+ jmp_tnum.value,
+ opcode,
+ is_jmp32);
+}
+
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
@@ -13287,18 +13302,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (BPF_SRC(insn->code) == BPF_K) {
pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
- } else if (src_reg->type == SCALAR_VALUE &&
- is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
- pred = is_branch_taken(dst_reg,
- tnum_subreg(src_reg->var_off).value,
- opcode,
- is_jmp32);
- } else if (src_reg->type == SCALAR_VALUE &&
- !is_jmp32 && tnum_is_const(src_reg->var_off)) {
- pred = is_branch_taken(dst_reg,
- src_reg->var_off.value,
- opcode,
- is_jmp32);
+ } else if (src_reg->type == SCALAR_VALUE) {
+ pred = maybe_const_operand_branch(src_reg->var_off, dst_reg,
+ opcode, is_jmp32);
+ } else if (dst_reg->type == SCALAR_VALUE) {
+ pred = maybe_const_operand_branch(dst_reg->var_off, src_reg,
+ flip_opcode(opcode), is_jmp32);
} else if (reg_is_pkt_pointer_any(dst_reg) &&
reg_is_pkt_pointer_any(src_reg) &&
!is_jmp32) {
I think the resulting logic is the same as in your patch, but it's easier to
understand, for me at least. Note that I didn't test the above.
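One more aside on the jmp_tnum handling the helper folds together: the reason
both your patch and the helper distinguish the JMP32 case is that a register
can be constant in its low 32 bits without being constant as a 64-bit value.
A rough, self-contained illustration, based on my reading of struct tnum
rather than the actual kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* value holds the known bits, mask marks the bits whose value is unknown */
struct tnum { uint64_t value; uint64_t mask; };

static bool tnum_is_const(struct tnum t)
{
	return t.mask == 0;		/* no unknown bits at all */
}

static struct tnum tnum_subreg(struct tnum t)
{
	/* keep only the low 32 bits, which is all a JMP32 compare sees */
	return (struct tnum){ (uint32_t)t.value, (uint32_t)t.mask };
}

int main(void)
{
	/* low 32 bits known to be 7, upper 32 bits completely unknown */
	struct tnum r = { 7, 0xffffffff00000000ull };

	printf("64-bit const? %d\n", tnum_is_const(r));              /* 0 */
	printf("32-bit const? %d\n", tnum_is_const(tnum_subreg(r))); /* 1 */
	return 0;
}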