Make sure that the following unsafe example is rejected by the verifier:

1: r9 = ... some pointer with range X ...
2: r6 = ... unbound scalar ID=a ...
3: r7 = ... unbound scalar ID=b ...
4: if (r6 > r7) goto +1
5: r6 = r7
6: if (r6 > X) goto ...
--- checkpoint ---
7: r9 += r7
8: *(u64 *)r9 = Y

This example is unsafe because not all execution paths verify the r7
range. Because of the jump at (4) the verifier would arrive at (6) in
two states:
I.  r6{.id=b}, r7{.id=b} via path 1-6;
II. r6{.id=a}, r7{.id=b} via path 1-4, 6.

Currently regsafe() does not call check_ids() for scalar registers,
thus from the POV of regsafe() states (I) and (II) are identical. If
path [1-6] is taken by the verifier first and a checkpoint is created
at (6), path [1-4, 6] would be considered safe.

This commit updates regsafe() to call check_ids() for precise scalar
registers.

To minimize the impact on verification performance, avoid generating
bpf_reg_state::id for constant scalar values when processing BPF_MOV
in check_alu_op(). Scalar IDs are utilized by find_equal_scalars() to
propagate information about value ranges for registers that hold the
same value. However, there is no need to propagate range information
for constants.

Still, there is some performance impact because of this change. Using
veristat to compare the number of processed states for selftests
object files listed in tools/testing/selftests/bpf/veristat.cfg and
Cilium object files from [1] gives the following statistics:

$ ./veristat -e file,prog,states -f "states_pct>10" \
	-C master-baseline.log current.log
File         Program                         States (DIFF)
-----------  ------------------------------  --------------
bpf_xdp.o    tail_handle_nat_fwd_ipv6        +155 (+23.92%)
bpf_xdp.o    tail_nodeport_nat_ingress_ipv4  +102 (+27.20%)
bpf_xdp.o    tail_rev_nodeport_lb4            +83 (+20.85%)
loop6.bpf.o  trace_virtqueue_add_sgs          +25 (+11.06%)

Also, the test case verifier_search_pruning/allocated_stack has to be
updated to avoid conflicts in register ID assignments between cached
and new states.

[1] git@xxxxxxxxxx:anakryiko/cilium.git

Fixes: 75748837b7e5 ("bpf: Propagate scalar ranges through register assignments.")
Signed-off-by: Eduard Zingerman <eddyz87@xxxxxxxxx>
---
 kernel/bpf/verifier.c                           | 34 ++++++++++++++++---
 .../bpf/progs/verifier_search_pruning.c         |  3 +-
 2 files changed, 32 insertions(+), 5 deletions(-)
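Note (illustration only, not part of the patch): check_ids() can be
thought of as building a mapping from IDs in the cached (old) state to
IDs in the current state and requiring that mapping to stay consistent
across all compared registers. Below is a simplified, self-contained
sketch of that idea; the names (ids_match, id_pair, ID_MAP_SIZE) are
made up for illustration and the handling of id == 0 ("no id") is
glossed over, so this is not the kernel implementation.

#include <stdbool.h>

#define ID_MAP_SIZE 64

struct id_pair {
	unsigned int old;	/* id in the cached state, 0 = unused slot */
	unsigned int cur;	/* id it maps to in the current state */
};

/* Each cached-state id must map to exactly one current-state id. */
static bool ids_match(unsigned int old_id, unsigned int cur_id,
		      struct id_pair *idmap)
{
	int i;

	for (i = 0; i < ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {
			/* first time old_id is seen: record the mapping */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)
			/* seen before: must map to the same current id */
			return idmap[i].cur == cur_id;
	}
	return false;	/* map exhausted, be conservative */
}

For the example above, comparing cached state (I) r6{.id=b}, r7{.id=b}
against current state (II) r6{.id=a}, r7{.id=b}: the r6 comparison
records b -> a, and the r7 comparison then fails because b already
maps to a, so the two states are correctly treated as distinct.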
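A second illustration (again just a sketch with made-up names, not
kernel code): scalar IDs exist so that when a conditional narrows the
range of one register, the same narrowing can be applied to every
other register known to hold the same unknown value, which is what
find_equal_scalars() achieves. A constant's range cannot be narrowed
any further, so assigning IDs to constants would only create more
distinct states for regsafe() to compare.

/* Grossly simplified register state: an id plus signed bounds. */
struct sreg {
	unsigned int id;	/* non-zero if the value is shared with other regs */
	long long smin, smax;
};

/* After e.g. "if (r6 > X)" the fall-through branch knows r6 <= X.
 * Copy the narrowed bounds to every register carrying the same
 * non-zero id, mirroring the effect of find_equal_scalars().
 */
static void propagate_bounds(struct sreg *regs, int nregs, const struct sreg *known)
{
	int i;

	if (!known->id)
		return;	/* constants and lone scalars carry no id */
	for (i = 0; i < nregs; i++) {
		if (regs[i].id == known->id) {
			regs[i].smin = known->smin;
			regs[i].smax = known->smax;
		}
	}
}

In the unsafe example this is exactly what makes path [1-6] safe:
narrowing r6 at (5) also narrows r7 because "r6 = r7" at (4) gave them
the same id, and it is also why regsafe() must not treat states with
different id groupings as equivalent.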
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2aa60b73f1b5..175ca22b868e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12933,12 +12933,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 		if (BPF_SRC(insn->code) == BPF_X) {
 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
+			bool need_id = (src_reg->type == SCALAR_VALUE && !src_reg->id &&
+					!tnum_is_const(src_reg->var_off));
 
 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
 				/* case: R1 = R2
 				 * copy register state to dest reg
 				 */
-				if (src_reg->type == SCALAR_VALUE && !src_reg->id)
+				if (need_id)
 					/* Assign src and dst registers the same ID
 					 * that will be used by find_equal_scalars()
 					 * to propagate min/max range.
@@ -12957,7 +12959,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 				} else if (src_reg->type == SCALAR_VALUE) {
 					bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX;
 
-					if (is_src_reg_u32 && !src_reg->id)
+					if (is_src_reg_u32 && need_id)
 						src_reg->id = ++env->id_gen;
 					copy_register_state(dst_reg, src_reg);
 					/* Make sure ID is cleared if src_reg is not in u32 range otherwise
@@ -15289,9 +15291,33 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
 			return false;
 		if (!rold->precise)
 			return true;
-		/* new val must satisfy old val knowledge */
+		/* Why check_ids() for scalar registers?
+		 *
+		 * Consider the following BPF code:
+		 *   1: r6 = ... unbound scalar, ID=a ...
+		 *   2: r7 = ... unbound scalar, ID=b ...
+		 *   3: if (r6 > r7) goto +1
+		 *   4: r6 = r7
+		 *   5: if (r6 > X) goto ...
+		 *   6: ... memory operation using r7 ...
+		 *
+		 * First verification path is [1-6]:
+		 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7;
+		 * - at (5) r6 would be marked <= X, find_equal_scalars() would also mark
+		 *   r7 <= X, because r6 and r7 share same id.
+		 * Next verification path is [1-4, 6].
+		 *
+		 * Instruction (6) would be reached in two states:
+		 *   I.  r6{.id=b}, r7{.id=b} via path 1-6;
+		 *   II. r6{.id=a}, r7{.id=b} via path 1-4, 6.
+		 *
+		 * Use check_ids() to distinguish these states.
+		 * ---
+		 * Also verify that new value satisfies old value range knowledge.
+		 */
 		return range_within(rold, rcur) &&
-		       tnum_in(rold->var_off, rcur->var_off);
+		       tnum_in(rold->var_off, rcur->var_off) &&
+		       check_ids(rold->id, rcur->id, idmap);
 	case PTR_TO_MAP_KEY:
 	case PTR_TO_MAP_VALUE:
 	case PTR_TO_MEM:
diff --git a/tools/testing/selftests/bpf/progs/verifier_search_pruning.c b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
index 5a14498d352f..bb3cd14bb3a1 100644
--- a/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
+++ b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
@@ -271,7 +271,7 @@ l2_%=:	r0 = 0;						\
 SEC("socket")
 __description("allocated_stack")
-__success __msg("processed 15 insns")
+__success __msg("processed 16 insns")
 __success_unpriv __msg_unpriv("")
 __log_level(1) __retval(0)
 __naked void allocated_stack(void)
 {
@@ -279,6 +279,7 @@ __naked void allocated_stack(void)
 	r6 = r1;					\
 	call %[bpf_get_prandom_u32];			\
 	r7 = r0;					\
+	call %[bpf_get_prandom_u32];			\
 	if r0 == 0 goto l0_%=;				\
 	r0 = 0;						\
 	*(u64*)(r10 - 8) = r6;				\
-- 
2.40.1