signed_add*_overflows() was added back when there was no overflow-check
helper. With the introduction of such helpers in commit f0907827a8a91
("compiler.h: enable builtin overflow checkers and add fallback code"),
we can drop signed_add*_overflows() in kernel/bpf/verifier.c and use the
generic check_add_overflow() instead.

This will make future refactoring easier, and possibly allow taking
advantage of compiler-emitted hardware instructions that efficiently
implement these checks.

Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@xxxxxxxx>
---
shung-hsi.yu: maybe there's a better name than {min,max}_cur, but I
couldn't come up with one.
---
 kernel/bpf/verifier.c | 74 ++++++++++++++++++-------------------------
 1 file changed, 30 insertions(+), 44 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 3f6be4923655..b1ad76c514f5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12720,26 +12720,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	return 0;
 }
 
-static bool signed_add_overflows(s64 a, s64 b)
-{
-	/* Do the add in u64, where overflow is well-defined */
-	s64 res = (s64)((u64)a + (u64)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
-static bool signed_add32_overflows(s32 a, s32 b)
-{
-	/* Do the add in u32, where overflow is well-defined */
-	s32 res = (s32)((u32)a + (u32)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
 static bool signed_sub_overflows(s64 a, s64 b)
 {
 	/* Do the sub in u64, where overflow is well-defined */
@@ -13134,6 +13114,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	struct bpf_sanitize_info info = {};
 	u8 opcode = BPF_OP(insn->code);
 	u32 dst = insn->dst_reg;
+	s64 smin_cur, smax_cur;
+	u64 umin_cur, umax_cur;
 	int ret;
 
 	dst_reg = &regs[dst];
@@ -13241,21 +13223,21 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		 * added into the variable offset, and we copy the fixed offset
 		 * from ptr_reg.
 		 */
-		if (signed_add_overflows(smin_ptr, smin_val) ||
-		    signed_add_overflows(smax_ptr, smax_val)) {
+		if (check_add_overflow(smin_ptr, smin_val, &smin_cur) ||
+		    check_add_overflow(smax_ptr, smax_val, &smax_cur)) {
 			dst_reg->smin_value = S64_MIN;
 			dst_reg->smax_value = S64_MAX;
 		} else {
-			dst_reg->smin_value = smin_ptr + smin_val;
-			dst_reg->smax_value = smax_ptr + smax_val;
+			dst_reg->smin_value = smin_cur;
+			dst_reg->smax_value = smax_cur;
 		}
-		if (umin_ptr + umin_val < umin_ptr ||
-		    umax_ptr + umax_val < umax_ptr) {
+		if (check_add_overflow(umin_ptr, umin_val, &umin_cur) ||
+		    check_add_overflow(umax_ptr, umax_val, &umax_cur)) {
 			dst_reg->umin_value = 0;
 			dst_reg->umax_value = U64_MAX;
 		} else {
-			dst_reg->umin_value = umin_ptr + umin_val;
-			dst_reg->umax_value = umax_ptr + umax_val;
+			dst_reg->umin_value = umin_cur;
+			dst_reg->umax_value = umax_cur;
 		}
 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
 		dst_reg->off = ptr_reg->off;
@@ -13362,22 +13344,24 @@ static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
 	s32 smax_val = src_reg->s32_max_value;
 	u32 umin_val = src_reg->u32_min_value;
 	u32 umax_val = src_reg->u32_max_value;
+	s32 smin_cur, smax_cur;
+	u32 umin_cur, umax_cur;
 
-	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
-	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
+	if (check_add_overflow(dst_reg->s32_min_value, smin_val, &smin_cur) ||
+	    check_add_overflow(dst_reg->s32_max_value, smax_val, &smax_cur)) {
 		dst_reg->s32_min_value = S32_MIN;
 		dst_reg->s32_max_value = S32_MAX;
 	} else {
-		dst_reg->s32_min_value += smin_val;
-		dst_reg->s32_max_value += smax_val;
+		dst_reg->s32_min_value = smin_cur;
+		dst_reg->s32_max_value = smax_cur;
 	}
-	if (dst_reg->u32_min_value + umin_val < umin_val ||
-	    dst_reg->u32_max_value + umax_val < umax_val) {
+	if (check_add_overflow(dst_reg->u32_min_value, umin_val, &umin_cur) ||
+	    check_add_overflow(dst_reg->u32_max_value, umax_val, &umax_cur)) {
 		dst_reg->u32_min_value = 0;
 		dst_reg->u32_max_value = U32_MAX;
 	} else {
-		dst_reg->u32_min_value += umin_val;
-		dst_reg->u32_max_value += umax_val;
+		dst_reg->u32_min_value = umin_cur;
+		dst_reg->u32_max_value = umax_cur;
 	}
 }
 
@@ -13388,22 +13372,24 @@ static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
 	s64 smax_val = src_reg->smax_value;
 	u64 umin_val = src_reg->umin_value;
 	u64 umax_val = src_reg->umax_value;
+	s64 smin_cur, smax_cur;
+	u64 umin_cur, umax_cur;
 
-	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
-	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
+	if (check_add_overflow(dst_reg->smin_value, smin_val, &smin_cur) ||
+	    check_add_overflow(dst_reg->smax_value, smax_val, &smax_cur)) {
 		dst_reg->smin_value = S64_MIN;
 		dst_reg->smax_value = S64_MAX;
 	} else {
-		dst_reg->smin_value += smin_val;
-		dst_reg->smax_value += smax_val;
+		dst_reg->smin_value = smin_cur;
+		dst_reg->smax_value = smax_cur;
 	}
-	if (dst_reg->umin_value + umin_val < umin_val ||
-	    dst_reg->umax_value + umax_val < umax_val) {
+	if (check_add_overflow(dst_reg->umin_value, umin_val, &umin_cur) ||
+	    check_add_overflow(dst_reg->umax_value, umax_val, &umax_cur)) {
 		dst_reg->umin_value = 0;
 		dst_reg->umax_value = U64_MAX;
 	} else {
-		dst_reg->umin_value += umin_val;
-		dst_reg->umax_value += umax_val;
+		dst_reg->umin_value = umin_cur;
+		dst_reg->umax_value = umax_cur;
	}
 }
-- 
2.45.2
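
Note for reviewers unfamiliar with the helper: check_add_overflow(a, b, d)
evaluates to true when the addition overflows and otherwise makes the exact
sum available through the result pointer, which is why the non-overflow
branches above can assign the *_cur temporaries directly. Below is a minimal
userspace sketch of the same pattern, built on the __builtin_add_overflow()
builtin that the kernel helper wraps on recent GCC/Clang; the variable names
are illustrative only and not taken from the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t smin_ptr = INT64_MAX - 1;	/* bound deliberately close to overflow */
	int64_t smin_val = 5;
	int64_t smin_cur;

	/* Returns true when the signed addition would overflow; otherwise
	 * the exact sum is available in the result argument, mirroring the
	 * way the verifier consumes check_add_overflow().
	 */
	if (__builtin_add_overflow(smin_ptr, smin_val, &smin_cur))
		printf("overflow: widen bounds to S64_MIN/S64_MAX\n");
	else
		printf("no overflow: new bound is %" PRId64 "\n", smin_cur);

	return 0;
}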