Similar to the previous patch that drops signed_add*_overflows() and
uses the (compiler) builtin-based check_add_overflow(), do the same for
signed_sub*_overflows() and replace them with the generic
check_sub_overflow() to make future refactoring easier.

The unsigned overflow checks for subtraction do not use helpers and are
simple enough already, so they are left untouched.

Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@xxxxxxxx>
---
 kernel/bpf/verifier.c | 46 +++++++++++++------------------------
 1 file changed, 14 insertions(+), 32 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b1ad76c514f5..2c1657a26fdb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12720,26 +12720,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	return 0;
 }
 
-static bool signed_sub_overflows(s64 a, s64 b)
-{
-	/* Do the sub in u64, where overflow is well-defined */
-	s64 res = (s64)((u64)a - (u64)b);
-
-	if (b < 0)
-		return res < a;
-	return res > a;
-}
-
-static bool signed_sub32_overflows(s32 a, s32 b)
-{
-	/* Do the sub in u32, where overflow is well-defined */
-	s32 res = (s32)((u32)a - (u32)b);
-
-	if (b < 0)
-		return res < a;
-	return res > a;
-}
-
 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
 				  const struct bpf_reg_state *reg,
 				  enum bpf_reg_type type)
@@ -13280,14 +13260,14 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		/* A new variable offset is created.  If the subtrahend is known
 		 * nonnegative, then any reg->range we had before is still good.
 		 */
-		if (signed_sub_overflows(smin_ptr, smax_val) ||
-		    signed_sub_overflows(smax_ptr, smin_val)) {
+		if (check_sub_overflow(smin_ptr, smax_val, &smin_cur) ||
+		    check_sub_overflow(smax_ptr, smin_val, &smax_cur)) {
 			/* Overflow possible, we know nothing */
 			dst_reg->smin_value = S64_MIN;
 			dst_reg->smax_value = S64_MAX;
 		} else {
-			dst_reg->smin_value = smin_ptr - smax_val;
-			dst_reg->smax_value = smax_ptr - smin_val;
+			dst_reg->smin_value = smin_cur;
+			dst_reg->smax_value = smax_cur;
 		}
 		if (umin_ptr < umax_val) {
 			/* Overflow possible, we know nothing */
@@ -13400,15 +13380,16 @@ static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
 	s32 smax_val = src_reg->s32_max_value;
 	u32 umin_val = src_reg->u32_min_value;
 	u32 umax_val = src_reg->u32_max_value;
+	s32 smin_cur, smax_cur;
 
-	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
-	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
+	if (check_sub_overflow(dst_reg->s32_min_value, smax_val, &smin_cur) ||
+	    check_sub_overflow(dst_reg->s32_max_value, smin_val, &smax_cur)) {
 		/* Overflow possible, we know nothing */
 		dst_reg->s32_min_value = S32_MIN;
 		dst_reg->s32_max_value = S32_MAX;
 	} else {
-		dst_reg->s32_min_value -= smax_val;
-		dst_reg->s32_max_value -= smin_val;
+		dst_reg->s32_min_value = smin_cur;
+		dst_reg->s32_max_value = smax_cur;
 	}
 	if (dst_reg->u32_min_value < umax_val) {
 		/* Overflow possible, we know nothing */
@@ -13428,15 +13409,16 @@ static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
 	s64 smax_val = src_reg->smax_value;
 	u64 umin_val = src_reg->umin_value;
 	u64 umax_val = src_reg->umax_value;
+	s64 smin_cur, smax_cur;
 
-	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
-	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
+	if (check_sub_overflow(dst_reg->smin_value, smax_val, &smin_cur) ||
+	    check_sub_overflow(dst_reg->smax_value, smin_val, &smax_cur)) {
 		/* Overflow possible, we know nothing */
 		dst_reg->smin_value = S64_MIN;
 		dst_reg->smax_value = S64_MAX;
 	} else {
-		dst_reg->smin_value -= smax_val;
-		dst_reg->smax_value -= smin_val;
+		dst_reg->smin_value = smin_cur;
+		dst_reg->smax_value = smax_cur;
 	}
 	if (dst_reg->umin_value < umax_val) {
 		/* Overflow possible, we know nothing */
-- 
2.45.2
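
A note on the helper's contract, for reviewers less familiar with it:
check_sub_overflow() comes from include/linux/overflow.h and, on the
compilers the kernel supports, reduces to the __builtin_sub_overflow()
builtin. It returns true when the subtraction overflows and otherwise
stores the result through the third argument. Below is a minimal
userspace sketch of that behavior using the builtin directly; the
values are illustrative only and not part of the patch:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t res;

		/* No overflow: returns false, res holds 1 - 2 = -1. */
		if (!__builtin_sub_overflow((int64_t)1, (int64_t)2, &res))
			printf("1 - 2 = %" PRId64 "\n", res);

		/* INT64_MIN - 1 overflows: returns true, and the caller
		 * ignores the stored result, just as the verifier falls
		 * back to the full [S64_MIN, S64_MAX] range above.
		 */
		if (__builtin_sub_overflow(INT64_MIN, (int64_t)1, &res))
			printf("S64_MIN - 1 overflows\n");

		return 0;
	}

This is the same check the removed signed_sub*_overflows() helpers
computed by hand, without the open-coded unsigned casts.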