> On Feb 22, 2022, at 11:29 AM, Mykola Lysenko <mykolal@xxxxxx> wrote:
>
> In particular:
> 1) remove output of inv for scalars
> 2) remove _value suffixes for umin/umax/s32_min/etc (except map_value)
> 3) remove output of id=0
> 4) remove output of ref_obj_id=0
>
> Signed-off-by: Mykola Lysenko <mykolal@xxxxxx>
> ---
> kernel/bpf/verifier.c | 58 ++---
> .../testing/selftests/bpf/prog_tests/align.c | 218 +++++++++---------
> .../selftests/bpf/prog_tests/log_buf.c | 4 +-
> 3 files changed, 142 insertions(+), 138 deletions(-)
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index d7473fee247c..fe8e279ad6a4 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -539,7 +539,7 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
>  	char postfix[16] = {0}, prefix[32] = {0};
>  	static const char * const str[] = {
>  		[NOT_INIT] = "?",
> -		[SCALAR_VALUE] = "inv",
> +		[SCALAR_VALUE] = "",
>  		[PTR_TO_CTX] = "ctx",
>  		[CONST_PTR_TO_MAP] = "map_ptr",
>  		[PTR_TO_MAP_VALUE] = "map_value",
> @@ -693,69 +693,73 @@ static void print_verifier_state(struct bpf_verifier_env *env,
>  			/* reg->off should be 0 for SCALAR_VALUE */
>  			verbose(env, "%lld", reg->var_off.value + reg->off);
>  		} else {
> +			const char *sep = "";
> +
>  			if (base_type(t) == PTR_TO_BTF_ID ||
>  			    base_type(t) == PTR_TO_PERCPU_BTF_ID)
>  				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
> -			verbose(env, "(id=%d", reg->id);
> -			if (reg_type_may_be_refcounted_or_null(t))
> -				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
> +
> +			verbose(env, "(");
> +
> +#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })

Will add a comment explaining what _a stands for.

> +
> +			if (reg->id)
> +				verbose_a("id=%d", reg->id);
> +
> +			if (reg_type_may_be_refcounted_or_null(t) && reg->ref_obj_id)
> +				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
>  			if (t != SCALAR_VALUE)
> -				verbose(env, ",off=%d", reg->off);
> +				verbose_a("off=%d", reg->off);
>  			if (type_is_pkt_pointer(t))
> -				verbose(env, ",r=%d", reg->range);
> +				verbose_a("r=%d", reg->range);
>  			else if (base_type(t) == CONST_PTR_TO_MAP ||
>  				 base_type(t) == PTR_TO_MAP_KEY ||
>  				 base_type(t) == PTR_TO_MAP_VALUE)
> -				verbose(env, ",ks=%d,vs=%d",
> -					reg->map_ptr->key_size,
> -					reg->map_ptr->value_size);
> +				verbose_a("ks=%d,vs=%d",
> +					  reg->map_ptr->key_size,
> +					  reg->map_ptr->value_size);
>  			if (tnum_is_const(reg->var_off)) {
>  				/* Typically an immediate SCALAR_VALUE, but
>  				 * could be a pointer whose offset is too big
>  				 * for reg->off
>  				 */
> -				verbose(env, ",imm=%llx", reg->var_off.value);
> +				verbose_a("imm=%llx", reg->var_off.value);
>  			} else {
>  				if (reg->smin_value != reg->umin_value &&
>  				    reg->smin_value != S64_MIN)
> -					verbose(env, ",smin_value=%lld",
> -						(long long)reg->smin_value);
> +					verbose_a("smin=%lld", (long long)reg->smin_value);
>  				if (reg->smax_value != reg->umax_value &&
>  				    reg->smax_value != S64_MAX)
> -					verbose(env, ",smax_value=%lld",
> -						(long long)reg->smax_value);
> +					verbose_a("smax=%lld", (long long)reg->smax_value);
>  				if (reg->umin_value != 0)
> -					verbose(env, ",umin_value=%llu",
> -						(unsigned long long)reg->umin_value);
> +					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
>  				if (reg->umax_value != U64_MAX)
> -					verbose(env, ",umax_value=%llu",
> -						(unsigned long long)reg->umax_value);
> +					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
>  				if (!tnum_is_unknown(reg->var_off)) {
>  					char tn_buf[48];
>
>  					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
> -					verbose(env, ",var_off=%s", tn_buf);
> +					verbose_a("var_off=%s", tn_buf);
>  				}
>  				if (reg->s32_min_value != reg->smin_value &&
>  				    reg->s32_min_value != S32_MIN)
> -					verbose(env, ",s32_min_value=%d",
> -						(int)(reg->s32_min_value));
> +					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
>  				if (reg->s32_max_value != reg->smax_value &&
>  				    reg->s32_max_value != S32_MAX)
> -					verbose(env, ",s32_max_value=%d",
> -						(int)(reg->s32_max_value));
> +					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
>  				if (reg->u32_min_value != reg->umin_value &&
>  				    reg->u32_min_value != U32_MIN)
> -					verbose(env, ",u32_min_value=%d",
> -						(int)(reg->u32_min_value));
> +					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
>  				if (reg->u32_max_value != reg->umax_value &&
>  				    reg->u32_max_value != U32_MAX)
> -					verbose(env, ",u32_max_value=%d",
> -						(int)(reg->u32_max_value));
> +					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
>  			}
>  			verbose(env, ")");
>  		}
>  	}
> +
> +#undef verbose_append

This should match the macro definition name; will send a version 3 with the fix.

> +
>  	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
>  		char types_buf[BPF_REG_SIZE + 1];
>  		bool valid = false;
> diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
> index 0ee29e11eaee..210dc6b4a169 100644
> --- a/tools/testing/selftests/bpf/prog_tests/align.c
> +++ b/tools/testing/selftests/bpf/prog_tests/align.c
> @@ -39,13 +39,13 @@ static struct bpf_align_test tests[] = {
>  		},
>  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
>  		.matches = {
> -			{0, "R1=ctx(id=0,off=0,imm=0)"},
> +			{0, "R1=ctx(off=0,imm=0)"},
>  			{0, "R10=fp0"},
> -			{0, "R3_w=inv2"},
> -			{1, "R3_w=inv4"},
> -			{2, "R3_w=inv8"},
> -			{3, "R3_w=inv16"},
> -			{4, "R3_w=inv32"},
> +			{0, "R3_w=2"},
> +			{1, "R3_w=4"},
> +			{2, "R3_w=8"},
> +			{3, "R3_w=16"},
> +			{4, "R3_w=32"},
>  		},
>  	},
>  	{
> @@ -67,19 +67,19 @@ static struct bpf_align_test tests[] = {
>  		},
>  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
>  		.matches = {
> -			{0, "R1=ctx(id=0,off=0,imm=0)"},
> +			{0, "R1=ctx(off=0,imm=0)"},
>  			{0, "R10=fp0"},
> -			{0, "R3_w=inv1"},
> -			{1, "R3_w=inv2"},
> -			{2, "R3_w=inv4"},
> -			{3, "R3_w=inv8"},
> -			{4, "R3_w=inv16"},
> -			{5, "R3_w=inv1"},
> -			{6, "R4_w=inv32"},
> -			{7, "R4_w=inv16"},
> -			{8, "R4_w=inv8"},
> -			{9, "R4_w=inv4"},
> -			{10, "R4_w=inv2"},
> +			{0, "R3_w=1"},
> +			{1, "R3_w=2"},
> +			{2, "R3_w=4"},
> +			{3, "R3_w=8"},
> +			{4, "R3_w=16"},
> +			{5, "R3_w=1"},
> +			{6, "R4_w=32"},
> +			{7, "R4_w=16"},
> +			{8, "R4_w=8"},
> +			{9, "R4_w=4"},
> +			{10, "R4_w=2"},
>  		},
>  	},
>  	{
> @@ -96,14 +96,14 @@ static struct bpf_align_test tests[] = {
>  		},
>  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
>  		.matches = {
> -			{0, "R1=ctx(id=0,off=0,imm=0)"},
> +			{0, "R1=ctx(off=0,imm=0)"},
>  			{0, "R10=fp0"},
> -			{0, "R3_w=inv4"},
> -			{1, "R3_w=inv8"},
> -			{2, "R3_w=inv10"},
> -			{3, "R4_w=inv8"},
> -			{4, "R4_w=inv12"},
> -			{5, "R4_w=inv14"},
> +			{0, "R3_w=4"},
> +			{1, "R3_w=8"},
> +			{2, "R3_w=10"},
> +			{3, "R4_w=8"},
> +			{4, "R4_w=12"},
> +			{5, "R4_w=14"},
>  		},
>  	},
>  	{
> @@ -118,12 +118,12 @@ static struct bpf_align_test tests[] = {
>  		},
>  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
>  		.matches = {
> -			{0, "R1=ctx(id=0,off=0,imm=0)"},
> +			{0, "R1=ctx(off=0,imm=0)"},
>  			{0, "R10=fp0"},
> -			{0, "R3_w=inv7"},
> -			{1, "R3_w=inv7"},
> -			{2, "R3_w=inv14"},
> -			{3, "R3_w=inv56"},
> +			{0, "R3_w=7"},
> +			{1, "R3_w=7"},
> +			{2, "R3_w=14"},
> +			{3, "R3_w=56"},
>  		},
>  	},
>
> @@ -161,19 +161,19 @@ static struct bpf_align_test tests[] = {
>  		},
>  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
>  		.matches = {
> -			{6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
> -			{6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
"R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, > - {7, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, > - {8, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > - {9, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, > - {10, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, > - {12, "R3_w=pkt_end(id=0,off=0,imm=0)"}, > - {17, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, > - {18, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"}, > - {19, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, > - {20, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, > - {21, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > - {22, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, > + {6, "R0_w=pkt(off=8,r=8,imm=0)"}, > + {6, "R3_w=(umax=255,var_off=(0x0; 0xff))"}, > + {7, "R3_w=(umax=510,var_off=(0x0; 0x1fe))"}, > + {8, "R3_w=(umax=1020,var_off=(0x0; 0x3fc))"}, > + {9, "R3_w=(umax=2040,var_off=(0x0; 0x7f8))"}, > + {10, "R3_w=(umax=4080,var_off=(0x0; 0xff0))"}, > + {12, "R3_w=pkt_end(off=0,imm=0)"}, > + {17, "R4_w=(umax=255,var_off=(0x0; 0xff))"}, > + {18, "R4_w=(umax=8160,var_off=(0x0; 0x1fe0))"}, > + {19, "R4_w=(umax=4080,var_off=(0x0; 0xff0))"}, > + {20, "R4_w=(umax=2040,var_off=(0x0; 0x7f8))"}, > + {21, "R4_w=(umax=1020,var_off=(0x0; 0x3fc))"}, > + {22, "R4_w=(umax=510,var_off=(0x0; 0x1fe))"}, > }, > }, > { > @@ -194,16 +194,16 @@ static struct bpf_align_test tests[] = { > }, > .prog_type = BPF_PROG_TYPE_SCHED_CLS, > .matches = { > - {6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, > - {7, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, > - {8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, > - {9, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, > - {10, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, > - {11, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, > - {12, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > - {13, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, > - {14, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, > - {15, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, > + {6, "R3_w=(umax=255,var_off=(0x0; 0xff))"}, > + {7, "R4_w=(id=1,umax=255,var_off=(0x0; 0xff))"}, > + {8, "R4_w=(umax=255,var_off=(0x0; 0xff))"}, > + {9, "R4_w=(id=1,umax=255,var_off=(0x0; 0xff))"}, > + {10, "R4_w=(umax=510,var_off=(0x0; 0x1fe))"}, > + {11, "R4_w=(id=1,umax=255,var_off=(0x0; 0xff))"}, > + {12, "R4_w=(umax=1020,var_off=(0x0; 0x3fc))"}, > + {13, "R4_w=(id=1,umax=255,var_off=(0x0; 0xff))"}, > + {14, "R4_w=(umax=2040,var_off=(0x0; 0x7f8))"}, > + {15, "R4_w=(umax=4080,var_off=(0x0; 0xff0))"}, > }, > }, > { > @@ -234,14 +234,14 @@ static struct bpf_align_test tests[] = { > }, > .prog_type = BPF_PROG_TYPE_SCHED_CLS, > .matches = { > - {2, "R5_w=pkt(id=0,off=0,r=0,imm=0)"}, > - {4, "R5_w=pkt(id=0,off=14,r=0,imm=0)"}, > - {5, "R4_w=pkt(id=0,off=14,r=0,imm=0)"}, > - {9, "R2=pkt(id=0,off=0,r=18,imm=0)"}, > - {10, "R5=pkt(id=0,off=14,r=18,imm=0)"}, > - {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, > - {13, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, > - {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, > + {2, "R5_w=pkt(off=0,r=0,imm=0)"}, > + {4, "R5_w=pkt(off=14,r=0,imm=0)"}, > + {5, "R4_w=pkt(off=14,r=0,imm=0)"}, > + {9, "R2=pkt(off=0,r=18,imm=0)"}, > + {10, "R5=pkt(off=14,r=18,imm=0)"}, > + {10, "R4_w=(umax=255,var_off=(0x0; 0xff))"}, > + {13, "R4_w=(umax=65535,var_off=(0x0; 0xffff))"}, > + {14, "R4_w=(umax=65535,var_off=(0x0; 
0xffff))"}, > }, > }, > { > @@ -296,59 +296,59 @@ static struct bpf_align_test tests[] = { > /* Calculated offset in R6 has unknown value, but known > * alignment of 4. > */ > - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, > - {7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, > + {7, "R6_w=(umax=1020,var_off=(0x0; 0x3fc))"}, > /* Offset is added to packet pointer R5, resulting in > * known fixed offset, and variable offset from R6. > */ > - {11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, > /* At the time the word size load is performed from R5, > * it's total offset is NET_IP_ALIGN + reg->off (0) + > * reg->aux_off (14) which is 16. Then the variable > * offset is considered using reg->aux_off_align which > * is 4 and meets the load's requirements. > */ > - {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, > - {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, > + {15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, > /* Variable offset is added to R5 packet pointer, > * resulting in auxiliary alignment of 4. > */ > - {17, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, > /* Constant offset is added to R5, resulting in > * reg->off of 14. > */ > - {18, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, > /* At the time the word size load is performed from R5, > * its total fixed offset is NET_IP_ALIGN + reg->off > * (14) which is 16. Then the variable offset is 4-byte > * aligned, so the total offset is 4-byte aligned and > * meets the load's requirements. > */ > - {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, > - {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, > + {23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, > /* Constant offset is added to R5 packet pointer, > * resulting in reg->off value of 14. > */ > - {25, "R5_w=pkt(id=0,off=14,r=8"}, > + {25, "R5_w=pkt(off=14,r=8"}, > /* Variable offset is added to R5, resulting in a > * variable offset of (4n). > */ > - {26, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, > /* Constant is added to R5 again, setting reg->off to 18. */ > - {27, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, > /* And once more we add a variable; resulting var_off > * is still (4n), fixed offset is not changed. > * Also, we create a new reg->id. > */ > - {28, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, > + {28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"}, > /* At the time the word size load is performed from R5, > * its total fixed offset is NET_IP_ALIGN + reg->off (18) > * which is 20. Then the variable offset is (4n), so > * the total offset is 4-byte aligned and meets the > * load's requirements. 
> */ > - {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, > - {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, > + {33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, > + {33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, > }, > }, > { > @@ -386,36 +386,36 @@ static struct bpf_align_test tests[] = { > /* Calculated offset in R6 has unknown value, but known > * alignment of 4. > */ > - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, > - {7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, > + {7, "R6_w=(umax=1020,var_off=(0x0; 0x3fc))"}, > /* Adding 14 makes R6 be (4n+2) */ > - {8, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, > + {8, "R6_w=(umin=14,umax=1034,var_off=(0x2; 0x7fc))"}, > /* Packet pointer has (4n+2) offset */ > - {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, > - {12, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, > + {11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, > + {12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, > /* At the time the word size load is performed from R5, > * its total fixed offset is NET_IP_ALIGN + reg->off (0) > * which is 2. Then the variable offset is (4n+2), so > * the total offset is 4-byte aligned and meets the > * load's requirements. > */ > - {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, > + {15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, > /* Newly read value in R6 was shifted left by 2, so has > * known alignment of 4. > */ > - {17, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {17, "R6_w=(umax=1020,var_off=(0x0; 0x3fc))"}, > /* Added (4n) to packet pointer's (4n+2) var_off, giving > * another (4n+2). > */ > - {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, > - {20, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, > + {19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, > + {20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, > /* At the time the word size load is performed from R5, > * its total fixed offset is NET_IP_ALIGN + reg->off (0) > * which is 2. Then the variable offset is (4n+2), so > * the total offset is 4-byte aligned and meets the > * load's requirements. > */ > - {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, > + {23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, > }, > }, > { > @@ -448,18 +448,18 @@ static struct bpf_align_test tests[] = { > .prog_type = BPF_PROG_TYPE_SCHED_CLS, > .result = REJECT, > .matches = { > - {3, "R5_w=pkt_end(id=0,off=0,imm=0)"}, > + {3, "R5_w=pkt_end(off=0,imm=0)"}, > /* (ptr - ptr) << 2 == unknown, (4n) */ > - {5, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"}, > + {5, "R5_w=(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"}, > /* (4n) + 14 == (4n+2). We blow our bounds, because > * the add could overflow. 
> */ > - {6, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, > + {6, "R5_w=(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, > /* Checked s>=0 */ > - {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, > + {9, "R5=(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, > /* packet pointer + nonnegative (4n+2) */ > - {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, > - {12, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, > + {11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, > + {12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, > /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine. > * We checked the bounds, but it might have been able > * to overflow if the packet pointer started in the > @@ -467,7 +467,7 @@ static struct bpf_align_test tests[] = { > * So we did not get a 'range' on R6, and the access > * attempt will fail. > */ > - {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, > + {15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, > } > }, > { > @@ -502,23 +502,23 @@ static struct bpf_align_test tests[] = { > /* Calculated offset in R6 has unknown value, but known > * alignment of 4. > */ > - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, > - {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, > + {8, "R6_w=(umax=1020,var_off=(0x0; 0x3fc))"}, > /* Adding 14 makes R6 be (4n+2) */ > - {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, > + {9, "R6_w=(umin=14,umax=1034,var_off=(0x2; 0x7fc))"}, > /* New unknown value in R7 is (4n) */ > - {10, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, > + {10, "R7_w=(umax=1020,var_off=(0x0; 0x3fc))"}, > /* Subtracting it from R6 blows our unsigned bounds */ > - {11, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, > + {11, "R6=(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, > /* Checked s>= 0 */ > - {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"}, > + {14, "R6=(umin=2,umax=1034,var_off=(0x2; 0x7fc))"}, > /* At the time the word size load is performed from R5, > * its total fixed offset is NET_IP_ALIGN + reg->off (0) > * which is 2. Then the variable offset is (4n+2), so > * the total offset is 4-byte aligned and meets the > * load's requirements. > */ > - {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"}, > + {20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"}, > > }, > }, > @@ -556,23 +556,23 @@ static struct bpf_align_test tests[] = { > /* Calculated offset in R6 has unknown value, but known > * alignment of 4. 
> */ > - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, > - {9, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"}, > + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, > + {9, "R6_w=(umax=60,var_off=(0x0; 0x3c))"}, > /* Adding 14 makes R6 be (4n+2) */ > - {10, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"}, > + {10, "R6_w=(umin=14,umax=74,var_off=(0x2; 0x7c))"}, > /* Subtracting from packet pointer overflows ubounds */ > - {13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"}, > + {13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"}, > /* New unknown value in R7 is (4n), >= 76 */ > - {14, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"}, > + {14, "R7_w=(umin=76,umax=1096,var_off=(0x0; 0x7fc))"}, > /* Adding it to packet pointer gives nice bounds again */ > - {16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"}, > + {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"}, > /* At the time the word size load is performed from R5, > * its total fixed offset is NET_IP_ALIGN + reg->off (0) > * which is 2. Then the variable offset is (4n+2), so > * the total offset is 4-byte aligned and meets the > * load's requirements. > */ > - {20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"}, > + {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"}, > }, > }, > }; > @@ -648,8 +648,8 @@ static int do_test_single(struct bpf_align_test *test) > /* Check the next line as well in case the previous line > * did not have a corresponding bpf insn. Example: > * func#0 @0 > - * 0: R1=ctx(id=0,off=0,imm=0) R10=fp0 > - * 0: (b7) r3 = 2 ; R3_w=inv2 > + * 0: R1=ctx(off=0,imm=0) R10=fp0 > + * 0: (b7) r3 = 2 ; R3_w=2 > */ > if (!strstr(line_ptr, m.match)) { > cur_line = -1; > diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c > index 1ef377a7e731..fe9a23e65ef4 100644 > --- a/tools/testing/selftests/bpf/prog_tests/log_buf.c > +++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c > @@ -78,7 +78,7 @@ static void obj_load_log_buf(void) > ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"), > "libbpf_log_not_empty"); > ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty"); > - ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), > + ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), > "good_log_verbose"); > ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"), > "bad_log_not_empty"); > @@ -175,7 +175,7 @@ static void bpf_prog_load_log_buf(void) > opts.log_level = 2; > fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", > good_prog_insns, good_prog_insn_cnt, &opts); > - ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), "good_log_2"); > + ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2"); > ASSERT_GE(fd, 0, "good_fd2"); > if (fd >= 0) > close(fd); > -- > 2.30.2 >