On Thu, May 26, 2022 at 11:55 AM Yonghong Song <yhs@xxxxxx> wrote: > > The enum64 relocation support is added. The bpf local type > could be either enum or enum64 and the remote type could be > either enum or enum64 too. The all combinations of local enum/enum64 > and remote enum/enum64 are supported. > > Signed-off-by: Yonghong Song <yhs@xxxxxx> > --- > tools/lib/bpf/btf.h | 7 +++++ > tools/lib/bpf/libbpf.c | 7 ++--- > tools/lib/bpf/relo_core.c | 54 +++++++++++++++++++++++++++------------ > 3 files changed, 48 insertions(+), 20 deletions(-) > [...] > local_essent_len = bpf_core_essential_name_len(local_acc->name); > > - for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) { > - targ_name = btf__name_by_offset(targ_spec->btf, e->name_off); > + for (i = 0; i < btf_vlen(targ_type); i++) { > + if (btf_is_enum(targ_type)) > + name_off = btf_enum(targ_type)[i].name_off; > + else > + name_off = btf_enum64(targ_type)[i].name_off; > + > + targ_name = btf__name_by_offset(targ_spec->btf, name_off); > targ_essent_len = bpf_core_essential_name_len(targ_name); > if (targ_essent_len != local_essent_len) > continue; > @@ -680,8 +688,7 @@ static int bpf_core_calc_field_relo(const char *prog_name, > *val = byte_sz; > break; > case BPF_CORE_FIELD_SIGNED: > - /* enums will be assumed unsigned */ > - *val = btf_is_enum(mt) || > + *val = btf_type_is_any_enum(mt) || wouldn't this be more correct with kflag meaning "signed": (btf_type_is_any_enum(mt) && btf_kflag(mt)) || ? 
> (btf_int_encoding(mt) & BTF_INT_SIGNED); > if (validate) > *validate = true; /* signedness is never ambiguous */ > @@ -754,7 +761,6 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, > __u64 *val) > { > const struct btf_type *t; > - const struct btf_enum *e; > > switch (relo->kind) { > case BPF_CORE_ENUMVAL_EXISTS: > @@ -764,8 +770,10 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, > if (!spec) > return -EUCLEAN; /* request instruction poisoning */ > t = btf_type_by_id(spec->btf, spec->spec[0].type_id); > - e = btf_enum(t) + spec->spec[0].idx; > - *val = e->val; > + if (btf_is_enum(t)) > + *val = btf_enum(t)[spec->spec[0].idx].val; > + else > + *val = btf_enum64_value(btf_enum64(t) + spec->spec[0].idx); > break; > default: > return -EOPNOTSUPP; > @@ -1060,7 +1068,6 @@ int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn, > int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec) > { > const struct btf_type *t; > - const struct btf_enum *e; > const char *s; > __u32 type_id; > int i, len = 0; > @@ -1089,10 +1096,23 @@ int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *s > > if (core_relo_is_enumval_based(spec->relo_kind)) { > t = skip_mods_and_typedefs(spec->btf, type_id, NULL); > - e = btf_enum(t) + spec->raw_spec[0]; > - s = btf__name_by_offset(spec->btf, e->name_off); > + if (btf_is_enum(t)) { > + const struct btf_enum *e; > + const char *fmt_str; > + > + e = btf_enum(t) + spec->raw_spec[0]; > + s = btf__name_by_offset(spec->btf, e->name_off); > + fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %d" : "::%s = %u"; minor nit: btf_kflag(t) instead of BTF_INFO_KFLAG(t->info)? 
> + append_buf(fmt_str, s, e->val); > + } else { > + const struct btf_enum64 *e; > + const char *fmt_str; > > - append_buf("::%s = %u", s, e->val); > + e = btf_enum64(t) + spec->raw_spec[0]; > + s = btf__name_by_offset(spec->btf, e->name_off); > + fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %lld" : "::%s = %llu"; > + append_buf(fmt_str, s, (unsigned long long)btf_enum64_value(e)); > + } > return len; > } > > -- > 2.30.2 >