On Wed, Jan 31, 2024 at 05:24:08PM +0100, Viktor Malik wrote:
> Instead of using magic offsets to access BTF ID set data, leverage types
> from btf_ids.h (btf_id_set and btf_id_set8) which define the actual
> layout of the data. Thanks to this change, set sorting should also
> continue working if the layout changes.
>
> This requires to sync the definition of 'struct btf_id_set8' from
> include/linux/btf_ids.h to tools/include/linux/btf_ids.h. We don't sync
> the rest of the file at the moment, b/c that would require to also sync
> multiple dependent headers and we don't need any other defs from
> btf_ids.h.
>
> Signed-off-by: Viktor Malik <vmalik@xxxxxxxxxx>
> ---
>  tools/bpf/resolve_btfids/main.c | 30 ++++++++++++++++++++++--------
>  tools/include/linux/btf_ids.h   |  9 +++++++++
>  2 files changed, 31 insertions(+), 8 deletions(-)
>
> diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
> index 27a23196d58e..7badf1557e5c 100644
> --- a/tools/bpf/resolve_btfids/main.c
> +++ b/tools/bpf/resolve_btfids/main.c
> @@ -70,6 +70,7 @@
>  #include <sys/stat.h>
>  #include <fcntl.h>
>  #include <errno.h>
> +#include <linux/btf_ids.h>
>  #include <linux/rbtree.h>
>  #include <linux/zalloc.h>
>  #include <linux/err.h>
> @@ -78,7 +79,7 @@
>  #include <subcmd/parse-options.h>
>
>  #define BTF_IDS_SECTION ".BTF_ids"
> -#define BTF_ID "__BTF_ID__"
> +#define BTF_ID_PREFIX "__BTF_ID__"

nit: does not look necessary to me

>
>  #define BTF_STRUCT "struct"
>  #define BTF_UNION "union"
> @@ -161,7 +162,7 @@ static int eprintf(int level, int var, const char *fmt, ...)
>
>  static bool is_btf_id(const char *name)
>  {
> -	return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
> +	return name && !strncmp(name, BTF_ID_PREFIX, sizeof(BTF_ID_PREFIX) - 1);
>  }
>
>  static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
> @@ -441,7 +442,7 @@ static int symbols_collect(struct object *obj)
>  		 * __BTF_ID__TYPE__vfs_truncate__0
>  		 * prefix =  ^
>  		 */
> -		prefix = name + sizeof(BTF_ID) - 1;
> +		prefix = name + sizeof(BTF_ID_PREFIX) - 1;
>
>  		/* struct */
>  		if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
> @@ -656,8 +657,8 @@ static int sets_patch(struct object *obj)
>  	while (next) {
>  		unsigned long addr, idx;
>  		struct btf_id *id;
> -		int *base;
> -		int cnt;
> +		void *base;
> +		int cnt, size;
>
>  		id = rb_entry(next, struct btf_id, rb_node);
>  		addr = id->addr[0];
> @@ -671,13 +672,26 @@ static int sets_patch(struct object *obj)
>  		}
>
>  		idx = idx / sizeof(int);
> -		base = &ptr[idx] + (id->is_set8 ? 2 : 1);
> -		cnt = ptr[idx];
> +		if (id->is_set) {
> +			struct btf_id_set *set;
> +
> +			set = (struct btf_id_set *)&ptr[idx];
> +			base = set->ids;
> +			cnt = set->cnt;
> +			size = sizeof(set->ids[0]);
> +		} else {
> +			struct btf_id_set8 *set8;
> +
> +			set8 = (struct btf_id_set8 *)&ptr[idx];
> +			base = set8->pairs;
> +			cnt = set8->cnt;
> +			size = sizeof(set8->pairs[0]);
> +		}
>
>  		pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
>  			 (idx + 1) * sizeof(int), cnt, id->name);
>
> -		qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
> +		qsort(base, cnt, size, cmp_id);

maybe we could call qsort on top of each set type, seems simpler:

	while (next) {
+		struct btf_id_set8 *set8;
+		struct btf_id_set *set;
 		unsigned long addr, idx;
 		struct btf_id *id;
-		int *base;
-		int cnt;

 		id = rb_entry(next, struct btf_id, rb_node);
 		addr = id->addr[0];
@@ -671,13 +672,16 @@ static int sets_patch(struct object *obj)
 		}

 		idx = idx / sizeof(int);
-		base = &ptr[idx] + (id->is_set8 ? 2 : 1);
-		cnt = ptr[idx];
+		if (id->is_set) {
+			set = (struct btf_id_set *)&ptr[idx];
+			qsort(set->ids, set->cnt, sizeof(set->ids[0]), cmp_id);
+		} else {
+			set8 = (struct btf_id_set8 *)&ptr[idx];
+			qsort(set8->pairs, set8->cnt, sizeof(set8->pairs[0]), cmp_id);
+		}

 		pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
-			 (idx + 1) * sizeof(int), cnt, id->name);
-
-		qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
+			 (idx + 1) * sizeof(int), id->is_set ? set->cnt : set8->cnt, id->name);

 		next = rb_next(next);
 	}
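For reference (writing this from memory, so please double-check against
include/linux/btf_ids.h), the two set layouts in play look roughly like:

	struct btf_id_set {
		u32 cnt;
		u32 ids[];	/* 4-byte entries */
	};

	struct btf_id_set8 {
		u32 cnt;
		u32 flags;
		struct {
			u32 id;
			u32 flags;
		} pairs[];	/* 8-byte entries */
	};

which is where the old "+ (id->is_set8 ? 2 : 1)" header skip and the
sizeof(uint64_t)/sizeof(int) element sizes came from. IIUC cmp_id only
compares the leading id of each element, so the same comparator works
for both element sizes either way.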
jirka