On Fri, Mar 04, 2022 at 08:21:34AM -0800, Yonghong Song wrote: > > > diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c > > index 7c384d10e95f..6c6e7c90cc3d 100644 > > --- a/tools/bpf/bpftool/pids.c > > +++ b/tools/bpf/bpftool/pids.c > > @@ -78,6 +78,8 @@ static void add_ref(struct hashmap *map, struct pid_iter_entry *e) > > ref->pid = e->pid; > > memcpy(ref->comm, e->comm, sizeof(ref->comm)); > > refs->ref_cnt = 1; > > + refs->bpf_cookie_set = e->bpf_cookie_set; > > + refs->bpf_cookie = e->bpf_cookie; > > err = hashmap__append(map, u32_as_hash_field(e->id), refs); > > if (err) > > @@ -205,6 +207,9 @@ void emit_obj_refs_json(struct hashmap *map, __u32 id, > > if (refs->ref_cnt == 0) > > break; > > + if (refs->bpf_cookie_set) > > + jsonw_lluint_field(json_writer, "bpf_cookie", refs->bpf_cookie); > > The original motivation for 'bpf_cookie' is for kprobe to get function > addresses. In that case, printing with llx (0x...) is better than llu > since people can easily search it with /proc/kallsyms to get what the > function it attached to. But on the other hand, other use cases might > be simply just wanting an int. > > I don't have a strong opinion here. Just to speak out loud so other > people can comment on this too. Interesting, I didn't know that. The current implementation of 'bpf_cookie' seems to be quite opaque, with no assumptions about what it contains, so it probably makes sense to keep it like that. But I don't have a strong opinion here either, would love to hear what others think. 
> > diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c > > index f70702fcb224..91366ce33717 100644 > > --- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c > > +++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c > > @@ -38,6 +38,18 @@ static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type) > > } > > } > > +/* could be used only with BPF_LINK_TYPE_PERF_EVENT links */ > > +static __always_inline __u64 get_bpf_cookie(struct bpf_link *link) > > +{ > > + struct bpf_perf_link *perf_link; > > + struct perf_event *event; > > + > > + perf_link = container_of(link, struct bpf_perf_link, link); > > + event = BPF_CORE_READ(perf_link, perf_file, private_data); > > + return BPF_CORE_READ(event, bpf_cookie); > > +} > > + > > + > > SEC("iter/task_file") > > int iter(struct bpf_iter__task_file *ctx) > > { > > @@ -69,8 +81,21 @@ int iter(struct bpf_iter__task_file *ctx) > > if (file->f_op != fops) > > return 0; > > + __builtin_memset(&e, 0, sizeof(e)); > > e.pid = task->tgid; > > e.id = get_obj_id(file->private_data, obj_type); > > + e.bpf_cookie = 0; > > + e.bpf_cookie_set = false; > > We already have __builtin_memset(&e, 0, sizeof(e)) in the above, so > the above e.bpf_cookie and e.bpf_cookie_set assignment is not > necessary. Good point, will remove this.