On Tue, Jan 14, 2020 at 05:49:00PM -0800, Andrii Nakryiko wrote:
> On Tue, Jan 14, 2020 at 2:46 PM Martin KaFai Lau <kafai@xxxxxx> wrote:
> >
> > This patch makes bpftool support dumping a map's value properly
> > when the map's value type is a type of the running kernel's btf.
> > (i.e. map_info.btf_vmlinux_value_type_id is set instead of
> > map_info.btf_value_type_id). The first usecase is for the
> > BPF_MAP_TYPE_STRUCT_OPS.
> >
> > Signed-off-by: Martin KaFai Lau <kafai@xxxxxx>
> > ---
> >  tools/bpf/bpftool/map.c | 43 +++++++++++++++++++++++++++++++----------
> >  1 file changed, 33 insertions(+), 10 deletions(-)
> >
> > diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
> > index 4c5b15d736b6..d25f3b2355ad 100644
> > --- a/tools/bpf/bpftool/map.c
> > +++ b/tools/bpf/bpftool/map.c
> > @@ -20,6 +20,7 @@
> >  #include "btf.h"
> >  #include "json_writer.h"
> >  #include "main.h"
> > +#include "libbpf_internal.h"
> >
> >  const char * const map_type_name[] = {
> >  	[BPF_MAP_TYPE_UNSPEC]			= "unspec",
> > @@ -252,6 +253,7 @@ static int do_dump_btf(const struct btf_dumper *d,
> >  		       struct bpf_map_info *map_info, void *key,
> >  		       void *value)
> >  {
> > +	__u32 value_id;
> >  	int ret;
> >
> >  	/* start of key-value pair */
> > @@ -265,9 +267,12 @@ static int do_dump_btf(const struct btf_dumper *d,
> >  		goto err_end_obj;
> >  	}
> >
> > +	value_id = map_info->btf_vmlinux_value_type_id ?
> > +		   : map_info->btf_value_type_id;
> > +
> >  	if (!map_is_per_cpu(map_info->type)) {
> >  		jsonw_name(d->jw, "value");
> > -		ret = btf_dumper_type(d, map_info->btf_value_type_id, value);
> > +		ret = btf_dumper_type(d, value_id, value);
> >  	} else {
> >  		unsigned int i, n, step;
> >
> > @@ -279,8 +284,7 @@ static int do_dump_btf(const struct btf_dumper *d,
> >  			jsonw_start_object(d->jw);
> >  			jsonw_int_field(d->jw, "cpu", i);
> >  			jsonw_name(d->jw, "value");
> > -			ret = btf_dumper_type(d, map_info->btf_value_type_id,
> > -					      value + i * step);
> > +			ret = btf_dumper_type(d, value_id, value + i * step);
> >  			jsonw_end_object(d->jw);
> >  			if (ret)
> >  				break;
> > @@ -932,6 +936,27 @@ static int maps_have_btf(int *fds, int nb_fds)
> >  	return 1;
> >  }
> >
> > +static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
> > +{
> > +	struct btf *btf = NULL;
> > +
> > +	if (info->btf_vmlinux_value_type_id) {
> > +		btf = bpf_find_kernel_btf();
>
> If there are multiple maps we are dumping, it might become quite
> costly to re-read and re-parse kernel BTF all the time. Can we lazily
> load it, when required,
It is loaded lazily.

> and cache instead?
Cache it in bpftool/map.c? Sure.

>
> > +		if (IS_ERR(btf))
> > +			p_err("failed to get kernel btf");
> > +	} else if (info->btf_value_type_id) {
> > +		int err;
> > +
> > +		err = btf__get_from_id(info->btf_id, &btf);
> > +		if (err || !btf) {
> > +			p_err("failed to get btf");
> > +			btf = err ? ERR_PTR(err) : ERR_PTR(-ESRCH);
> > +		}
> > +	}
> > +
> > +	return btf;
> > +}
> > +
> >  static int
> >  map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
> >  	 bool show_header)
> > @@ -952,13 +977,11 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
> >  		prev_key = NULL;
> >
> >  	if (wtr) {
> > -		if (info->btf_id) {
> > -			err = btf__get_from_id(info->btf_id, &btf);
> > -			if (err || !btf) {
> > -				err = err ? : -ESRCH;
> > -				p_err("failed to get btf");
> > -				goto exit_free;
> > -			}
> > +		btf = get_map_kv_btf(info);
> > +		if (IS_ERR(btf)) {
> > +			err = PTR_ERR(btf);
> > +			btf = NULL;
> > +			goto exit_free;
> >  		}
> >
> >  		if (show_header) {
> > --
> > 2.17.1
> >
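
For reference, a rough sketch of the caching idea discussed above, i.e.
keeping the parsed kernel BTF in bpftool/map.c. The helper names
(bpf_find_kernel_btf, btf__get_from_id, p_err, IS_ERR/ERR_PTR) are the
ones used in the patch; the static btf_vmlinux variable and the
get_kernel_btf() wrapper are only illustrative, not the final code:

/* Illustrative sketch: cache the parsed vmlinux BTF in map.c so that
 * dumping several struct_ops maps does not re-read and re-parse the
 * kernel BTF for every map.
 */
static struct btf *btf_vmlinux;

static struct btf *get_kernel_btf(void)
{
	/* Parse the kernel BTF only on first use; later calls reuse the
	 * cached pointer (a cached ERR_PTR is returned as-is and caught
	 * by the IS_ERR() check in the caller).
	 */
	if (!btf_vmlinux)
		btf_vmlinux = bpf_find_kernel_btf();

	return btf_vmlinux;
}

static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
{
	struct btf *btf = NULL;

	if (info->btf_vmlinux_value_type_id) {
		btf = get_kernel_btf();
		if (IS_ERR(btf))
			p_err("failed to get kernel btf");
	} else if (info->btf_value_type_id) {
		int err = btf__get_from_id(info->btf_id, &btf);

		if (err || !btf) {
			p_err("failed to get btf");
			btf = err ? ERR_PTR(err) : ERR_PTR(-ESRCH);
		}
	}

	return btf;
}

With a cache like this, callers would also have to stop btf__free()-ing
the BTF per map when it is the shared vmlinux copy (e.g. only free when
btf != btf_vmlinux, or free the cache once on exit).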