On Sat, Jul 6, 2019 at 10:18 AM Yonghong Song <yhs@xxxxxx> wrote:
>
>
>
> On 7/5/19 11:02 PM, Andrii Nakryiko wrote:
> > Add test verifying perf buffer API functionality.
> >
> > Signed-off-by: Andrii Nakryiko <andriin@xxxxxx>
> > Acked-by: Song Liu <songliubraving@xxxxxx>
> > ---
> >   .../selftests/bpf/prog_tests/perf_buffer.c   | 94 +++++++++++++++++++
> >   .../selftests/bpf/progs/test_perf_buffer.c   | 25 +++++
> >   2 files changed, 119 insertions(+)
> >   create mode 100644 tools/testing/selftests/bpf/prog_tests/perf_buffer.c
> >   create mode 100644 tools/testing/selftests/bpf/progs/test_perf_buffer.c
> >
> > diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
> > new file mode 100644
> > index 000000000000..64556ab0d1a9
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
> > @@ -0,0 +1,94 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +#define _GNU_SOURCE
> > +#include <pthread.h>
> > +#include <sched.h>
> > +#include <sys/socket.h>
> > +#include <test_progs.h>
> > +
> > +static void on_sample(void *ctx, int cpu, void *data, __u32 size)
> > +{
> > +       int cpu_data = *(int *)data, duration = 0;
> > +       cpu_set_t *cpu_seen = ctx;
> > +
> > +       if (cpu_data != cpu)
> > +               CHECK(cpu_data != cpu, "check_cpu_data",
> > +                     "cpu_data %d != cpu %d\n", cpu_data, cpu);
> > +
> > +       CPU_SET(cpu, cpu_seen);
> > +}
> > +
> > +void test_perf_buffer(void)
> > +{
> > +       int err, prog_fd, nr_cpus, i, duration = 0;
> > +       const char *prog_name = "kprobe/sys_nanosleep";
> > +       const char *file = "./test_perf_buffer.o";
> > +       struct perf_buffer_opts pb_opts = {};
> > +       struct bpf_map *perf_buf_map;
> > +       cpu_set_t cpu_set, cpu_seen;
> > +       struct bpf_program *prog;
> > +       struct bpf_object *obj;
> > +       struct perf_buffer *pb;
> > +       struct bpf_link *link;
> > +
> > +       nr_cpus = libbpf_num_possible_cpus();
> > +       if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
> > +               return;
> > +
> > +       /* load program */
> > +       err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
> > +       if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
> > +               return;
> > +
> > +       prog = bpf_object__find_program_by_title(obj, prog_name);
> > +       if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
> > +               goto out_close;
> > +
> > +       /* load map */
> > +       perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
> > +       if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
> > +               goto out_close;
> > +
> > +       /* attach kprobe */
> > +       link = bpf_program__attach_kprobe(prog, false /* retprobe */,
> > +                                         "sys_nanosleep");
>
> The attach function "sys_nanosleep" won't work. You can have something
> similar to attach_probe.c.
>
> #ifdef __x86_64__
> #define SYS_KPROBE_NAME "__x64_sys_nanosleep"
> #else
> #define SYS_KPROBE_NAME "sys_nanosleep"
> #endif
>

Yeah, this is going to be a nightmare with those arch-specific names.
I'm wondering if it's worth it to automatically do that in libbpf for
users... But anyway, will fix in v7, thanks!
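
Just to make the idea concrete, a rough sketch of what that libbpf-side
automation could look like is below. The helper name is made up here and
this is not an existing libbpf API; it only mirrors the #ifdef from
attach_probe.c that you quoted:

/* Hypothetical helper (not a real libbpf API): build the arch-specific
 * kprobe symbol for a syscall so callers can pass just "nanosleep".
 * Mirrors the #ifdef used in attach_probe.c.
 */
#include <errno.h>
#include <stdio.h>

static int syscall_kprobe_name(char *buf, size_t buf_sz, const char *syscall)
{
#ifdef __x86_64__
        const char *pfx = "__x64_sys_";
#else
        const char *pfx = "sys_";
#endif
        int n = snprintf(buf, buf_sz, "%s%s", pfx, syscall);

        return (n < 0 || (size_t)n >= buf_sz) ? -ENAMETOOLONG : 0;
}

Callers would then do syscall_kprobe_name(name, sizeof(name), "nanosleep")
and pass the resulting name to bpf_program__attach_kprobe() unchanged.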
>
> > +       if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
> > +               goto out_close;
> > +
> > +       /* set up perf buffer */
> > +       pb_opts.sample_cb = on_sample;
> > +       pb_opts.ctx = &cpu_seen;
> > +       pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
> > +       if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
> > +               goto out_detach;
> > +
> > +       /* trigger kprobe on every CPU */
> > +       CPU_ZERO(&cpu_seen);
> > +       for (i = 0; i < nr_cpus; i++) {
> > +               CPU_ZERO(&cpu_set);
> > +               CPU_SET(i, &cpu_set);
> > +
> > +               err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set),
> > +                                            &cpu_set);
> > +               if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
> > +                                i, err))
> > +                       goto out_detach;
> > +
> > +               usleep(1);
> > +       }
> > +
> > +       /* read perf buffer */
> > +       err = perf_buffer__poll(pb, 100);
> > +       if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
> > +               goto out_free_pb;
> > +
> > +       if (CHECK(CPU_COUNT(&cpu_seen) != nr_cpus, "seen_cpu_cnt",
> > +                 "expect %d, seen %d\n", nr_cpus, CPU_COUNT(&cpu_seen)))
> > +               goto out_free_pb;
> > +
> > +out_free_pb:
> > +       perf_buffer__free(pb);
> > +out_detach:
> > +       bpf_link__destroy(link);
> > +out_close:
> > +       bpf_object__close(obj);
> > +}
> > diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
> > new file mode 100644
> > index 000000000000..876c27deb65a
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
> > @@ -0,0 +1,25 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +// Copyright (c) 2019 Facebook
> > +
> > +#include <linux/ptrace.h>
> > +#include <linux/bpf.h>
> > +#include "bpf_helpers.h"
> > +
> > +struct {
> > +       __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
> > +       __uint(key_size, sizeof(int));
> > +       __uint(value_size, sizeof(int));
> > +} perf_buf_map SEC(".maps");
> > +
> > +SEC("kprobe/sys_nanosleep")
> > +int handle_sys_nanosleep_entry(struct pt_regs *ctx)
> > +{
> > +       int cpu = bpf_get_smp_processor_id();
> > +
> > +       bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU,
> > +                             &cpu, sizeof(cpu));
> > +       return 0;
> > +}
> > +
> > +char _license[] SEC("license") = "GPL";
> > +__u32 _version SEC("version") = 1;
> >