This commit enables the BPF backend in a perf record session for AUX
pause. It introduces an option "--bpf-aux-pause" for specifying the
kprobe and tracepoint events that trigger AUX pause and resume.

Once the option is set, the BPF program is loaded and the specified
events are written into a BPF map, so the BPF program in the kernel
knows which events should pause the AUX trace and which should resume
it.

Example usage after this change:

  perf record -e cs_etm/aux-action=start-paused/ \
    --bpf-aux-pause="kretprobe:__arm64_sys_openat:p,kprobe:__arm64_sys_openat:r,tp:sched:sched_switch:r" \
    -a -- ls

  perf record -e cs_etm/aux-action=start-paused/ \
    --bpf-aux-pause="kretprobe:__arm64_sys_openat:p,kprobe:__arm64_sys_openat:r,tp:sched:sched_switch:r" \
    --per-thread -i -- ls

Signed-off-by: Leo Yan <leo.yan@xxxxxxx>
---
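Side note, to make the "--bpf-aux-pause" item format above concrete:
below is a minimal, self-contained sketch of how one comma-separated
item could be split into probe type, tracepoint category, probe name
and pause/resume action. The struct and function names here
(aux_pause_spec, parse_aux_pause_item) are hypothetical and not taken
from this series; the real parsing lives in the auxtrace BPF support
code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct aux_pause_spec {
	char type[16];		/* kprobe, kretprobe, tp or tracepoint */
	char category[64];	/* tracepoints only, e.g. "sched" */
	char name[128];		/* probe or tracepoint name */
	bool pause;		/* true: 'p' (pause), false: 'r' (resume) */
};

/* Parse one item, e.g. "kprobe:__arm64_sys_openat:r". */
static int parse_aux_pause_item(const char *item, struct aux_pause_spec *spec)
{
	char *buf = strdup(item), *save = NULL, *tok[4];
	int n = 0, ret = -1;

	if (!buf)
		return -1;

	for (char *p = strtok_r(buf, ":", &save); p && n < 4;
	     p = strtok_r(NULL, ":", &save))
		tok[n++] = p;

	/* kprobes take three fields, tracepoints add a category field. */
	if (n == 3 && (!strcmp(tok[0], "kprobe") || !strcmp(tok[0], "kretprobe"))) {
		snprintf(spec->type, sizeof(spec->type), "%s", tok[0]);
		spec->category[0] = '\0';
		snprintf(spec->name, sizeof(spec->name), "%s", tok[1]);
		spec->pause = !strcmp(tok[2], "p");
		ret = 0;
	} else if (n == 4 && (!strcmp(tok[0], "tp") || !strcmp(tok[0], "tracepoint"))) {
		snprintf(spec->type, sizeof(spec->type), "%s", tok[0]);
		snprintf(spec->category, sizeof(spec->category), "%s", tok[1]);
		snprintf(spec->name, sizeof(spec->name), "%s", tok[2]);
		spec->pause = !strcmp(tok[3], "p");
		ret = 0;
	}

	free(buf);
	return ret;
}

int main(void)
{
	struct aux_pause_spec spec;

	if (!parse_aux_pause_item("tp:sched:sched_switch:r", &spec))
		printf("%s %s:%s -> %s\n", spec.type, spec.category,
		       spec.name, spec.pause ? "pause" : "resume");
	return 0;
}

Running this sketch prints "tp sched:sched_switch -> resume", matching
the last item of the examples above.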
 tools/perf/builtin-record.c | 18 +++++++++++++++++-
 tools/perf/util/evsel.c     |  6 ++++++
 tools/perf/util/record.h    |  1 +
 3 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 49361c5b2251..ae6bb23e0233 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -864,7 +864,12 @@ static int record__auxtrace_init(struct record *rec)
 	if (err)
 		return err;
 
-	return auxtrace_parse_filters(rec->evlist);
+	err = auxtrace_parse_filters(rec->evlist);
+	if (err)
+		return err;
+
+	return auxtrace__prepare_bpf(rec->itr,
+				     rec->opts.auxtrace_bpf_aux_pause_opts);
 }
 
 #else
@@ -2486,6 +2491,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 
 	evlist__config(rec->evlist, opts, &callchain_param);
 
+	err = auxtrace__set_bpf_filter(rec->evlist, opts);
+	if (err)
+		goto out_free_threads;
+
 	/* Debug message used by test scripts */
 	pr_debug3("perf record opening and mmapping events\n");
 	if (record__open(rec) != 0) {
@@ -2556,6 +2565,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 	if (record__start_threads(rec))
 		goto out_free_threads;
 
+	if (auxtrace__enable_bpf())
+		goto out_free_threads;
+
 	/*
 	 * When perf is starting the traced process, all the events
 	 * (apart from group members) have enable_on_exec=1 set,
@@ -2875,6 +2887,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 	}
 
 out_delete_session:
+	auxtrace__cleanup_bpf();
 #ifdef HAVE_EVENTFD_SUPPORT
 	if (done_fd >= 0) {
 		fd = done_fd;
@@ -3566,6 +3579,9 @@ static struct option __record_options[] = {
 	OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
 	OPT_STRING(0, "setup-filter", &record.filter_action, "pin|unpin",
 		   "BPF filter action"),
+	OPT_STRING_OPTARG(0, "bpf-aux-pause", &record.opts.auxtrace_bpf_aux_pause_opts,
+			  "{kprobe|kretprobe|tp|tracepoint}:{category}:trace_name:{p|r}",
+			  "Enable AUX pause with BPF backend", ""),
 	OPT_END()
 };
 
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f6a5284ed5f9..a77053e546bc 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -2586,6 +2586,12 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 
 			bpf_counter__install_pe(evsel, idx, fd);
 
+			/* Update event info into BPF map for AUX trace */
+			if (auxtrace__update_bpf_map(evsel, idx, fd)) {
+				err = -EINVAL;
+				goto out_close;
+			}
+
 			if (unlikely(test_attr__enabled())) {
 				test_attr__open(&evsel->core.attr, pid, cpu, fd,
 						group_fd, evsel->open_flags);
diff --git a/tools/perf/util/record.h b/tools/perf/util/record.h
index a6566134e09e..051a4b71721b 100644
--- a/tools/perf/util/record.h
+++ b/tools/perf/util/record.h
@@ -64,6 +64,7 @@ struct record_opts {
 	size_t	      auxtrace_snapshot_size;
 	const char   *auxtrace_snapshot_opts;
 	const char   *auxtrace_sample_opts;
+	const char   *auxtrace_bpf_aux_pause_opts;
 	bool	      sample_transaction;
 	bool	      use_clockid;
 	clockid_t     clockid;
-- 
2.34.1
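
P.S. The auxtrace__update_bpf_map() hook added in evsel.c needs to
publish the just-opened perf event fd to the in-kernel BPF program
through a map. The snippet below is only an illustrative sketch of
that kind of update using libbpf's low-level bpf_map_update_elem()
API; the helper name (update_aux_event_map), map layout and key scheme
are assumptions for illustration, not the actual implementation in
this series.

#include <bpf/bpf.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

/*
 * Store the perf event fd for slot 'idx' (a per-CPU/thread index) into
 * a BPF map, so the BPF program can pause/resume the matching AUX
 * event. 'map_fd' would come from the loaded AUX pause BPF object.
 */
static int update_aux_event_map(int map_fd, int idx, int perf_event_fd)
{
	__u32 key = idx;

	if (bpf_map_update_elem(map_fd, &key, &perf_event_fd, BPF_ANY)) {
		fprintf(stderr, "failed to update AUX event map: %s\n",
			strerror(errno));
		return -1;
	}
	return 0;
}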