Adds bit perf_event_attr::sigtrap, which can be set to cause events to
send SIGTRAP (with si_code TRAP_PERF) to the task where the event
occurred. To distinguish perf events and allow user space to decode
si_perf (if set), the event type is set in si_errno.

The primary motivation is to support synchronous signals on perf events
in the task where an event (such as breakpoints) triggered.

Link: https://lore.kernel.org/lkml/YBv3rAT566k+6zjg@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/
Suggested-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Acked-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
---
v2:
* Use atomic_set(&event->event_limit, 1), since it must always be 0 in
  perf_pending_event_disable().
* Implicitly restrict inheriting events if sigtrap, but the child was
  cloned with CLONE_CLEAR_SIGHAND, because it is not generally safe if
  the child cleared all signal handlers to continue sending SIGTRAP.
---
 include/uapi/linux/perf_event.h |  3 ++-
 kernel/events/core.c            | 28 +++++++++++++++++++++++++++-
 2 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 8c5b9f5ad63f..3a4dbb1688f0 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -391,7 +391,8 @@ struct perf_event_attr {
 				build_id       :  1, /* use build id in mmap2 events */
 				inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
 				remove_on_exec :  1, /* event is removed from task on exec */
-				__reserved_1   : 27;
+				sigtrap        :  1, /* send synchronous SIGTRAP on event */
+				__reserved_1   : 26;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b6434697c516..1e4c949bf75f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6391,6 +6391,17 @@ void perf_event_wakeup(struct perf_event *event)
 	}
 }
 
+static void perf_sigtrap(struct perf_event *event)
+{
+	struct kernel_siginfo info;
+
+	clear_siginfo(&info);
+	info.si_signo = SIGTRAP;
+	info.si_code = TRAP_PERF;
+	info.si_errno = event->attr.type;
+	force_sig_info(&info);
+}
+
 static void perf_pending_event_disable(struct perf_event *event)
 {
 	int cpu = READ_ONCE(event->pending_disable);
@@ -6400,6 +6411,13 @@ static void perf_pending_event_disable(struct perf_event *event)
 
 	if (cpu == smp_processor_id()) {
 		WRITE_ONCE(event->pending_disable, -1);
+
+		if (event->attr.sigtrap) {
+			atomic_set(&event->event_limit, 1); /* rearm event */
+			perf_sigtrap(event);
+			return;
+		}
+
 		perf_event_disable_local(event);
 		return;
 	}
@@ -11428,6 +11446,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	event->state		= PERF_EVENT_STATE_INACTIVE;
 
+	if (event->attr.sigtrap)
+		atomic_set(&event->event_limit, 1);
+
 	if (task) {
 		event->attach_state = PERF_ATTACH_TASK;
 		/*
@@ -11706,6 +11727,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (attr->remove_on_exec && attr->enable_on_exec)
 		return -EINVAL;
 
+	if (attr->sigtrap && !attr->remove_on_exec)
+		return -EINVAL;
+
 out:
 	return ret;
 
@@ -12932,7 +12956,9 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 	struct perf_event_context *child_ctx;
 
 	if (!event->attr.inherit ||
-	    (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD))) {
+	    (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
+	    /* Do not inherit if sigtrap and signal handlers were cleared. */
+	    (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
 		*inherited_all = 0;
 		return 0;
 	}
-- 
2.31.0.291.g576ba9dcdaf-goog
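
For reviewers wanting to try the new bit, below is a minimal, hypothetical
user-space sketch (not part of this patch). It assumes UAPI headers with
this series applied (sigtrap and remove_on_exec in perf_event_attr, plus
the TRAP_PERF si_code from the sibling siginfo patch) and uses a hardware
write breakpoint, the motivating use case:

/*
 * Hypothetical example, not part of this patch: self-monitor a hardware
 * write breakpoint and receive a synchronous SIGTRAP when it fires.
 */
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static volatile int watched;			/* variable being watched */
static volatile sig_atomic_t trap_code;		/* si_code seen in the handler */
static volatile sig_atomic_t trap_type;		/* si_errno == perf event type */

static void sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
{
	trap_code = info->si_code;	/* expected: TRAP_PERF */
	trap_type = info->si_errno;	/* expected: PERF_TYPE_BREAKPOINT */
}

int main(void)
{
	struct perf_event_attr attr;
	struct sigaction sa;
	int fd;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigtrap_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGTRAP, &sa, NULL);

	memset(&attr, 0, sizeof(attr));
	attr.size		= sizeof(attr);
	attr.type		= PERF_TYPE_BREAKPOINT;
	attr.bp_type		= HW_BREAKPOINT_W;
	attr.bp_addr		= (unsigned long)&watched;
	attr.bp_len		= HW_BREAKPOINT_LEN_4;
	attr.sample_period	= 1;
	attr.sigtrap		= 1;	/* new bit added by this patch */
	attr.remove_on_exec	= 1;	/* perf_copy_attr() requires this with sigtrap */

	/* There is no glibc wrapper for perf_event_open(); call it directly. */
	fd = syscall(SYS_perf_event_open, &attr, 0 /* this task */,
		     -1 /* any CPU */, -1 /* no group fd */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	watched = 42;	/* hits the breakpoint -> synchronous SIGTRAP */

	printf("si_code=%d si_errno=%d\n", (int)trap_code, (int)trap_type);
	close(fd);
	return 0;
}

Because perf_pending_event_disable() rearms event_limit to 1 instead of
disabling the event when sigtrap is set, each further breakpoint hit
should deliver another SIGTRAP rather than silently disabling the event.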