Adds bit perf_event_attr::inherit_thread, to restrict inheriting events
to children cloned with CLONE_THREAD only.

This option supports the case where an event is supposed to be
process-wide only (including subthreads), but should not propagate
beyond the current process's shared environment.

Link: https://lore.kernel.org/lkml/YBvj6eJR%2FDY2TsEB@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/
Suggested-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
---
v2:
* Add patch to series.
---
 include/linux/perf_event.h      |  5 +++--
 include/uapi/linux/perf_event.h |  3 ++-
 kernel/events/core.c            | 21 ++++++++++++++-------
 kernel/fork.c                   |  2 +-
 4 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fab42cfbd350..982ad61c653a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -955,7 +955,7 @@ extern void __perf_event_task_sched_in(struct task_struct *prev,
 				       struct task_struct *task);
 extern void __perf_event_task_sched_out(struct task_struct *prev,
 					struct task_struct *next);
-extern int perf_event_init_task(struct task_struct *child);
+extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
 extern void perf_event_delayed_put(struct task_struct *task);
@@ -1446,7 +1446,8 @@ perf_event_task_sched_in(struct task_struct *prev,
 static inline void
 perf_event_task_sched_out(struct task_struct *prev,
 			  struct task_struct *next)			{ }
-static inline int perf_event_init_task(struct task_struct *child) { return 0; }
+static inline int perf_event_init_task(struct task_struct *child,
+				       u64 clone_flags) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index ad15e40d7f5d..813efb65fea8 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -389,7 +389,8 @@ struct perf_event_attr {
 				cgroup         :  1, /* include cgroup events */
 				text_poke      :  1, /* include text poke events */
 				build_id       :  1, /* use build id in mmap2 events */
-				__reserved_1   : 29;
+				inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
+				__reserved_1   : 28;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index bff498766065..a8382e6c907c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11597,6 +11597,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	    (attr->sample_type & PERF_SAMPLE_WEIGHT_STRUCT))
 		return -EINVAL;
 
+	if (!attr->inherit && attr->inherit_thread)
+		return -EINVAL;
+
 out:
 	return ret;
 
@@ -12820,12 +12823,13 @@ static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		   struct perf_event_context *parent_ctx,
 		   struct task_struct *child, int ctxn,
-		   int *inherited_all)
+		   u64 clone_flags, int *inherited_all)
 {
 	int ret;
 	struct perf_event_context *child_ctx;
 
-	if (!event->attr.inherit) {
+	if (!event->attr.inherit ||
+	    (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD))) {
 		*inherited_all = 0;
 		return 0;
 	}
@@ -12857,7 +12861,8 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 /*
  * Initialize the perf_event context in task_struct
  */
-static int perf_event_init_context(struct task_struct *child, int ctxn)
+static int perf_event_init_context(struct task_struct *child, int ctxn,
+				   u64 clone_flags)
 {
 	struct perf_event_context *child_ctx, *parent_ctx;
 	struct perf_event_context *cloned_ctx;
@@ -12897,7 +12902,8 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 	 */
 	perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
 		ret = inherit_task_group(event, parent, parent_ctx,
-					 child, ctxn, &inherited_all);
+					 child, ctxn, clone_flags,
+					 &inherited_all);
 		if (ret)
 			goto out_unlock;
 	}
@@ -12913,7 +12919,8 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 
 	perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
 		ret = inherit_task_group(event, parent, parent_ctx,
-					 child, ctxn, &inherited_all);
+					 child, ctxn, clone_flags,
+					 &inherited_all);
 		if (ret)
 			goto out_unlock;
 	}
@@ -12955,7 +12962,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 /*
  * Initialize the perf_event context in task_struct
  */
-int perf_event_init_task(struct task_struct *child)
+int perf_event_init_task(struct task_struct *child, u64 clone_flags)
 {
 	int ctxn, ret;
 
@@ -12964,7 +12971,7 @@ int perf_event_init_task(struct task_struct *child)
 	INIT_LIST_HEAD(&child->perf_event_list);
 
 	for_each_task_context_nr(ctxn) {
-		ret = perf_event_init_context(child, ctxn);
+		ret = perf_event_init_context(child, ctxn, clone_flags);
 		if (ret) {
 			perf_event_free_task(child);
 			return ret;
diff --git a/kernel/fork.c b/kernel/fork.c
index d3171e8e88e5..d090366d1206 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2070,7 +2070,7 @@ static __latent_entropy struct task_struct *copy_process(
 	if (retval)
 		goto bad_fork_cleanup_policy;
 
-	retval = perf_event_init_task(p);
+	retval = perf_event_init_task(p, clone_flags);
 	if (retval)
 		goto bad_fork_cleanup_policy;
 	retval = audit_alloc(p);
-- 
2.30.1.766.gb4fecdf3b7-goog
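
For reference, a minimal userspace usage sketch (not part of the patch): it
assumes a uapi linux/perf_event.h that already carries the new inherit_thread
bit, and the helper name is made up for illustration. As perf_copy_attr() above
enforces, inherit_thread is only valid together with inherit.

/*
 * Usage sketch only. Assumes the patched uapi header; the helper name is
 * purely illustrative.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_thread_inherited_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.inherit = 1;		/* required: inherit_thread without inherit is -EINVAL */
	attr.inherit_thread = 1;	/* inherit only into CLONE_THREAD children */

	/* Count for the calling task (pid == 0), any CPU (cpu == -1). */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

With this, the counter follows new threads of the process but is not inherited
across a plain fork(), matching the "process-wide only" case described in the
commit message.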