3.16.50-rc1 review patch.  If anyone has any objections, please let me know.

------------------

From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

commit fa8c269353d560b7c28119ad7617029f92e40b15 upstream.

In order to enable the use of perf_event_read(.group = true), we need
to invert the sibling-child loop nesting of perf_read_group().

Currently we iterate the child list for each sibling, which precludes
using group reads. Flip things around so we iterate each group for
each child.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
[ Made the patch compile and things. ]
Signed-off-by: Sukadev Bhattiprolu <sukadev@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
Cc: Jiri Olsa <jolsa@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Stephane Eranian <eranian@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vince Weaver <vincent.weaver@xxxxxxxxx>
Link: http://lkml.kernel.org/r/1441336073-22750-7-git-send-email-sukadev@xxxxxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
[bwh: Backported to 3.16 as a dependency of commit 2aeb18835476
 ("perf/core: Fix locking for children siblings group read"):
 - Keep the function name perf_event_read_group()
 - Keep using perf_event_read_value()]
Signed-off-by: Ben Hutchings <ben@xxxxxxxxxxxxxxx>
---
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3565,50 +3565,71 @@ u64 perf_event_read_value(struct perf_ev
 }
 EXPORT_SYMBOL_GPL(perf_event_read_value);
 
-static int perf_event_read_group(struct perf_event *event,
-				   u64 read_format, char __user *buf)
+static void __perf_read_group_add(struct perf_event *leader,
+					u64 read_format, u64 *values)
 {
-	struct perf_event *leader = event->group_leader, *sub;
-	struct perf_event_context *ctx = leader->ctx;
-	int n = 0, size = 0, ret;
+	struct perf_event *sub;
+	int n = 1; /* skip @nr */
 	u64 count, enabled, running;
-	u64 values[5];
-
-	lockdep_assert_held(&ctx->mutex);
 
 	count = perf_event_read_value(leader, &enabled, &running);
 
-	values[n++] = 1 + leader->nr_siblings;
+	/*
+	 * Since we co-schedule groups, {enabled,running} times of siblings
+	 * will be identical to those of the leader, so we only publish one
+	 * set.
+	 */
 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = enabled;
+		values[n++] += enabled;
 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = running;
-	values[n++] = count;
+		values[n++] += running;
+
+	/*
+	 * Write {count,id} tuples for every sibling.
+	 */
+	values[n++] += count;
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(leader);
 
-	size = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, size))
-		return -EFAULT;
-
-	ret = size;
-
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
-		n = 0;
-
-		values[n++] = perf_event_read_value(sub, &enabled, &running);
+		values[n++] += perf_event_read_value(sub, &enabled, &running);
 		if (read_format & PERF_FORMAT_ID)
 			values[n++] = primary_event_id(sub);
+	}
+}
 
-		size = n * sizeof(u64);
+static int perf_event_read_group(struct perf_event *event,
+				   u64 read_format, char __user *buf)
+{
+	struct perf_event *leader = event->group_leader, *child;
+	struct perf_event_context *ctx = leader->ctx;
+	int ret = event->read_size;
+	u64 *values;
 
-		if (copy_to_user(buf + ret, values, size)) {
-			return -EFAULT;
-		}
+	lockdep_assert_held(&ctx->mutex);
 
-		ret += size;
-	}
+	values = kzalloc(event->read_size, GFP_KERNEL);
+	if (!values)
+		return -ENOMEM;
+
+	values[0] = 1 + leader->nr_siblings;
+
+	/*
+	 * By locking the child_mutex of the leader we effectively
+	 * lock the child list of all siblings.. XXX explain how.
+	 */
+	mutex_lock(&leader->child_mutex);
+
+	__perf_read_group_add(leader, read_format, values);
+	list_for_each_entry(child, &leader->child_list, child_list)
+		__perf_read_group_add(child, read_format, values);
+
+	mutex_unlock(&leader->child_mutex);
+
+	if (copy_to_user(buf, values, event->read_size))
+		ret = -EFAULT;
+
+	kfree(values);
 
 	return ret;
 }
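For reviewers who want the inversion spelled out, here is a rough, self-contained
userspace sketch of the new accumulation scheme. Everything in it is a hypothetical
stand-in, not kernel code: read_group_add() plays the role of __perf_read_group_add(),
and the counts[][] table stands in for the per-child event values.

	#include <stdio.h>

	#define NR_MEMBERS	3	/* leader + 2 siblings */
	#define NR_CHILDREN	2	/* inherited copies of the group */

	/* Stand-in data: one row per group instance (the parent group
	 * plus each inherited child), one column per group member. */
	static unsigned long long counts[NR_CHILDREN + 1][NR_MEMBERS] = {
		{ 100, 200, 300 },	/* parent group */
		{  10,  20,  30 },	/* child 1 */
		{   1,   2,   3 },	/* child 2 */
	};

	/* Analogue of __perf_read_group_add(): one call accumulates an
	 * entire group instance into the shared values[] buffer. */
	static void read_group_add(const unsigned long long *group,
				   unsigned long long *values)
	{
		int n = 1;	/* skip @nr, as in the patch */
		int i;

		for (i = 0; i < NR_MEMBERS; i++)
			values[n++] += group[i];
	}

	int main(void)
	{
		unsigned long long values[1 + NR_MEMBERS] = { NR_MEMBERS };
		int child, i;

		/*
		 * The inverted nesting: iterate the child list once, and
		 * let each pass walk the whole sibling group, instead of
		 * walking the child list once per sibling.
		 */
		for (child = 0; child <= NR_CHILDREN; child++)
			read_group_add(counts[child], values);

		for (i = 1; i <= NR_MEMBERS; i++)
			printf("member %d total = %llu\n", i - 1, values[i]);
		return 0;	/* prints totals 111, 222, 333 */
	}

The point of the inversion is visible in main(): the child list is walked exactly
once, each pass sums an entire group into the shared values[] buffer, and (in the
real patch) a single copy_to_user() of that buffer replaces the per-sibling copies
of the old code.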