To optimize the cgroup context switch, the perf_event_pmu_context
iteration skips the PMUs without cgroup events. A bool cgroup was
introduced to indicate the case. It works, but this way is hard to
extend to other cases, e.g., skipping non-passthrough PMUs. It doesn't
make sense to keep adding bool variables.

Pass the event_type instead of a specific bool variable. Check both the
event_type and the related pmu_ctx variables to decide whether to skip
a PMU.

Event flags, e.g., EVENT_CGROUP, should be cleared from
ctx->is_active. Add EVENT_FLAGS to cover such event flags.

No functional change.

Signed-off-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
---
 kernel/events/core.c | 70 +++++++++++++++++++++++---------------------
 1 file changed, 37 insertions(+), 33 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index abd4027e3859..95d1d5a5addc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -376,6 +376,7 @@ enum event_type_t {
 	/* see ctx_resched() for details */
 	EVENT_CPU = 0x8,
 	EVENT_CGROUP = 0x10,
+	EVENT_FLAGS = EVENT_CGROUP,
 	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
 };
 
@@ -699,23 +700,32 @@ do {								\
 	___p;							\
 })
 
-static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
+static bool perf_skip_pmu_ctx(struct perf_event_pmu_context *pmu_ctx,
+			      enum event_type_t event_type)
+{
+	if ((event_type & EVENT_CGROUP) && !pmu_ctx->nr_cgroups)
+		return true;
+
+	return false;
+}
+
+static void perf_ctx_disable(struct perf_event_context *ctx, enum event_type_t event_type)
 {
 	struct perf_event_pmu_context *pmu_ctx;
 
 	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-		if (cgroup && !pmu_ctx->nr_cgroups)
+		if (perf_skip_pmu_ctx(pmu_ctx, event_type))
 			continue;
 		perf_pmu_disable(pmu_ctx->pmu);
 	}
 }
 
-static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
+static void perf_ctx_enable(struct perf_event_context *ctx, enum event_type_t event_type)
 {
 	struct perf_event_pmu_context *pmu_ctx;
 
 	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-		if (cgroup && !pmu_ctx->nr_cgroups)
+		if (perf_skip_pmu_ctx(pmu_ctx, event_type))
 			continue;
 		perf_pmu_enable(pmu_ctx->pmu);
 	}
@@ -877,7 +887,7 @@ static void perf_cgroup_switch(struct task_struct *task)
 		return;
 
 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
-	perf_ctx_disable(&cpuctx->ctx, true);
+	perf_ctx_disable(&cpuctx->ctx, EVENT_CGROUP);
 
 	ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
 	/*
@@ -893,7 +903,7 @@ static void perf_cgroup_switch(struct task_struct *task)
 	 */
 	ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
 
-	perf_ctx_enable(&cpuctx->ctx, true);
+	perf_ctx_enable(&cpuctx->ctx, EVENT_CGROUP);
 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
 
@@ -2729,9 +2739,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 
 	event_type &= EVENT_ALL;
 
-	perf_ctx_disable(&cpuctx->ctx, false);
+	perf_ctx_disable(&cpuctx->ctx, 0);
 	if (task_ctx) {
-		perf_ctx_disable(task_ctx, false);
+		perf_ctx_disable(task_ctx, 0);
 		task_ctx_sched_out(task_ctx, event_type);
 	}
 
@@ -2749,9 +2759,9 @@
 
 	perf_event_sched_in(cpuctx, task_ctx);
 
-	perf_ctx_enable(&cpuctx->ctx, false);
+	perf_ctx_enable(&cpuctx->ctx, 0);
 	if (task_ctx)
-		perf_ctx_enable(task_ctx, false);
+		perf_ctx_enable(task_ctx, 0);
 }
 
 void perf_pmu_resched(struct pmu *pmu)
@@ -3296,9 +3306,6 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	struct perf_event_pmu_context *pmu_ctx;
 	int is_active = ctx->is_active;
-	bool cgroup = event_type & EVENT_CGROUP;
-
-	event_type &= ~EVENT_CGROUP;
 
 	lockdep_assert_held(&ctx->lock);
 
@@ -3333,7 +3340,7 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
 		barrier();
 	}
 
-	ctx->is_active &= ~event_type;
+	ctx->is_active &= ~(event_type & ~EVENT_FLAGS);
 	if (!(ctx->is_active & EVENT_ALL))
 		ctx->is_active = 0;
 
@@ -3346,7 +3353,7 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
 	is_active ^= ctx->is_active; /* changed bits */
 
 	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-		if (cgroup && !pmu_ctx->nr_cgroups)
+		if (perf_skip_pmu_ctx(pmu_ctx, event_type))
 			continue;
 		__pmu_ctx_sched_out(pmu_ctx, is_active);
 	}
@@ -3540,7 +3547,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
 		if (context_equiv(ctx, next_ctx)) {
 
-			perf_ctx_disable(ctx, false);
+			perf_ctx_disable(ctx, 0);
 
 			/* PMIs are disabled; ctx->nr_pending is stable. */
 			if (local_read(&ctx->nr_pending) ||
@@ -3560,7 +3567,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
 			perf_ctx_sched_task_cb(ctx, false);
 			perf_event_swap_task_ctx_data(ctx, next_ctx);
 
-			perf_ctx_enable(ctx, false);
+			perf_ctx_enable(ctx, 0);
 
 			/*
 			 * RCU_INIT_POINTER here is safe because we've not
@@ -3584,13 +3591,13 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
 
 	if (do_switch) {
 		raw_spin_lock(&ctx->lock);
-		perf_ctx_disable(ctx, false);
+		perf_ctx_disable(ctx, 0);
 
 inside_switch:
 		perf_ctx_sched_task_cb(ctx, false);
 		task_ctx_sched_out(ctx, EVENT_ALL);
 
-		perf_ctx_enable(ctx, false);
+		perf_ctx_enable(ctx, 0);
 		raw_spin_unlock(&ctx->lock);
 	}
 }
@@ -3887,12 +3894,12 @@ static void pmu_groups_sched_in(struct perf_event_context *ctx,
 
 static void ctx_groups_sched_in(struct perf_event_context *ctx,
 				struct perf_event_groups *groups,
-				bool cgroup)
+				enum event_type_t event_type)
 {
 	struct perf_event_pmu_context *pmu_ctx;
 
 	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-		if (cgroup && !pmu_ctx->nr_cgroups)
+		if (perf_skip_pmu_ctx(pmu_ctx, event_type))
 			continue;
 		pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
 	}
@@ -3909,9 +3916,6 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
 {
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	int is_active = ctx->is_active;
-	bool cgroup = event_type & EVENT_CGROUP;
-
-	event_type &= ~EVENT_CGROUP;
 
 	lockdep_assert_held(&ctx->lock);
 
@@ -3929,7 +3933,7 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
 		barrier();
 	}
 
-	ctx->is_active |= (event_type | EVENT_TIME);
+	ctx->is_active |= ((event_type & ~EVENT_FLAGS) | EVENT_TIME);
 	if (ctx->task) {
 		if (!is_active)
 			cpuctx->task_ctx = ctx;
@@ -3944,11 +3948,11 @@
 	 * in order to give them the best chance of going on.
 	 */
 	if (is_active & EVENT_PINNED)
-		ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup);
+		ctx_groups_sched_in(ctx, &ctx->pinned_groups, event_type);
 
 	/* Then walk through the lower prio flexible groups */
 	if (is_active & EVENT_FLEXIBLE)
-		ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup);
+		ctx_groups_sched_in(ctx, &ctx->flexible_groups, event_type);
 }
 
 static void perf_event_context_sched_in(struct task_struct *task)
@@ -3963,11 +3967,11 @@
 
 	if (cpuctx->task_ctx == ctx) {
 		perf_ctx_lock(cpuctx, ctx);
-		perf_ctx_disable(ctx, false);
+		perf_ctx_disable(ctx, 0);
 
 		perf_ctx_sched_task_cb(ctx, true);
 
-		perf_ctx_enable(ctx, false);
+		perf_ctx_enable(ctx, 0);
 		perf_ctx_unlock(cpuctx, ctx);
 		goto rcu_unlock;
 	}
@@ -3980,7 +3984,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
 	if (!ctx->nr_events)
 		goto unlock;
 
-	perf_ctx_disable(ctx, false);
+	perf_ctx_disable(ctx, 0);
 	/*
 	 * We want to keep the following priority order:
 	 * cpu pinned (that don't need to move), task pinned,
@@ -3990,7 +3994,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
 	 * events, no need to flip the cpuctx's events around.
 	 */
 	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
-		perf_ctx_disable(&cpuctx->ctx, false);
+		perf_ctx_disable(&cpuctx->ctx, 0);
 		ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
 	}
 
@@ -3999,9 +4003,9 @@
 	perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
 
 	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
-		perf_ctx_enable(&cpuctx->ctx, false);
+		perf_ctx_enable(&cpuctx->ctx, 0);
 
-	perf_ctx_enable(ctx, false);
+	perf_ctx_enable(ctx, 0);
 
 unlock:
 	perf_ctx_unlock(cpuctx, ctx);

Here is the second patch, which introduces EVENT_GUEST and perf_guest_exit/enter().
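As an aside, for anyone who wants to poke at the filtering pattern outside
the kernel tree, here is a minimal user-space sketch of the idea. Only
EVENT_CGROUP/nr_cgroups exist in the patch above; the EVENT_GUEST bit and
the nr_guest counter below are hypothetical stand-ins for what the second
patch is expected to add, just to show how another case slots into the
single predicate.

/* sketch.c - standalone illustration of the perf_skip_pmu_ctx() pattern.
 * Build: gcc -std=c11 -Wall sketch.c -o sketch
 * EVENT_GUEST and nr_guest are hypothetical; only EVENT_CGROUP and
 * nr_cgroups exist in the patch above.
 */
#include <stdbool.h>
#include <stdio.h>

enum event_type_t {
	EVENT_FLEXIBLE	= 0x1,
	EVENT_PINNED	= 0x2,
	EVENT_CGROUP	= 0x10,
	EVENT_GUEST	= 0x20,				/* hypothetical */
	EVENT_FLAGS	= EVENT_CGROUP | EVENT_GUEST,	/* iteration-only bits */
	EVENT_ALL	= EVENT_FLEXIBLE | EVENT_PINNED,
};

struct pmu_ctx {
	const char *name;
	int nr_cgroups;		/* cgroup events on this PMU */
	int nr_guest;		/* hypothetical passthrough-event counter */
};

/* One central predicate instead of one bool parameter per skip case. */
static bool skip_pmu_ctx(const struct pmu_ctx *pc, enum event_type_t type)
{
	if ((type & EVENT_CGROUP) && !pc->nr_cgroups)
		return true;
	if ((type & EVENT_GUEST) && !pc->nr_guest)	/* hypothetical */
		return true;
	return false;
}

int main(void)
{
	struct pmu_ctx pmus[] = {
		{ .name = "cpu_core", .nr_cgroups = 2, .nr_guest = 0 },
		{ .name = "uncore",   .nr_cgroups = 0, .nr_guest = 0 },
	};
	enum event_type_t type = EVENT_ALL | EVENT_CGROUP;
	int is_active = 0;
	unsigned int i;

	/* Flag bits only steer the iteration; they never land in
	 * is_active, mirroring
	 * ctx->is_active |= ((event_type & ~EVENT_FLAGS) | EVENT_TIME)
	 * in the patch.
	 */
	is_active |= type & ~EVENT_FLAGS;

	for (i = 0; i < sizeof(pmus) / sizeof(pmus[0]); i++) {
		if (skip_pmu_ctx(&pmus[i], type))
			continue;	/* uncore has no cgroup events */
		printf("scheduling %s\n", pmus[i].name);
	}
	printf("is_active = %#x\n", is_active);	/* 0x3, no flag bits */
	return 0;
}

The point of the shape: each new skip case costs one enum bit, one
counter, and one check in the predicate, instead of another bool threaded
through every perf_ctx_disable()/perf_ctx_enable() caller.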