Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 kernel/events/core.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1153,6 +1153,8 @@ void perf_pmu_enable(struct pmu *pmu)
 		pmu->pmu_enable(pmu);
 }
 
+DEFINE_GUARD(perf_pmu_disable, struct pmu *, perf_pmu_disable(_T), perf_pmu_enable(_T))
+
 static void perf_assert_pmu_disabled(struct pmu *pmu)
 {
 	WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0);
@@ -2489,7 +2491,6 @@ event_sched_in(struct perf_event *event,
 {
 	struct perf_event_pmu_context *epc = event->pmu_ctx;
 	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
-	int ret = 0;
 
 	WARN_ON_ONCE(event->ctx != ctx);
 
@@ -2517,15 +2518,14 @@ event_sched_in(struct perf_event *event,
 		event->hw.interrupts = 0;
 	}
 
-	perf_pmu_disable(event->pmu);
+	guard(perf_pmu_disable)(event->pmu);
 
 	perf_log_itrace_start(event);
 
 	if (event->pmu->add(event, PERF_EF_START)) {
 		perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
 		event->oncpu = -1;
-		ret = -EAGAIN;
-		goto out;
+		return -EAGAIN;
 	}
 
 	if (!is_software_event(event))
@@ -2536,10 +2536,7 @@ event_sched_in(struct perf_event *event,
 	if (event->attr.exclusive)
 		cpc->exclusive = 1;
 
-out:
-	perf_pmu_enable(event->pmu);
-
-	return ret;
+	return 0;
 }
 
 static int