From: "Rafael J. Wysocki" <rafael.j.wysocki@xxxxxxxxx> Notice that hybrid_init_cpu_capacity_scaling() only needs to hold hybrid_capacity_lock around __hybrid_init_cpu_capacity_scaling() calls, so introduce a "locked" wrapper around the latter and call it from the former. This allows to drop a local variable and a label that are not needed any more. Also, rename __hybrid_init_cpu_capacity_scaling() to __hybrid_refresh_cpu_capacity_scaling() for consistency. No intentional functional impact. This is Core-for-CI patch for regression seen [1] [1] https://lore.kernel.org/linux-pm/SJ1PR11MB6129EDBF22F8A90FC3A3EDC8B9582@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/ Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@xxxxxxxxx> References: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12763 Link: https://lore.kernel.org/linux-pm/12554508.O9o76ZdvQC@xxxxxxxxxxxxx/ Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@xxxxxxxxx> --- drivers/cpufreq/intel_pstate.c | 35 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index cd2ac1ba53d2..400337f3b572 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1028,26 +1028,29 @@ static void hybrid_update_cpu_capacity_scaling(void) } } -static void __hybrid_init_cpu_capacity_scaling(void) +static void __hybrid_refresh_cpu_capacity_scaling(void) { hybrid_max_perf_cpu = NULL; hybrid_update_cpu_capacity_scaling(); } -static void hybrid_init_cpu_capacity_scaling(bool refresh) +static void hybrid_refresh_cpu_capacity_scaling(void) { - bool disable_itmt = false; + guard(mutex)(&hybrid_capacity_lock); - mutex_lock(&hybrid_capacity_lock); + __hybrid_refresh_cpu_capacity_scaling(); +} +static void hybrid_init_cpu_capacity_scaling(bool refresh) +{ /* * If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity * scaling has been enabled already and the driver is just changing the * operation mode. */ if (refresh) { - __hybrid_init_cpu_capacity_scaling(); - goto unlock; + hybrid_refresh_cpu_capacity_scaling(); + return; } /* @@ -1056,19 +1059,13 @@ static void hybrid_init_cpu_capacity_scaling(bool refresh) * do not do that when SMT is in use. */ if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) { - __hybrid_init_cpu_capacity_scaling(); - disable_itmt = true; - } - -unlock: - mutex_unlock(&hybrid_capacity_lock); - - /* - * Disabling ITMT causes sched domains to be rebuilt to disable asym - * packing and enable asym capacity. - */ - if (disable_itmt) + hybrid_refresh_cpu_capacity_scaling(); + /* + * Disabling ITMT causes sched domains to be rebuilt to disable asym + * packing and enable asym capacity. + */ sched_clear_itmt_support(); + } } static bool hybrid_clear_max_perf_cpu(void) @@ -1404,7 +1401,7 @@ static void intel_pstate_update_limits_for_all(void) mutex_lock(&hybrid_capacity_lock); if (hybrid_max_perf_cpu) - __hybrid_init_cpu_capacity_scaling(); + __hybrid_refresh_cpu_capacity_scaling(); mutex_unlock(&hybrid_capacity_lock); } -- 2.25.1