The X1E80100 SoC hosts a number of CPU boost frequencies, so let's
enable boost support if the freq_table has any OPPs marked as turbo
in it.

Signed-off-by: Sibi Sankar <quic_sibis@xxxxxxxxxxx>
---
v3:
* Don't set per-policy boost flags from the cpufreq driver. [Viresh]

 drivers/cpufreq/scmi-cpufreq.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 0b483bd0d3ca..3b4f6bfb2f4c 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -30,6 +30,7 @@ struct scmi_data {
 
 static struct scmi_protocol_handle *ph;
 static const struct scmi_perf_proto_ops *perf_ops;
+static struct cpufreq_driver scmi_cpufreq_driver;
 
 static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
 {
@@ -167,6 +168,12 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch)
 	return rate_limit;
 }
 
+static struct freq_attr *scmi_cpufreq_hw_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+	NULL,
+};
+
 static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 {
 	int ret, nr_opp, domain;
@@ -276,6 +283,17 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 	policy->transition_delay_us =
 		scmi_get_rate_limit(domain, policy->fast_switch_possible);
 
+	if (policy_has_boost_freq(policy)) {
+		ret = cpufreq_enable_boost_support();
+		if (ret) {
+			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+			goto out_free_opp;
+		} else {
+			scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+			scmi_cpufreq_driver.boost_enabled = true;
+		}
+	}
+
 	return 0;
 
 out_free_opp:
@@ -334,7 +352,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
 		 CPUFREQ_NEED_INITIAL_FREQ_CHECK |
 		 CPUFREQ_IS_COOLING_DEV,
 	.verify	= cpufreq_generic_frequency_table_verify,
-	.attr	= cpufreq_generic_attr,
+	.attr	= scmi_cpufreq_hw_attr,
 	.target_index	= scmi_cpufreq_set_target,
 	.fast_switch	= scmi_cpufreq_fast_switch,
 	.get = scmi_cpufreq_get_rate,
-- 
2.34.1
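
Not part of the patch, but for context: policy_has_boost_freq() in the
cpufreq core walks the policy's frequency table looking for entries
flagged CPUFREQ_BOOST_FREQ, which is how OPPs marked as turbo end up
represented once the OPP layer has populated the table. A simplified,
illustrative version of that check (using standard cpufreq helpers, not
code from this series) looks roughly like this:

/*
 * Illustrative sketch only, not part of the patch: scan the policy's
 * frequency table for entries carrying the CPUFREQ_BOOST_FREQ flag,
 * which turbo OPPs are tagged with.
 */
#include <linux/cpufreq.h>

static bool example_policy_has_boost_freq(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *pos, *table = policy->freq_table;

	if (!table)
		return false;

	cpufreq_for_each_valid_entry(pos, table)
		if (pos->flags & CPUFREQ_BOOST_FREQ)
			return true;

	return false;
}

When such an entry is found, the patch calls cpufreq_enable_boost_support()
so the core's global boost control becomes available, and swaps in
cpufreq_freq_attr_scaling_boost_freqs so the boost frequencies are also
exposed through the policy's sysfs attributes.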