On Fri, Aug 2, 2019 at 7:44 AM Viresh Kumar <viresh.kumar@xxxxxxxxxx> wrote: > > To avoid reducing the frequency of a CPU prematurely, we skip reducing > the frequency if the CPU had been busy recently. > > This should not be done when the limits of the policy are changed, for > example due to thermal throttling. We should always get the frequency > within the new limits as soon as possible. > > Trying to fix this by using only one flag, i.e. need_freq_update, can > lead to a race condition where the flag gets cleared without forcing us > to change the frequency at least once. And so this patch introduces > another flag to avoid that race condition. > > Fixes: ecd288429126 ("cpufreq: schedutil: Don't set next_freq to UINT_MAX") > Cc: v4.18+ <stable@xxxxxxxxxxxxxxx> # v4.18+ > Reported-by: Doug Smythies <doug.smythies@xxxxxxxxx> > Signed-off-by: Viresh Kumar <viresh.kumar@xxxxxxxxxx> > --- > V2->V3: > - Updated commit log. > > V1->V2: > - Fixed the race condition using a different flag. > > @Doug: I haven't changed the code since you last tested these. Your > Tested-by tag can be useful while applying the patches. Thanks. 
> > kernel/sched/cpufreq_schedutil.c | 17 ++++++++++++----- > 1 file changed, 12 insertions(+), 5 deletions(-) > > diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c > index 636ca6f88c8e..2f382b0959e5 100644 > --- a/kernel/sched/cpufreq_schedutil.c > +++ b/kernel/sched/cpufreq_schedutil.c > @@ -40,6 +40,7 @@ struct sugov_policy { > struct task_struct *thread; > bool work_in_progress; > > + bool limits_changed; > bool need_freq_update; > }; > > @@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) > !cpufreq_this_cpu_can_update(sg_policy->policy)) > return false; > > - if (unlikely(sg_policy->need_freq_update)) > + if (unlikely(sg_policy->limits_changed)) { > + sg_policy->limits_changed = false; > + sg_policy->need_freq_update = true; > return true; > + } > > delta_ns = time - sg_policy->last_freq_update_time; > > @@ -437,7 +441,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } > static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) > { > if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) > - sg_policy->need_freq_update = true; > + sg_policy->limits_changed = true; > } > > static void sugov_update_single(struct update_util_data *hook, u64 time, > @@ -447,7 +451,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, > struct sugov_policy *sg_policy = sg_cpu->sg_policy; > unsigned long util, max; > unsigned int next_f; > - bool busy; > + bool busy = false; This shouldn't be necessary -> > > sugov_iowait_boost(sg_cpu, time, flags); > sg_cpu->last_update = time; > @@ -457,7 +461,9 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, > if (!sugov_should_update_freq(sg_policy, time)) > return; > > - busy = sugov_cpu_is_busy(sg_cpu); > + /* Limits may have changed, don't skip frequency update */ > + if (!sg_policy->need_freq_update) > + busy = sugov_cpu_is_busy(sg_cpu); -> if 
this is rewritten as "busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);", which is simpler and avoids the extra branch. > > util = sugov_get_util(sg_cpu); > max = sg_cpu->max; > @@ -831,6 +837,7 @@ static int sugov_start(struct cpufreq_policy *policy) > sg_policy->last_freq_update_time = 0; > sg_policy->next_freq = 0; > sg_policy->work_in_progress = false; > + sg_policy->limits_changed = false; > sg_policy->need_freq_update = false; > sg_policy->cached_raw_freq = 0; > > @@ -879,7 +886,7 @@ static void sugov_limits(struct cpufreq_policy *policy) > mutex_unlock(&sg_policy->work_lock); > } > > - sg_policy->need_freq_update = true; > + sg_policy->limits_changed = true; > } > > struct cpufreq_governor schedutil_gov = { > -- > 2.21.0.rc0.269.g1a574e7a288b >