This is a note to let you know that I've just added the patch titled

    sched/uclamp: Fix fits_capacity() check in feec()

to the 5.15-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:

    sched-uclamp-fix-fits_capacity-check-in-feec.patch

and it can be found in the queue-5.15 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From stable-owner@xxxxxxxxxxxxxxx Wed Mar 8 17:22:30 2023
From: Qais Yousef <qyousef@xxxxxxxxxxx>
Date: Wed, 8 Mar 2023 16:22:01 +0000
Subject: sched/uclamp: Fix fits_capacity() check in feec()
To: stable@xxxxxxxxxxxxxxx
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>, Vincent Guittot <vincent.guittot@xxxxxxxxxx>, Dietmar Eggemann <dietmar.eggemann@xxxxxxx>, Qais Yousef <qais.yousef@xxxxxxx>, Yun Hsiang <hsiang023167@xxxxxxxxx>
Message-ID: <20230308162207.2886641-2-qyousef@xxxxxxxxxxx>

From: Qais Yousef <qais.yousef@xxxxxxx>

commit 244226035a1f9b2b6c326e55ae5188fab4f428cb upstream.

As reported by Yun Hsiang [1], if a task has its uclamp_min >= 0.8 * 1024,
find_energy_efficient_cpu() will always pick the previous CPU because
fits_capacity() will always return false in this case.

The new util_fits_cpu() logic should handle this correctly for us, as
well as more corner cases where similar failures could occur, like when
using UCLAMP_MAX.

We open code uclamp_rq_util_with() except for the clamp() part;
util_fits_cpu() needs the 'raw' values to be passed to it.

Also introduce uclamp_rq_{set, get}() shorthand accessors for the rq's
uclamp values. This makes the code more readable and ensures the right
rules (use of READ_ONCE/WRITE_ONCE) are respected transparently.

[1] https://lists.linaro.org/pipermail/eas-dev/2020-July/001488.html

Fixes: 1d42509e475c ("sched/fair: Make EAS wakeup placement consider uclamp restrictions")
Reported-by: Yun Hsiang <hsiang023167@xxxxxxxxx>
Signed-off-by: Qais Yousef <qais.yousef@xxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20220804143609.515789-4-qais.yousef@xxxxxxx
(cherry picked from commit 244226035a1f9b2b6c326e55ae5188fab4f428cb)
[Conflict in kernel/sched/fair.c mainly due to new automatic variables
being added on master vs 5.15]
Signed-off-by: Qais Yousef (Google) <qyousef@xxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
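[ Editor's note: for context on the failure mode described above,
  fits_capacity() in kernel/sched/fair.c is an 80% margin check,
  defined as ((cap) * 1280 < (max) * 1024). Below is a minimal
  standalone sketch of the failing arithmetic; the macro matches the
  in-tree definition at the time of this patch, while the surrounding
  scaffolding is illustrative only, not kernel code:

#include <stdio.h>

/* Same 80% margin check as the in-tree macro in kernel/sched/fair.c. */
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)

int main(void)
{
        unsigned long cpu_cap = 1024;   /* largest possible CPU capacity */
        unsigned long uclamp_min = 820; /* >= 0.8 * 1024 */

        /*
         * uclamp_rq_util_with() raises util to at least uclamp_min, so
         * the value handed to fits_capacity() is >= 820 here.
         */
        unsigned long util = uclamp_min;

        /* 820 * 1280 = 1049600 >= 1024 * 1024 = 1048576 -> prints 0. */
        printf("fits: %d\n", fits_capacity(util, cpu_cap));
        return 0;
}

  Since the check fails even on the biggest CPU, every candidate is
  skipped and feec() falls back to prev_cpu; util_fits_cpu() avoids this
  by comparing the raw util and the uclamp boundaries separately. ]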
 kernel/sched/core.c  |   10 +++++-----
 kernel/sched/fair.c  |   26 ++++++++++++++++++++++++--
 kernel/sched/sched.h |   42 +++++++++++++++++++++++++++++++++++++++---
 3 files changed, 68 insertions(+), 10 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1335,7 +1335,7 @@ static inline void uclamp_idle_reset(str
 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
 		return;
 
-	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
+	uclamp_rq_set(rq, clamp_id, clamp_value);
 }
 
 static inline
@@ -1513,8 +1513,8 @@ static inline void uclamp_rq_inc_id(stru
 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
 		bucket->value = uc_se->value;
 
-	if (uc_se->value > READ_ONCE(uc_rq->value))
-		WRITE_ONCE(uc_rq->value, uc_se->value);
+	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
+		uclamp_rq_set(rq, clamp_id, uc_se->value);
 }
 
 /*
@@ -1580,7 +1580,7 @@ static inline void uclamp_rq_dec_id(stru
 	if (likely(bucket->tasks))
 		return;
 
-	rq_clamp = READ_ONCE(uc_rq->value);
+	rq_clamp = uclamp_rq_get(rq, clamp_id);
 	/*
 	 * Defensive programming: this should never happen. If it happens,
 	 * e.g. due to future modification, warn and fixup the expected value.
@@ -1588,7 +1588,7 @@ static inline void uclamp_rq_dec_id(stru
 	 */
 	SCHED_WARN_ON(bucket->value > rq_clamp);
 	if (bucket->value >= rq_clamp) {
 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
-		WRITE_ONCE(uc_rq->value, bkt_clamp);
+		uclamp_rq_set(rq, clamp_id, bkt_clamp);
 	}
 }

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6938,6 +6938,8 @@ compute_energy(struct task_struct *p, in
 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 {
 	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
+	unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0;
+	unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
 	int cpu, best_energy_cpu = prev_cpu, target = -1;
 	unsigned long cpu_cap, util, base_energy = 0;
@@ -6967,6 +6969,8 @@ static int find_energy_efficient_cpu(str
 
 	for (; pd; pd = pd->next) {
 		unsigned long cur_delta, spare_cap, max_spare_cap = 0;
+		unsigned long rq_util_min, rq_util_max;
+		unsigned long util_min, util_max;
 		bool compute_prev_delta = false;
 		unsigned long base_energy_pd;
 		int max_spare_cap_cpu = -1;
@@ -6987,8 +6991,26 @@ static int find_energy_efficient_cpu(str
 			 * much capacity we can get out of the CPU; this is
 			 * aligned with sched_cpu_util().
 			 */
-			util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
-			if (!fits_capacity(util, cpu_cap))
+			if (uclamp_is_used()) {
+				if (uclamp_rq_is_idle(cpu_rq(cpu))) {
+					util_min = p_util_min;
+					util_max = p_util_max;
+				} else {
+					/*
+					 * Open code uclamp_rq_util_with() except for
+					 * the clamp() part. Ie: apply max aggregation
+					 * only. util_fits_cpu() logic requires to
+					 * operate on non clamped util but must use the
+					 * max-aggregated uclamp_{min, max}.
+					 */
+					rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+					rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+
+					util_min = max(rq_util_min, p_util_min);
+					util_max = max(rq_util_max, p_util_max);
+				}
+			}
+			if (!util_fits_cpu(util, util_min, util_max, cpu))
 				continue;
 
 			if (cpu == prev_cpu) {

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2855,6 +2855,23 @@ static inline void cpufreq_update_util(s
 #ifdef CONFIG_UCLAMP_TASK
 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
+static inline unsigned long uclamp_rq_get(struct rq *rq,
+					  enum uclamp_id clamp_id)
+{
+	return READ_ONCE(rq->uclamp[clamp_id].value);
+}
+
+static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
+				 unsigned int value)
+{
+	WRITE_ONCE(rq->uclamp[clamp_id].value, value);
+}
+
+static inline bool uclamp_rq_is_idle(struct rq *rq)
+{
+	return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
+}
+
 /**
  * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
  * @rq: The rq to clamp against. Must not be NULL.
@@ -2890,12 +2907,12 @@ unsigned long uclamp_rq_util_with(struct
 		 * Ignore last runnable task's max clamp, as this task will
 		 * reset it. Similarly, no need to read the rq's min clamp.
 		 */
-		if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+		if (uclamp_rq_is_idle(rq))
 			goto out;
 	}
 
-	min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
-	max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
+	min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
+	max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
 out:
 	/*
 	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
@@ -2941,6 +2958,25 @@ static inline bool uclamp_is_used(void)
 {
 	return false;
 }
+
+static inline unsigned long uclamp_rq_get(struct rq *rq,
+					  enum uclamp_id clamp_id)
+{
+	if (clamp_id == UCLAMP_MIN)
+		return 0;
+
+	return SCHED_CAPACITY_SCALE;
+}
+
+static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
+				 unsigned int value)
+{
+}
+
+static inline bool uclamp_rq_is_idle(struct rq *rq)
+{
+	return false;
+}
 #endif /* CONFIG_UCLAMP_TASK */
 
 #ifdef arch_scale_freq_capacity
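[ Editor's note: the fair.c hunk above relies on the rq-wide uclamp
  values being MAX-aggregated over the runnable tasks, so the waking
  task's own clamps are folded in with max() rather than by clamping
  util directly. A minimal userspace sketch of that aggregation step;
  the rq_* and p_* values are illustrative stand-ins for what
  uclamp_rq_get() and uclamp_eff_value() would return:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        /* Stand-ins for uclamp_rq_get(rq, UCLAMP_MIN/UCLAMP_MAX). */
        unsigned long rq_util_min = 512, rq_util_max = 1024;

        /* Stand-ins for uclamp_eff_value(p, UCLAMP_MIN/UCLAMP_MAX). */
        unsigned long p_util_min = 820, p_util_max = 1024;

        /*
         * Max aggregation only, mirroring the open-coded
         * uclamp_rq_util_with() in the hunk above: util itself is left
         * unclamped and only the boundaries are combined.
         */
        unsigned long util_min = max(rq_util_min, p_util_min);
        unsigned long util_max = max(rq_util_max, p_util_max);

        printf("util_min=%lu util_max=%lu\n", util_min, util_max);
        return 0;
}

  util_fits_cpu() then receives the raw util together with these
  aggregated boundaries, which lets it handle cases such as uclamp_min
  exceeding 80% of the CPU's capacity instead of the always-false
  fits_capacity() result. ]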
Patches currently in stable-queue which might be from stable-owner@xxxxxxxxxxxxxxx are

queue-5.15/sched-fair-detect-capacity-inversion.patch
queue-5.15/sched-uclamp-make-cpu_overutilized-use-util_fits_cpu.patch
queue-5.15/sched-uclamp-fix-fits_capacity-check-in-feec.patch
queue-5.15/sched-fair-consider-capacity-inversion-in-util_fits_cpu.patch
queue-5.15/sched-fair-fixes-for-capacity-inversion-detection.patch
queue-5.15/sched-uclamp-fix-a-uninitialized-variable-warnings.patch
queue-5.15/sched-uclamp-cater-for-uclamp-in-find_energy_efficient_cpu-s-early-exit-condition.patch