The following commit has been merged into the sched/core branch of tip:

Commit-ID:     d0f5d3cefc259f498456338d319098dc84393b24
Gitweb:        https://git.kernel.org/tip/d0f5d3cefc259f498456338d319098dc84393b24
Author:        Shrikanth Hegde <sshegde@xxxxxxxxxxxxx>
AuthorDate:    Thu, 07 Mar 2024 14:27:24 +05:30
Committer:     Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Tue, 26 Mar 2024 08:58:59 +01:00

sched/fair: Introduce is_rd_overutilized() helper function to access root_domain::overutilized

The root_domain::overutilized field is accessed via READ_ONCE() in
multiple places; these accesses can be simplified with a helper
function. The helper also makes it more apparent that the field is
meaningful only when EAS is enabled.

No change in functionality intended.

Signed-off-by: Shrikanth Hegde <sshegde@xxxxxxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Reviewed-by: Qais Yousef <qyousef@xxxxxxxxxxx>
Reviewed-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20240307085725.444486-3-sshegde@xxxxxxxxxxxxx
---
 kernel/sched/fair.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1afa4f8..24a7530 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6685,6 +6685,15 @@ static inline bool cpu_overutilized(int cpu)
 	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
 }
 
+/*
+ * Ensure that the caller can do EAS: the overutilized value
+ * makes sense only if EAS is enabled.
+ */
+static inline int is_rd_overutilized(struct root_domain *rd)
+{
+	return READ_ONCE(rd->overutilized);
+}
+
 static inline void set_rd_overutilized_status(struct root_domain *rd,
 					      unsigned int status)
 {
@@ -6704,7 +6713,7 @@ static inline void check_update_overutilized_status(struct rq *rq)
 	if (!sched_energy_enabled())
 		return;
 
-	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
+	if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
 		set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
 }
 #else
@@ -7990,7 +7999,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 
 	rcu_read_lock();
 	pd = rcu_dereference(rd->pd);
-	if (!pd || READ_ONCE(rd->overutilized))
+	if (!pd || is_rd_overutilized(rd))
 		goto unlock;
 
 	/*
@@ -10897,7 +10906,7 @@ static struct sched_group *sched_balance_find_src_group(struct lb_env *env)
 	if (sched_energy_enabled()) {
 		struct root_domain *rd = env->dst_rq->rd;
 
-		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
+		if (rcu_dereference(rd->pd) && !is_rd_overutilized(rd))
 			goto out_balanced;
 	}
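
For readers following along outside the kernel tree, here is a minimal,
self-contained userspace sketch of the accessor pattern this patch
introduces. The READ_ONCE() macro and the root_domain structure below
are simplified stand-ins for illustration, not the kernel's actual
definitions:

	#include <stdio.h>

	/*
	 * Simplified stand-in for the kernel's READ_ONCE(): force a
	 * single volatile load so the compiler cannot tear, fuse or
	 * re-read it.
	 */
	#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

	/* Toy root_domain carrying only the flag relevant here. */
	struct root_domain {
		unsigned int overutilized;
	};

	/*
	 * The helper pattern: wrap the READ_ONCE() in one accessor so
	 * every reader of the flag goes through a single, named entry
	 * point instead of open-coding the load.
	 */
	static inline int is_rd_overutilized(struct root_domain *rd)
	{
		return READ_ONCE(rd->overutilized);
	}

	int main(void)
	{
		struct root_domain rd = { .overutilized = 1 };

		if (is_rd_overutilized(&rd))
			printf("root domain is overutilized\n");
		return 0;
	}

Funneling all readers through one helper also means a later policy
change (for instance, asserting that EAS is enabled at the call site)
only has to be made in one place rather than at every open-coded
READ_ONCE().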