The following commit has been merged into the sched/core branch of tip:

Commit-ID:     12fc0fdcbd1cc2f906596b77c03d7e5ed58947d1
Gitweb:        https://git.kernel.org/tip/12fc0fdcbd1cc2f906596b77c03d7e5ed58947d1
Author:        Xuewen Yan <xuewen.yan@xxxxxxxxxx>
AuthorDate:    Wed, 19 Feb 2025 17:37:46 +08:00
Committer:     Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Fri, 14 Mar 2025 21:13:18 +01:00

sched/uclamp: Always using uclamp_is_used()

Now that we have the uclamp_is_used() helper to check whether uclamp is
enabled, replace the open-coded static_branch_unlikely(&sched_uclamp_used)
checks with it.

Signed-off-by: Xuewen Yan <xuewen.yan@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Reviewed-by: Hongyan Xia <hongyan.xia2@xxxxxxx>
Reviewed-by: Christian Loehle <christian.loehle@xxxxxxx>
Reviewed-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20250219093747.2612-1-xuewen.yan@xxxxxxxxxx
---
 kernel/sched/core.c  |  4 ++--
 kernel/sched/sched.h | 28 ++++++++++++++--------------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 621cfc7..45daa41 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1756,7 +1756,7 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
 	 * The condition is constructed such that a NOP is generated when
 	 * sched_uclamp_used is disabled.
 	 */
-	if (!static_branch_unlikely(&sched_uclamp_used))
+	if (!uclamp_is_used())
 		return;
 
 	if (unlikely(!p->sched_class->uclamp_enabled))
@@ -1783,7 +1783,7 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
 	 * The condition is constructed such that a NOP is generated when
 	 * sched_uclamp_used is disabled.
 	 */
-	if (!static_branch_unlikely(&sched_uclamp_used))
+	if (!uclamp_is_used())
 		return;
 
 	if (unlikely(!p->sched_class->uclamp_enabled))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 023b844..8d42d3c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3394,6 +3394,19 @@ static inline bool update_other_load_avgs(struct rq *rq) { return false; }
 
 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
+/*
+ * When uclamp is compiled in, the aggregation at rq level is 'turned off'
+ * by default in the fast path and only gets turned on once userspace performs
+ * an operation that requires it.
+ *
+ * Returns true if userspace opted-in to use uclamp and aggregation at rq level
+ * hence is active.
+ */
+static inline bool uclamp_is_used(void)
+{
+	return static_branch_likely(&sched_uclamp_used);
+}
+
 static inline unsigned long uclamp_rq_get(struct rq *rq,
 					  enum uclamp_id clamp_id)
 {
@@ -3417,7 +3430,7 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
 	unsigned long rq_util;
 	unsigned long max_util;
 
-	if (!static_branch_likely(&sched_uclamp_used))
+	if (!uclamp_is_used())
 		return false;
 
 	rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
@@ -3426,19 +3439,6 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
 	return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
 }
 
-/*
- * When uclamp is compiled in, the aggregation at rq level is 'turned off'
- * by default in the fast path and only gets turned on once userspace performs
- * an operation that requires it.
- *
- * Returns true if userspace opted-in to use uclamp and aggregation at rq level
- * hence is active.
- */
-static inline bool uclamp_is_used(void)
-{
-	return static_branch_likely(&sched_uclamp_used);
-}
-
 #define for_each_clamp_id(clamp_id) \
 	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
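For reference, here is a minimal sketch of the static-key opt-in pattern that
this commit consolidates behind a single helper. It is not part of the patch;
apart from the kernel jump-label API (DEFINE_STATIC_KEY_FALSE,
static_branch_likely, static_branch_enable), every identifier below is
illustrative.

#include <linux/jump_label.h>

/* Off by default; flipped on the first time userspace opts in. */
DEFINE_STATIC_KEY_FALSE(example_feature_used);

/* One inline helper so every call site shares the same check and branch hint. */
static inline bool example_feature_is_used(void)
{
	return static_branch_likely(&example_feature_used);
}

static void example_fast_path(void)
{
	/* Essentially free (a runtime-patched jump/NOP) while the key is disabled. */
	if (!example_feature_is_used())
		return;

	/* ... feature-specific work ... */
}

static void example_opt_in(void)
{
	/* e.g. from a sysctl or cgroup attribute write handler */
	static_branch_enable(&example_feature_used);
}

Keeping the check in one inline helper means the branch hint and any future
change to the opt-in mechanism live in a single place instead of being
repeated at every call site, which is the point of using uclamp_is_used()
everywhere.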