On 07/01/2020 14:42, Quentin Perret wrote: > Hi Qais, > > On Friday 20 Dec 2019 at 16:48:38 (+0000), Qais Yousef wrote: [...] >> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c >> index e591d40fd645..19572dfc175b 100644 >> --- a/kernel/sched/rt.c >> +++ b/kernel/sched/rt.c >> @@ -2147,6 +2147,12 @@ static void pull_rt_task(struct rq *this_rq) >> */ >> static void task_woken_rt(struct rq *rq, struct task_struct *p) >> { >> + /* >> + * When sysctl_sched_rt_uclamp_util_min value is changed by the user, >> + * we apply any new value on the next wakeup, which is here. >> + */ >> + uclamp_rt_sync_default_util_min(p); > > The task has already been enqueued and sugov has been called by then I > think, so this is a bit late. You could do that in uclamp_rq_inc() maybe? That's probably better. Just to be sure I understand: we want this feature (an already-existing RT task gets its UCLAMP_MIN value updated when the sysctl changes) because there could be RT tasks running before the sysctl is set? >> + >> if (!task_running(rq, p) && >> !test_tsk_need_resched(rq->curr) && >> p->nr_cpus_allowed > 1 && >> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h >> index 280a3c735935..337bf17b1a9d 100644 >> --- a/kernel/sched/sched.h >> +++ b/kernel/sched/sched.h >> @@ -2300,6 +2300,8 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} >> #endif /* CONFIG_CPU_FREQ */ >> >> #ifdef CONFIG_UCLAMP_TASK >> +void uclamp_rt_sync_default_util_min(struct task_struct *p); >> + >> unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); >> >> static __always_inline >> @@ -2330,6 +2332,8 @@ static inline unsigned int uclamp_util(struct rq *rq, unsigned int util) >> return uclamp_util_with(rq, util, NULL); >> } >> #else /* CONFIG_UCLAMP_TASK */ >> +void uclamp_rt_sync_default_util_min(struct task_struct *p) {} -void uclamp_rt_sync_default_util_min(struct task_struct *p) {} +static inline void uclamp_rt_sync_default_util_min(struct task_struct *p) {} [...]