On 20/02/25 11:40, Juri Lelli wrote:
> On 19/02/25 19:14, Dietmar Eggemann wrote:

...

> OK. CPU3 + CPU4 (CPU5 offline).
>
> > [ 171.003085] __dl_update() (3) cpu=2 rq->dl.extra_bw=1122848
> > [ 171.003091] __dl_update() (3) cpu=3 rq->dl.extra_bw=1022361
> > [ 171.003096] __dl_update() (3) cpu=4 rq->dl.extra_bw=1035468
> > [ 171.003103] dl_bw_cpus() cpu=2 rd->span=0-2 cpu_active_mask=0-4 cpumask_weight(rd->span)=3 type=DYN
> > [ 171.003113] __dl_server_attach_root() called cpu=2
> > [ 171.003118] dl_bw_cpus() cpu=2 rd->span=0-2 cpu_active_mask=0-4 cpumask_weight(rd->span)=3 type=DYN
> > [ 171.003127] __dl_add() tsk_bw=52428 dl_b->total_bw=157284 type=DYN rd->span=0-2
> > [ 171.003136] __dl_update() (3) cpu=0 rq->dl.extra_bw=477111
> > [ 171.003141] __dl_update() (3) cpu=1 rq->dl.extra_bw=851970
> > [ 171.003147] __dl_update() (3) cpu=2 rq->dl.extra_bw=1105372
> > [ 171.003188] root domain span: 0-2
> > [ 171.003194] default domain span: 3-5
> > [ 171.003220] rd 0-2: Checking EAS, schedutil is mandatory
> > [ 171.005840] psci: CPU5 killed (polled 0 ms)
>
> OK. DYN has (CPU0,1,2) 157284 and DEF (CPU3,4) 104856.
>
> CPU4 going offline (it's isolated on DEF).
>
> > [ 171.006436] dl_bw_deactivate() called cpu=4
> > [ 171.006446] __dl_bw_capacity() mask=3-5 cap=892
> > [ 171.006454] dl_bw_cpus() cpu=4 rd->span=3-5 cpu_active_mask=0-4 cpus=2 type=DEF
> > [ 171.006464] dl_bw_manage: cpu=4 cap=446 fair_server_bw=52428 total_bw=104856 dl_bw_cpus=2 type=DEF span=3-5
> > [ 171.006475] dl_bw_cpus() cpu=4 rd->span=3-5 cpu_active_mask=0-4 cpus=2 type=DEF
> > [ 171.006485] CPU: 4 UID: 0 PID: 36 Comm: cpuhp/4 Not tainted 6.13.0-09343-g9ce523149e08-dirty #172
> > [ 171.006495] Hardware name: ARM Juno development board (r0) (DT)
> > [ 171.006499] Call trace:
> > [ 171.006502]  show_stack+0x18/0x24 (C)
> > [ 171.006514]  dump_stack_lvl+0x74/0x8c
> > [ 171.006528]  dump_stack+0x18/0x24
> > [ 171.006541]  dl_bw_manage+0x3a0/0x500
> > [ 171.006554]  dl_bw_deactivate+0x40/0x50
> > [ 171.006564]  sched_cpu_deactivate+0x34/0x24c
> > [ 171.006579]  cpuhp_invoke_callback+0x138/0x694
> > [ 171.006591]  cpuhp_thread_fun+0xb0/0x198
> > [ 171.006604]  smpboot_thread_fn+0x200/0x224
> > [ 171.006616]  kthread+0x12c/0x204
> > [ 171.006627]  ret_from_fork+0x10/0x20
> > [ 171.006639] __dl_overflow() dl_b->bw=996147 cap=446 cap_scale(dl_b->bw, cap)=433868 dl_b->total_bw=104856 old_bw=52428 new_bw=0 type=DEF rd->span=3-5
> > [ 171.006652] dl_bw_manage() cpu=4 cap=446 overflow=0 req=0 return=0 type=DEF
> > [ 171.006706] partition_sched_domains() called
> > [ 171.006713] CPU: 4 UID: 0 PID: 36 Comm: cpuhp/4 Not tainted 6.13.0-09343-g9ce523149e08-dirty #172
> > [ 171.006722] Hardware name: ARM Juno development board (r0) (DT)
> > [ 171.006727] Call trace:
> > [ 171.006730]  show_stack+0x18/0x24 (C)
> > [ 171.006740]  dump_stack_lvl+0x74/0x8c
> > [ 171.006754]  dump_stack+0x18/0x24
> > [ 171.006767]  partition_sched_domains+0x48/0x7c
> > [ 171.006778]  sched_cpu_deactivate+0x1a8/0x24c
> > [ 171.006792]  cpuhp_invoke_callback+0x138/0x694
> > [ 171.006805]  cpuhp_thread_fun+0xb0/0x198
> > [ 171.006817]  smpboot_thread_fn+0x200/0x224
> > [ 171.006829]  kthread+0x12c/0x204
> > [ 171.006840]  ret_from_fork+0x10/0x20
> > [ 171.006852] partition_sched_domains_locked() ndoms_new=1
> > [ 171.006861] partition_sched_domains_locked() goto match2
> > [ 171.006867] rd 0-2: Checking EAS, schedutil is mandatory
> > [ 171.007774] psci: CPU4 killed (polled 4 ms)
>
> As I guess you were saying above, CPU4 contribution is not removed from
> DEF.
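(To spell out the arithmetic, assuming each per-CPU fair server still
reserves its default ~5% of 1<<20, i.e. the 52428 seen in the traces:
DYN with CPU0-2 carries 3 * 52428 = 157284 and DEF with CPU3-4 carries
2 * 52428 = 104856. Once CPU4 is killed, DEF should be left with a
single 52428 contribution, but the cpu=3 trace below still shows
total_bw=104856.)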
>
> > [ 171.007971] dl_bw_deactivate() called cpu=3
> > [ 171.007981] __dl_bw_capacity() mask=3-5 cap=446
> > [ 171.007989] dl_bw_cpus() cpu=3 rd->span=3-5 cpu_active_mask=0-3 cpus=1 type=DEF
> > [ 171.007999] dl_bw_manage: cpu=3 cap=0 fair_server_bw=52428 total_bw=104856 dl_bw_cpus=1 type=DEF span=3-5
>                                                                  ^^^^
> And this is now wrong. :/

So, CPU4 was still on DEF and we don't go through any of the accounting
functions.

I wonder if we could simplify this by always re-doing the accounting
after root domains are stable (also for partition_sched_domains()).

So, please take a look at what's below. It can definitely be better
encapsulated (also more cleanups are needed) and maybe it's just
useless/stupid (hard to say here because I always see 'pass' whatever I
try to change), but anyway.

Also pushed to the usual branch.

---
 include/linux/sched/deadline.h |  4 ++++
 kernel/cgroup/cpuset.c         | 13 ++++++++-----
 kernel/sched/deadline.c        | 11 ++++++++---
 kernel/sched/topology.c        |  1 +
 4 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 3a912ab42bb5..8fc4918c6f3f 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -34,6 +34,10 @@ static inline bool dl_time_before(u64 a, u64 b)
 struct root_domain;
 extern void dl_add_task_root_domain(struct task_struct *p);
 extern void dl_clear_root_domain(struct root_domain *rd);
+extern void dl_clear_root_domain_cpu(int cpu);
+
+extern u64 dl_generation;
+extern bool dl_bw_visited(int cpu, u64 gen);
 
 #endif /* CONFIG_SMP */
 
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 0f910c828973..52243dcc61ab 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -958,6 +958,8 @@ static void dl_rebuild_rd_accounting(void)
 {
 	struct cpuset *cs = NULL;
 	struct cgroup_subsys_state *pos_css;
+	int cpu;
+	u64 gen = ++dl_generation;
 
 	lockdep_assert_held(&cpuset_mutex);
 	lockdep_assert_cpus_held();
@@ -965,11 +967,12 @@ static void dl_rebuild_rd_accounting(void)
 
 	rcu_read_lock();
 
-	/*
-	 * Clear default root domain DL accounting, it will be computed again
-	 * if a task belongs to it.
-	 */
-	dl_clear_root_domain(&def_root_domain);
+	for_each_possible_cpu(cpu) {
+		if (dl_bw_visited(cpu, gen))
+			continue;
+
+		dl_clear_root_domain_cpu(cpu);
+	}
 
 	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8f7420e0c9d6..a6723ed84e68 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -166,7 +166,7 @@ static inline unsigned long dl_bw_capacity(int i)
 	}
 }
 
-static inline bool dl_bw_visited(int cpu, u64 gen)
+bool dl_bw_visited(int cpu, u64 gen)
 {
 	struct root_domain *rd = cpu_rq(cpu)->rd;
 
@@ -207,7 +207,7 @@ static inline unsigned long dl_bw_capacity(int i)
 	return SCHED_CAPACITY_SCALE;
 }
 
-static inline bool dl_bw_visited(int cpu, u64 gen)
+bool dl_bw_visited(int cpu, u64 gen)
 {
 	return false;
 }
@@ -3037,6 +3037,11 @@ void dl_clear_root_domain(struct root_domain *rd)
 	}
 }
 
+void dl_clear_root_domain_cpu(int cpu) {
+	printk_deferred("%s: cpu=%d\n", __func__, cpu);
+	dl_clear_root_domain(cpu_rq(cpu)->rd);
+}
+
 #endif /* CONFIG_SMP */
 
 static void switched_from_dl(struct rq *rq, struct task_struct *p)
@@ -3216,7 +3221,7 @@ DEFINE_SCHED_CLASS(dl) = {
 };
 
 /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
-static u64 dl_generation;
+u64 dl_generation;
 
 int sched_dl_global_validate(void)
 {
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index c6a140d8d851..9892e6fa3e57 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2814,5 +2814,6 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 {
 	mutex_lock(&sched_domains_mutex);
 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+	dl_rebuild_rd_accounting();
 	mutex_unlock(&sched_domains_mutex);
 }
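In case it helps to see the intent in isolation, here is a toy
userspace sketch (definitely not kernel code; the toy_* names and the
cpu->rd table are made up) of the dl_generation/dl_bw_visited() idea
the loop above relies on: bump the generation once per rebuild, then
walk all CPUs and clear each root domain only the first time one of
its CPUs is seen, leaving the per-task bandwidth to be added back
afterwards as before.

/*
 * Toy userspace model of the generation/visited trick: several CPUs
 * can point to the same root domain, so the walk must clear each rd
 * only once per rebuild.
 */
#include <stdio.h>
#include <stdint.h>

struct toy_rd {
	const char *name;
	uint64_t visit_gen;		/* mirrors root_domain::visit_gen */
	uint64_t total_bw;		/* stand-in for the dl_bw accounting */
};

static uint64_t toy_generation;		/* mirrors dl_generation */

/* Same shape as dl_bw_visited(): true if this rd was already seen. */
static int toy_visited(struct toy_rd *rd, uint64_t gen)
{
	if (rd->visit_gen == gen)
		return 1;
	rd->visit_gen = gen;
	return 0;
}

static void toy_clear(struct toy_rd *rd)
{
	printf("clearing %s (total_bw was %llu)\n",
	       rd->name, (unsigned long long)rd->total_bw);
	rd->total_bw = 0;
}

int main(void)
{
	struct toy_rd dyn = { "DYN 0-2", 0, 157284 };
	struct toy_rd def = { "DEF 3-5", 0, 104856 };
	/* cpu -> rd mapping as in the Juno setup above */
	struct toy_rd *cpu_rd[6] = { &dyn, &dyn, &dyn, &def, &def, &def };
	uint64_t gen = ++toy_generation;
	int cpu;

	/* Counterpart of the for_each_possible_cpu() loop in the patch. */
	for (cpu = 0; cpu < 6; cpu++) {
		if (toy_visited(cpu_rd[cpu], gen))
			continue;
		toy_clear(cpu_rd[cpu]);
	}

	return 0;
}

With the mapping above it clears DYN once (via CPU0) and DEF once (via
CPU3) and skips the remaining CPUs.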