First, we want to prevent placement of RT tasks on non-root rt_rqs; we
achieve this in the task migration code, which now falls back to
root_task_group's rt_rq. Second, when RT_GROUP is disabled, we want the
iteration over all "real" rt_rqs to visit only root_task_group's rt_rq.
To achieve this, we keep root_task_group first on the task_groups list
and break out of the iteration early.

Signed-off-by: Michal Koutný <mkoutny@xxxxxxxx>
---
 kernel/sched/core.c  | 2 +-
 kernel/sched/rt.c    | 9 ++++++---
 kernel/sched/sched.h | 7 +++++++
 3 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 47898f895a5a3..dfd2778622b8b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8958,7 +8958,7 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
 	unsigned long flags;
 
 	spin_lock_irqsave(&task_group_lock, flags);
-	list_add_rcu(&tg->list, &task_groups);
+	list_add_tail_rcu(&tg->list, &task_groups);
 
 	/* Root should already exist: */
 	WARN_ON(!parent);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 41fed8865cb09..923ec978ff756 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -497,6 +497,9 @@ typedef struct task_group *rt_rq_iter_t;
 
 static inline struct task_group *next_task_group(struct task_group *tg)
 {
+	if (!rt_group_sched_enabled())
+		return NULL;
+
 	do {
 		tg = list_entry_rcu(tg->list.next,
 			typeof(struct task_group), list);
@@ -509,9 +512,9 @@ static inline struct task_group *next_task_group(struct task_group *tg)
 }
 
 #define for_each_rt_rq(rt_rq, iter, rq)					\
-	for (iter = container_of(&task_groups, typeof(*iter), list);	\
-		(iter = next_task_group(iter)) &&			\
-		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
+	for (iter = &root_task_group;					\
+	     iter && (rt_rq = iter->rt_rq[cpu_of(rq)]);			\
+	     iter = next_task_group(iter))
 
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = rt_se->parent)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1c457dc1472a3..d8d28c3d1ac5f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2148,6 +2148,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * p->rt.rt_rq is NULL initially and it is easier to assign
+	 * root_task_group's rt_rq than switching in rt_rq_of_se().
+	 * Clobbers tg(!)
+	 */
+	if (!rt_group_sched_enabled())
+		tg = &root_task_group;
 	p->rt.rt_rq = tg->rt_rq[cpu];
 	p->rt.parent = tg->rt_se[cpu];
 #endif
-- 
2.47.1
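
For illustration only, not part of the patch: below is a minimal,
standalone userspace sketch of the iteration contract the patch relies
on. It assumes a simplified singly linked, non-circular list, no RCU,
and no autogroup skipping; all the *_sim names are hypothetical
stand-ins for the kernel's task_groups list, sched_online_group(),
next_task_group() and rt_group_sched_enabled().

#include <stdbool.h>
#include <stdio.h>

struct task_group {
	const char *name;
	struct task_group *next;	/* stand-in for the list walk */
};

static struct task_group root_task_group = { .name = "root" };
static struct task_group *task_groups_tail = &root_task_group;

/* mirrors rt_group_sched_enabled() */
static bool rt_group_sched_enabled_sim;

/* list_add_tail_rcu() analogue: children always land after the root */
static void sched_online_group_sim(struct task_group *tg)
{
	task_groups_tail->next = tg;
	task_groups_tail = tg;
}

static struct task_group *next_task_group_sim(struct task_group *tg)
{
	if (!rt_group_sched_enabled_sim)
		return NULL;	/* only the root is a "real" rt_rq */
	return tg->next;	/* NULL terminates at list end */
}

int main(void)
{
	struct task_group a = { .name = "A" }, b = { .name = "B" };
	struct task_group *iter;

	sched_online_group_sim(&a);
	sched_online_group_sim(&b);

	/* shape of the new for_each_rt_rq(): start at the root */
	rt_group_sched_enabled_sim = false;
	for (iter = &root_task_group; iter; iter = next_task_group_sim(iter))
		printf("disabled: %s\n", iter->name);	/* only "root" */

	rt_group_sched_enabled_sim = true;
	for (iter = &root_task_group; iter; iter = next_task_group_sim(iter))
		printf("enabled:  %s\n", iter->name);	/* root, A, B */

	return 0;
}

The sketch shows why the two changes belong together: tail-insertion
keeps the root first on the list, so starting the walk at
&root_task_group and having next_task_group() return NULL when group
scheduling is disabled degenerates the iteration to exactly one rt_rq.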