The system currently evaluates all online CPUs whenever one or more
enters an rt_overload condition.  This suffers from scalability
limitations as the number of online CPUs increases.  So we introduce
a cpumask to track exactly which CPUs need RT balancing.

Signed-off-by: Gregory Haskins <ghaskins@xxxxxxxxxx>
CC: Peter W. Morreale <pmorreale@xxxxxxxxxx>
---

 kernel/sched.c |   12 +++++++++---
 1 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 0ca3905..41b0e9c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -632,6 +632,7 @@ static inline struct rq *this_rq_lock(void)
 
 #if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP)
 static __cacheline_aligned_in_smp atomic_t rt_overload;
+static cpumask_t rto_cpus;
 #endif
 
 static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
@@ -640,8 +641,11 @@ static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
 	if (rt_task(p)) {
 		rq->rt_nr_running++;
 # ifdef CONFIG_SMP
-		if (rq->rt_nr_running == 2)
+		if (rq->rt_nr_running == 2) {
+			cpu_set(rq->cpu, rto_cpus);
+			smp_wmb();
 			atomic_inc(&rt_overload);
+		}
 # endif
 	}
 #endif
@@ -654,8 +658,10 @@ static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
 		WARN_ON(!rq->rt_nr_running);
 		rq->rt_nr_running--;
 # ifdef CONFIG_SMP
-		if (rq->rt_nr_running == 1)
+		if (rq->rt_nr_running == 1) {
 			atomic_dec(&rt_overload);
+			cpu_clear(rq->cpu, rto_cpus);
+		}
 # endif
 	}
 #endif
@@ -1590,7 +1596,7 @@ static void balance_rt_tasks(struct rq *this_rq, int this_cpu)
 	 */
 	next = pick_next_task(this_rq, this_rq->curr);
 
-	for_each_online_cpu(cpu) {
+	for_each_cpu_mask(cpu, rto_cpus) {
 		if (cpu == this_cpu)
 			continue;
 		src_rq = cpu_rq(cpu);
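
As a review aid (not part of the patch), here is a minimal stand-alone
user-space sketch of the bookkeeping this change introduces. All names
in it (fake_rq, rto_mask, fake_inc_rt_tasks, etc.) are hypothetical
stand-ins for the kernel structures; the real code uses cpumask_t,
cpu_set(), cpu_clear() and for_each_cpu_mask(). The sketch is
single-threaded, so it omits the smp_wmb()/atomic ordering the patch
needs so that a remote CPU which observes the rt_overload increment
also observes the corresponding mask bit.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

struct fake_rq {
	int cpu;
	int rt_nr_running;
};

static struct fake_rq runqueues[NR_CPUS];
static uint64_t rto_mask;	/* stand-in for the rto_cpus cpumask */

/* Mirrors inc_rt_tasks(): mark the CPU once a 2nd RT task arrives. */
static void fake_inc_rt_tasks(struct fake_rq *rq)
{
	rq->rt_nr_running++;
	if (rq->rt_nr_running == 2)
		rto_mask |= 1ULL << rq->cpu;
}

/* Mirrors dec_rt_tasks(): unmark once only one RT task remains. */
static void fake_dec_rt_tasks(struct fake_rq *rq)
{
	rq->rt_nr_running--;
	if (rq->rt_nr_running == 1)
		rto_mask &= ~(1ULL << rq->cpu);
}

/* Mirrors the for_each_cpu_mask() loop: visit only overloaded CPUs. */
static void fake_balance_rt_tasks(int this_cpu)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(rto_mask & (1ULL << cpu)) || cpu == this_cpu)
			continue;
		printf("cpu %d would try to pull from cpu %d\n",
		       this_cpu, cpu);
	}
}

int main(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		runqueues[i].cpu = i;

	/* CPU 3 picks up two RT tasks and becomes overloaded. */
	fake_inc_rt_tasks(&runqueues[3]);
	fake_inc_rt_tasks(&runqueues[3]);

	fake_balance_rt_tasks(0);	/* visits only CPU 3 */

	fake_dec_rt_tasks(&runqueues[3]);
	fake_balance_rt_tasks(0);	/* mask is empty; visits nothing */
	return 0;
}

The point of the mask is visible in fake_balance_rt_tasks(): the pull
loop's cost now scales with the number of overloaded CPUs rather than
the number of online CPUs, which is what the changelog claims.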