The code currently broadcasts a reschedule IPI to every other CPU whenever
an overload occurs.  However, only specific events can create an rt-overload
(e.g. an RT task is added to an RQ, or an RT task is preempted).  Therefore,
we track which CPUs are actually eligible for rebalancing and IPI only the
affected CPUs.

Signed-off-by: Gregory Haskins <ghaskins@xxxxxxxxxx>
CC: Peter W. Morreale <pmorreale@xxxxxxxxxx>
---

 kernel/sched.c |   15 +++++++++++++--
 1 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index a28ca9d..6ca5f4f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -359,6 +359,8 @@ struct rq {
 	unsigned long rto_pulled;
 #endif
 	struct lock_class_key rq_lock_key;
+
+	cpumask_t rto_resched; /* Which of our peers needs rescheduling */
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -645,6 +647,9 @@ static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
 		smp_wmb();
 		atomic_inc(&rt_overload);
 	}
+
+	cpus_or(rq->rto_resched, rq->rto_resched, p->cpus_allowed);
+	cpu_clear(rq->cpu, rq->rto_resched);
 # endif
 }
 #endif
@@ -2213,9 +2218,15 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 * If we pushed an RT task off the runqueue,
 	 * then kick other CPUs, they might run it:
 	 */
-	if (unlikely(rt_task(current) && rq->rt_nr_running > 1)) {
+	if (unlikely(rt_task(current) && prev->se.on_rq && rt_task(prev))) {
+		cpus_or(rq->rto_resched, rq->rto_resched, prev->cpus_allowed);
+		cpu_clear(rq->cpu, rq->rto_resched);
+	}
+
+	if (unlikely(rq->rt_nr_running > 1 && !cpus_empty(rq->rto_resched))) {
 		schedstat_inc(rq, rto_schedule);
-		smp_send_reschedule_allbutself();
+		smp_send_reschedule_allbutself_cpumask(rq->rto_resched);
+		cpus_clear(rq->rto_resched);
 	}
 #endif
 	prev_state = prev->state;
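
[ Not part of the patch: a minimal sketch of what the targeted-IPI helper
  used above could look like, assuming it is layered on the existing
  per-cpu smp_send_reschedule().  The actual
  smp_send_reschedule_allbutself_cpumask() in the -rt tree may be
  implemented differently. ]

static void smp_send_reschedule_allbutself_cpumask(cpumask_t mask)
{
	int cpu;

	/* Never IPI the local CPU; it reschedules on its own. */
	cpu_clear(smp_processor_id(), mask);

	/* Kick each remaining CPU in the mask. */
	for_each_cpu_mask(cpu, mask)
		smp_send_reschedule(cpu);
}

With this, rq->rto_resched (the union of the cpus_allowed masks of the
tasks that raised the overload, minus the local CPU) limits the IPIs to
CPUs that could actually run one of the overloaded RT tasks.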