3.16.36-rc1 review patch.  If anyone has any objections, please let me know.

------------------

From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

commit 8046d6806247088de5725eaf8a2580b29e50ac5a upstream.

In order to be able to use pull_rt_task() from a callback, we need to do
away with the return value.

Since the return value indicates if we should reschedule, do this inside
the function. Since not all callers currently do this, this can increase
the number of reschedules due to rt balancing.

Too many reschedules are not a correctness issue; too few are.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: ktkhai@xxxxxxxxxxxxx
Cc: rostedt@xxxxxxxxxxx
Cc: juri.lelli@xxxxxxxxx
Cc: pang.xunlei@xxxxxxxxxx
Cc: oleg@xxxxxxxxxx
Cc: wanpeng.li@xxxxxxxxxxxxxxx
Cc: umgwanakikbuti@xxxxxxxxx
Link: http://lkml.kernel.org/r/20150611124742.679002000@xxxxxxxxxxxxx
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
[Conflicts: kernel/sched/rt.c]
Signed-off-by: Byungchul Park <byungchul.park@xxxxxxx>
[bwh: Backported to 3.16: use resched_task() instead of resched_curr()]
Signed-off-by: Ben Hutchings <ben@xxxxxxxxxxxxxxx>
---
 kernel/sched/rt.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -244,7 +244,7 @@ int alloc_rt_sched_group(struct task_gro
 
 #ifdef CONFIG_SMP
 
-static int pull_rt_task(struct rq *this_rq);
+static void pull_rt_task(struct rq *this_rq);
 
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 {
@@ -399,9 +399,8 @@ static inline bool need_pull_rt_task(str
 	return false;
 }
 
-static inline int pull_rt_task(struct rq *this_rq)
+static inline void pull_rt_task(struct rq *this_rq)
 {
-	return 0;
 }
 
 static inline void queue_push_tasks(struct rq *rq)
@@ -1772,14 +1771,15 @@ static void push_rt_tasks(struct rq *rq)
 		;
 }
 
-static int pull_rt_task(struct rq *this_rq)
+static void pull_rt_task(struct rq *this_rq)
 {
-	int this_cpu = this_rq->cpu, ret = 0, cpu;
+	int this_cpu = this_rq->cpu, cpu;
+	bool resched = false;
 	struct task_struct *p;
 	struct rq *src_rq;
 
 	if (likely(!rt_overloaded(this_rq)))
-		return 0;
+		return;
 
 	/*
 	 * Match the barrier from rt_set_overloaded; this guarantees that if we
@@ -1836,7 +1836,7 @@ static int pull_rt_task(struct rq *this_
 		if (p->prio < src_rq->curr->prio)
 			goto skip;
 
-		ret = 1;
+		resched = true;
 
 		deactivate_task(src_rq, p, 0);
 		set_task_cpu(p, this_cpu);
@@ -1852,7 +1852,8 @@ skip:
 		double_unlock_balance(this_rq, src_rq);
 	}
 
-	return ret;
+	if (resched)
+		resched_task(this_rq->curr);
 }
 
 /*
@@ -1948,8 +1949,7 @@ static void switched_from_rt(struct rq *
 	if (!p->on_rq || rq->rt.rt_nr_running)
 		return;
 
-	if (pull_rt_task(rq))
-		resched_task(rq->curr);
+	pull_rt_task(rq);
 }
 
 void __init init_sched_rt_class(void)
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
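
P.S. A standalone illustration of the refactoring pattern this patch applies,
separate from the patch itself: a function that returned a "please reschedule"
flag is changed to perform the reschedule internally, so it can later be driven
from a callback whose return value nobody inspects. This is plain userspace C
with entirely made-up names (fake_rq, fake_resched, fake_pull_old,
fake_pull_new) standing in for the kernel's rq, resched_task() and
pull_rt_task(); it is a sketch of the idea, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a runqueue. */
struct fake_rq {
	int cpu;
	int pulled;	/* number of tasks a pull attempt migrated */
};

/* Hypothetical stand-in for resched_task(rq->curr). */
static void fake_resched(struct fake_rq *rq)
{
	printf("reschedule requested on cpu %d\n", rq->cpu);
}

/* Before: the caller must check the return value and reschedule itself. */
static int fake_pull_old(struct fake_rq *rq)
{
	return rq->pulled > 0;	/* 1 = caller should reschedule */
}

/*
 * After: the decision happens inside the function, so callers (and
 * callbacks that ignore return values) cannot forget to act on it.
 */
static void fake_pull_new(struct fake_rq *rq)
{
	bool resched = false;

	if (rq->pulled > 0)
		resched = true;

	if (resched)
		fake_resched(rq);
}

int main(void)
{
	struct fake_rq rq = { .cpu = 0, .pulled = 1 };

	if (fake_pull_old(&rq))	/* old style: caller's responsibility */
		fake_resched(&rq);

	fake_pull_new(&rq);	/* new style: handled internally */
	return 0;
}

The asymmetry the commit message calls out shows up here: a caller of
fake_pull_new() that did not previously act on the flag now triggers an extra
reschedule, which merely costs a little work, whereas a caller of
fake_pull_old() that forgot the check would silently lose one.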