From: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>

Instead of dealing with sched classes inside each check_preempt_curr()
implementation, pull out this logic into the generic wakeup preemption
path.

This fixes a hang in KVM (and others) where we are waiting for the
stop machine thread to run.

Tested-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
Signed-off-by: Avi Kivity <avi@xxxxxxxxxx>

diff --git a/kernel/sched.c b/kernel/sched.c
index d42992b..e5ad5c0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -560,18 +560,9 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
-	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
-
-	/*
-	 * A queue event has occurred, and we're going to schedule.  In
-	 * this case, we can save a useless back to back clock update.
-	 */
-	if (test_tsk_need_resched(p))
-		rq->skip_clock_update = 1;
-}
+static inline
+void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 
 static inline int cpu_of(struct rq *rq)
 {
@@ -9400,4 +9391,30 @@ void synchronize_sched_expedited(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 
+static inline
+void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+{
+	const struct sched_class *class;
+
+	if (p->sched_class == rq->curr->sched_class)
+		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+
+	for_each_class(class) {
+		if (class == rq->curr->sched_class)
+			break;
+		if (class == p->sched_class) {
+			resched_task(rq->curr);
+			break;
+		}
+	}
+
+	/*
+	 * A queue event has occurred, and we're going to schedule.  In
+	 * this case, we can save a useless back to back clock update.
+	 */
+	if (test_tsk_need_resched(rq->curr))
+		rq->skip_clock_update = 1;
+}
+
+
 #endif /* #else #ifndef CONFIG_SMP */
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 933f3d1..b8aebc2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1654,12 +1654,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-	if (unlikely(rt_prio(p->prio)))
-		goto preempt;
-
-	if (unlikely(p->sched_class != &fair_sched_class))
-		return;
-
 	if (unlikely(se == pse))
 		return;
 
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
index 45bddc0..ddc225b 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -19,7 +19,7 @@ select_task_rq_stop(struct rq *rq, struct task_struct *p,
 static void
 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 {
-	resched_task(rq->curr); /* we preempt everything */
+	/* we're never preempted */
 }
 
 static struct task_struct *pick_next_task_stop(struct rq *rq)
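As a side note for readers of the diff: the core of the change is the
priority-ordered class walk in the new generic check_preempt_curr().
The standalone user-space program below is a minimal sketch of that
decision, assuming made-up names (fake_sched_class, fake_task,
cross_class_preempt and the hard-coded stop > rt > fair > idle list
are illustrative, not kernel API):

#include <stdio.h>
#include <stdbool.h>

/* One node per scheduling class, linked from highest to lowest priority. */
struct fake_sched_class {
	const char *name;
	const struct fake_sched_class *next;
};

/* Priority order mirrors the kernel: stop > rt > fair > idle. */
static const struct fake_sched_class idle_class = { "idle", NULL };
static const struct fake_sched_class fair_class = { "fair", &idle_class };
static const struct fake_sched_class rt_class   = { "rt",   &fair_class };
static const struct fake_sched_class stop_class = { "stop", &rt_class };

#define for_each_class(class) \
	for (class = &stop_class; class; class = class->next)

struct fake_task {
	const char *comm;
	const struct fake_sched_class *sched_class;
};

/*
 * Model of the generic wakeup-preemption decision: walk the classes
 * from highest to lowest priority; whichever of curr's or p's class
 * is reached first wins.  Same-class wakeups are delegated to the
 * class's own check_preempt_curr() implementation (elided here).
 */
static bool cross_class_preempt(const struct fake_task *curr,
				const struct fake_task *p)
{
	const struct fake_sched_class *class;

	if (p->sched_class == curr->sched_class)
		return false;	/* class-internal policy decides */

	for_each_class(class) {
		if (class == curr->sched_class)
			return false;	/* curr's class ranks higher */
		if (class == p->sched_class)
			return true;	/* p's class ranks higher: preempt */
	}
	return false;
}

int main(void)
{
	struct fake_task vcpu = { "kvm-vcpu",  &fair_class };
	struct fake_task stop = { "migration", &stop_class };

	/* The stop-machine thread must preempt a running fair task... */
	printf("stop preempts fair: %d\n", cross_class_preempt(&vcpu, &stop));
	/* ...but a fair wakeup must never preempt the stop thread. */
	printf("fair preempts stop: %d\n", cross_class_preempt(&stop, &vcpu));
	return 0;
}

Same-class wakeups are deliberately left to the class's own
check_preempt_curr(), exactly as in the patch; the generic walk only
resolves cross-class cases, which is what lets check_preempt_curr_stop()
become a no-op while still guaranteeing the stop-machine thread runs.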