With the dedicated latency list, we no longer need to handle this special
case, because pick_next_entity checks for a runnable latency-sensitive
task.

Signed-off-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Tested-by: K Prateek Nayak <kprateek.nayak@xxxxxxx>
---
 kernel/sched/fair.c | 34 ++--------------------------------
 1 file changed, 2 insertions(+), 32 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a8f0e32431e2..fb2a5b2e3440 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6240,35 +6240,6 @@ static int sched_idle_cpu(int cpu)
 }
 #endif
 
-static void set_next_buddy(struct sched_entity *se);
-
-static void check_preempt_from_others(struct cfs_rq *cfs, struct sched_entity *se)
-{
-	struct sched_entity *next;
-
-	if (se->latency_offset >= 0)
-		return;
-
-	if (cfs->nr_running <= 1)
-		return;
-	/*
-	 * When waking from another class, we don't need to check to preempt at
-	 * wakeup and don't set next buddy as a candidate for being picked in
-	 * priority.
-	 * In case of simultaneous wakeup when current is another class, the
-	 * latency sensitive tasks lost opportunity to preempt non sensitive
-	 * tasks which woke up simultaneously.
-	 */
-
-	if (cfs->next)
-		next = cfs->next;
-	else
-		next = __pick_first_entity(cfs);
-
-	if (next && wakeup_preempt_entity(next, se) == 1)
-		set_next_buddy(se);
-}
-
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
@@ -6355,15 +6326,14 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (!task_new)
 		update_overutilized_status(rq);
 
-	if (rq->curr->sched_class != &fair_sched_class)
-		check_preempt_from_others(cfs_rq_of(&p->se), &p->se);
-
 enqueue_throttle:
 	assert_list_leaf_cfs_rq(rq);
 	hrtick_update(rq);
 }
 
+static void set_next_buddy(struct sched_entity *se);
+
 /*
  * The dequeue_task method is called before nr_running is
  * decreased. We remove the task from the rbtree and
-- 
2.34.1
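
[Editor's note, not part of the patch] For readers outside the series: the
changelog relies on the pick path consulting a dedicated list of latency
sensitive entities before falling back to the normal timeline, which is why
the enqueue-time check removed above becomes redundant. The self-contained C
sketch below only illustrates that selection order; struct entity,
struct run_queue, pick_next and latency_head are made-up names, not the
series' actual data structures or helpers.

/*
 * Hypothetical sketch of the selection order described in the changelog:
 * prefer a runnable latency sensitive entity, then fall back to the
 * regular timeline. Names and layout are illustrative only.
 */
#include <stddef.h>

struct entity {
	long latency_offset;		/* negative => latency sensitive */
	struct entity *next;		/* simple list link for the sketch */
};

struct run_queue {
	struct entity *latency_head;	/* dedicated latency list */
	struct entity *timeline_head;	/* regular vruntime order */
};

/* Pick helper: a latency sensitive entity, if runnable, wins the pick. */
static struct entity *pick_next(struct run_queue *rq)
{
	if (rq->latency_head)
		return rq->latency_head;
	return rq->timeline_head;
}

Because this preference is applied at every pick, a wakeup that enqueues a
latency sensitive task while current belongs to another class does not need
a separate buddy/preemption check at enqueue time: the next pick already
favors that task.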