The following commit has been merged into the sched/core branch of tip:

Commit-ID:     cea5a3472ac43f18590e1bd6b842f808347a810c
Gitweb:        https://git.kernel.org/tip/cea5a3472ac43f18590e1bd6b842f808347a810c
Author:        Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate:    Fri, 26 Jul 2024 16:27:04 +02:00
Committer:     Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Mon, 29 Jul 2024 12:22:37 +02:00

sched/fair: Cleanup fair_server

The throttle interaction made my brain hurt, make it consistently
about 0 transitions of h_nr_running.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 kernel/sched/fair.c | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee251ac..795ceef 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5849,10 +5849,10 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	/* At this point se is NULL and we are at root level*/
 	sub_nr_running(rq, task_delta);
 
-done:
 	/* Stop the fair server if throttling resulted in no runnable tasks */
 	if (rq_h_nr_running && !rq->cfs.h_nr_running)
 		dl_server_stop(&rq->fair_server);
+done:
 	/*
 	 * Note: distribution will already see us throttled via the
 	 * throttled-list. rq->lock protects completion.
@@ -5940,16 +5940,16 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 			goto unthrottle_throttle;
 	}
 
+	/* Start the fair server if un-throttling resulted in new runnable tasks */
+	if (!rq_h_nr_running && rq->cfs.h_nr_running)
+		dl_server_start(&rq->fair_server);
+
 	/* At this point se is NULL and we are at root level*/
 	add_nr_running(rq, task_delta);
 
 unthrottle_throttle:
 	assert_list_leaf_cfs_rq(rq);
 
-	/* Start the fair server if un-throttling resulted in new runnable tasks */
-	if (!rq_h_nr_running && rq->cfs.h_nr_running)
-		dl_server_start(&rq->fair_server);
-
 	/* Determine whether we need to wake up potentially idle CPU: */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
 		resched_curr(rq);
@@ -6771,6 +6771,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
 	int task_new = !(flags & ENQUEUE_WAKEUP);
+	int rq_h_nr_running = rq->cfs.h_nr_running;
 
 	/*
 	 * The code below (indirectly) updates schedutil which looks at
@@ -6780,13 +6781,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	 */
 	util_est_enqueue(&rq->cfs, p);
 
-	if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running) {
-		/* Account for idle runtime */
-		if (!rq->nr_running)
-			dl_server_update_idle_time(rq, rq->curr);
-		dl_server_start(&rq->fair_server);
-	}
-
 	/*
 	 * If in_iowait is set, the code below may not trigger any cpufreq
 	 * utilization updates, so do it here explicitly with the IOWAIT flag
@@ -6832,6 +6826,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 			goto enqueue_throttle;
 	}
 
+	if (!rq_h_nr_running && rq->cfs.h_nr_running) {
+		/* Account for idle runtime */
+		if (!rq->nr_running)
+			dl_server_update_idle_time(rq, rq->curr);
+		dl_server_start(&rq->fair_server);
+	}
+
 	/* At this point se is NULL and we are at root level*/
 	add_nr_running(rq, 1);
 
@@ -6872,6 +6873,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	int task_sleep = flags & DEQUEUE_SLEEP;
 	int idle_h_nr_running = task_has_idle_policy(p);
 	bool was_sched_idle = sched_idle_rq(rq);
+	int rq_h_nr_running = rq->cfs.h_nr_running;
 
 	util_est_dequeue(&rq->cfs, p);
 
@@ -6926,14 +6928,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	/* At this point se is NULL and we are at root level*/
 	sub_nr_running(rq, 1);
 
+	if (rq_h_nr_running && !rq->cfs.h_nr_running)
+		dl_server_stop(&rq->fair_server);
+
 	/* balance early to pull high priority tasks */
 	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
 		rq->next_balance = jiffies;
 
 dequeue_throttle:
-	if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running)
-		dl_server_stop(&rq->fair_server);
-
 	util_est_update(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
 }
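
For reference, a minimal sketch of the h_nr_running 0-transition pattern the
patch converges on. This is illustrative only, paraphrasing the enqueue/dequeue
hunks above rather than adding anything beyond the patch: snapshot
rq->cfs.h_nr_running before the operation, then start or stop the fair server
only when the count crosses zero.

	int rq_h_nr_running = rq->cfs.h_nr_running;	/* snapshot before the update */

	/* ... enqueue/dequeue work that changes rq->cfs.h_nr_running ... */

	/* 0 -> non-zero: CFS gained runnable tasks, start the fair server */
	if (!rq_h_nr_running && rq->cfs.h_nr_running)
		dl_server_start(&rq->fair_server);

	/* non-zero -> 0: CFS has no runnable tasks left, stop the fair server */
	if (rq_h_nr_running && !rq->cfs.h_nr_running)
		dl_server_stop(&rq->fair_server);

Keying every call off the same before/after comparison is what makes the
throttle, unthrottle, enqueue and dequeue paths read symmetrically.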