From: Juri Lelli <juri.lelli@xxxxxxxxxx>

commit 53916d5fd3c0b658de3463439dd2b7ce765072cb upstream.

Currently we check for bandwidth overflow potentially due to hotplug
operations at the end of sched_cpu_deactivate(), after the cpu going
offline has already been removed from scheduling, active_mask, etc.
This can create issues for DEADLINE tasks, as there is a substantial
race window between the start of sched_cpu_deactivate() and the moment
we possibly decide to roll-back the operation if dl_bw_deactivate()
returns failure in cpuset_cpu_inactive(). An example is a throttled
task that sees its replenishment timer firing while the cpu it was
previously running on is considered offline, but before
dl_bw_deactivate() had a chance to say no and roll-back happened.

Fix this by directly calling dl_bw_deactivate() first thing in
sched_cpu_deactivate() and do the required calculation in the former
function considering the cpu passed as an argument as offline already.

By doing so we also simplify sched_cpu_deactivate(), as there is no
need anymore for any kind of roll-back if we fail early.

Signed-off-by: Juri Lelli <juri.lelli@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Reviewed-by: Phil Auld <pauld@xxxxxxxxxx>
Tested-by: Waiman Long <longman@xxxxxxxxxx>
Link: https://lore.kernel.org/r/Zzc1DfPhbvqDDIJR@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 kernel/sched/core.c     | 22 +++++++---------------
 kernel/sched/deadline.c | 12 ++++++++++--
 2 files changed, 17 insertions(+), 17 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8182,19 +8182,14 @@ static void cpuset_cpu_active(void)
 	cpuset_update_active_cpus();
 }
 
-static int cpuset_cpu_inactive(unsigned int cpu)
+static void cpuset_cpu_inactive(unsigned int cpu)
 {
 	if (!cpuhp_tasks_frozen) {
-		int ret = dl_bw_deactivate(cpu);
-
-		if (ret)
-			return ret;
 		cpuset_update_active_cpus();
 	} else {
 		num_cpus_frozen++;
 		partition_sched_domains(1, NULL, NULL);
 	}
-	return 0;
 }
 
 static inline void sched_smt_present_inc(int cpu)
@@ -8256,6 +8251,11 @@ int sched_cpu_deactivate(unsigned int cp
 	struct rq *rq = cpu_rq(cpu);
 	int ret;
 
+	ret = dl_bw_deactivate(cpu);
+
+	if (ret)
+		return ret;
+
 	/*
 	 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
 	 * load balancing when not active
@@ -8301,15 +8301,7 @@ int sched_cpu_deactivate(unsigned int cp
 		return 0;
 
 	sched_update_numa(cpu, false);
-	ret = cpuset_cpu_inactive(cpu);
-	if (ret) {
-		sched_smt_present_inc(cpu);
-		sched_set_rq_online(rq, cpu);
-		balance_push_set(cpu, false);
-		set_cpu_active(cpu, true);
-		sched_update_numa(cpu, true);
-		return ret;
-	}
+	cpuset_cpu_inactive(cpu);
 	sched_domains_numa_masks_clear(cpu);
 	return 0;
 }
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -3500,6 +3500,13 @@ static int dl_bw_manage(enum dl_bw_reque
 		break;
 	case dl_bw_req_deactivate:
 		/*
+		 * cpu is not off yet, but we need to do the math by
+		 * considering it off already (i.e., what would happen if we
+		 * turn cpu off?).
+		 */
+		cap -= arch_scale_cpu_capacity(cpu);
+
+		/*
 		 * cpu is going offline and NORMAL tasks will be moved away
 		 * from it. We can thus discount dl_server bandwidth
 		 * contribution as it won't need to be servicing tasks after
@@ -3516,9 +3523,10 @@ static int dl_bw_manage(enum dl_bw_reque
 		if (dl_b->total_bw - fair_server_bw > 0) {
 			/*
 			 * Leaving at least one CPU for DEADLINE tasks seems a
-			 * wise thing to do.
+			 * wise thing to do. As said above, cpu is not offline
+			 * yet, so account for that.
 			 */
-			if (dl_bw_cpus(cpu))
+			if (dl_bw_cpus(cpu) - 1)
 				overflow = __dl_overflow(dl_b, cap, fair_server_bw, 0);
 			else
 				overflow = 1;

Patches currently in stable-queue which might be from juri.lelli@xxxxxxxxxx are

queue-6.13/sched-deadline-check-bandwidth-overflow-earlier-for-hotplug.patch
queue-6.13/sched-deadline-restore-dl_server-bandwidth-on-non-destructive-root-domain-changes.patch
queue-6.13/sched-deadline-correctly-account-for-allocated-bandwidth-during-hotplug.patch
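To see the arithmetic of the new dl_bw_req_deactivate path in isolation,
below is a minimal userspace C sketch. It is illustrative only:
deactivate_overflows(), dl_overflow_check() and the numeric capacities are
made-up stand-ins for the kernel's dl_bw_manage(), __dl_overflow() and
dl_bw_cpus(), and the cap_scale() fixed-point scaling is elided, so
bandwidth and capacity are treated here as the same unit.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for __dl_overflow(dl_b, cap, fair_server_bw, 0):
 * would the remaining DEADLINE bandwidth exceed the remaining capacity? */
static bool dl_overflow_check(unsigned long total_bw,
			      unsigned long fair_server_bw,
			      unsigned long cap)
{
	return total_bw - fair_server_bw > cap;
}

/* Mirrors the shape of the dl_bw_req_deactivate branch: the departing
 * CPU is accounted as off *before* the check, and at least one CPU must
 * remain for existing DEADLINE tasks. */
static bool deactivate_overflows(unsigned long cap, unsigned long cpu_cap,
				 int dl_bw_cpus, unsigned long total_bw,
				 unsigned long fair_server_bw)
{
	cap -= cpu_cap;			/* do the math with cpu off already */

	if (total_bw - fair_server_bw > 0) {
		if (dl_bw_cpus - 1)	/* cpu still counts; discount it */
			return dl_overflow_check(total_bw, fair_server_bw, cap);
		return true;		/* last CPU with DL load: refuse */
	}
	return false;
}

int main(void)
{
	/* Two CPUs of capacity 1024; DL load of 512 still fits on the
	 * one that remains, so deactivation is allowed (prints 0)... */
	printf("%d\n", deactivate_overflows(2048, 1024, 2, 512, 0));
	/* ...but taking away the last CPU while DL bandwidth is
	 * allocated must fail (prints 1). */
	printf("%d\n", deactivate_overflows(1024, 1024, 1, 512, 0));
	return 0;
}

The sketch mirrors the two changes in the deadline.c hunk above: capacity
is reduced by the departing CPU before the overflow check, and the CPU
count is discounted by one so the CPU being deactivated no longer counts
toward "at least one CPU left for DEADLINE tasks".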