From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

[ Upstream commit 0c2de3f054a59f15e01804b75a04355c48de628c ]

The current sched_slice() seems to have issues; there's two possible
things that could be improved:

 - the 'nr_running' used for __sched_period() is daft when cgroups are
   considered. Using the RQ wide h_nr_running seems like a much more
   consistent number.

 - (esp) cgroups can slice it real fine, which makes for easy
   over-scheduling, ensure min_gran is what the name says.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Tested-by: Valentin Schneider <valentin.schneider@xxxxxxx>
Link: https://lkml.kernel.org/r/20210412102001.611897312@xxxxxxxxxxxxx
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 kernel/sched/fair.c     | 12 +++++++++++-
 kernel/sched/features.h |  3 +++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8c82019d9c6f..828978320e44 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -700,7 +700,13 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
+	unsigned int nr_running = cfs_rq->nr_running;
+	u64 slice;
+
+	if (sched_feat(ALT_PERIOD))
+		nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
+
+	slice = __sched_period(nr_running + !se->on_rq);
 
 	for_each_sched_entity(se) {
 		struct load_weight *load;
@@ -717,6 +723,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		}
 		slice = __calc_delta(slice, se->load.weight, load);
 	}
+
+	if (sched_feat(BASE_SLICE))
+		slice = max(slice, (u64)sysctl_sched_min_granularity);
+
 	return slice;
 }
 
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 68d369cba9e4..f1bf5e12d889 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -90,3 +90,6 @@ SCHED_FEAT(WA_BIAS, true)
  */
 SCHED_FEAT(UTIL_EST, true)
 SCHED_FEAT(UTIL_EST_FASTUP, true)
+
+SCHED_FEAT(ALT_PERIOD, true)
+SCHED_FEAT(BASE_SLICE, true)
-- 
2.30.2
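
To put rough numbers on the over-scheduling the changelog mentions, below
is a standalone userspace sketch (not part of the patch) of what the two
features change. It assumes the default tunables before CPU-count scaling
(6ms sched_latency, 0.75ms sched_min_granularity, nr_latency of 8) and
made-up weights; the real code walks the cgroup hierarchy through
__calc_delta(), which is flattened here to a single weight ratio.

#include <stdio.h>
#include <stdint.h>

/* Kernel default tunables, before CPU-count scaling (assumed). */
#define SYSCTL_SCHED_LATENCY           6000000ULL  /* 6 ms    */
#define SYSCTL_SCHED_MIN_GRANULARITY    750000ULL  /* 0.75 ms */
#define SCHED_NR_LATENCY                      8

#define MAX_U64(a, b) ((a) > (b) ? (a) : (b))

/* Same shape as __sched_period(): stretch the period once the
 * latency target can no longer fit nr_running minimum slices. */
static uint64_t period(unsigned long nr_running)
{
        if (nr_running > SCHED_NR_LATENCY)
                return nr_running * SYSCTL_SCHED_MIN_GRANULARITY;
        return SYSCTL_SCHED_LATENCY;
}

int main(void)
{
        /* Hypothetical scenario: the local cfs_rq sees only 2
         * entities, the RQ as a whole sees 16 runnable tasks, and
         * the entity's share of the total load is 1/64 (a
         * low-weight cgroup). */
        unsigned long local_nr = 2, rq_wide_nr = 16;
        uint64_t weight = 1024, total_weight = 65536;

        /* Old behaviour: period from the local nr_running only. */
        uint64_t old_slice = period(local_nr) * weight / total_weight;

        /* ALT_PERIOD: period from the RQ wide h_nr_running. */
        uint64_t new_slice = period(rq_wide_nr) * weight / total_weight;

        /* BASE_SLICE: never slice finer than min_granularity. */
        new_slice = MAX_U64(new_slice, SYSCTL_SCHED_MIN_GRANULARITY);

        printf("old: %llu ns, new: %llu ns\n",
               (unsigned long long)old_slice,
               (unsigned long long)new_slice);
        return 0;
}

With these numbers the old code hands out ~94us slices, well under
min_granularity; ALT_PERIOD stretches the period to 12ms from the RQ
wide count, and BASE_SLICE clamps the result to the full 750us.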