Only a simple implementation with a static key wrapper; it will be wired
in later.

Signed-off-by: Michal Koutný <mkoutny@xxxxxxxx>
---
 .../admin-guide/kernel-parameters.txt |  5 ++++
 init/Kconfig                          | 11 ++++++++
 kernel/sched/core.c                   | 25 +++++++++++++++++++
 kernel/sched/sched.h                  | 17 +++++++++++++
 4 files changed, 58 insertions(+)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3872bc6ec49d6..1c890c9ad8716 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5937,6 +5937,11 @@
 			Memory area to be used by remote processor image,
 			managed by CMA.
 
+	rt_group_sched=	[KNL] Enable or disable SCHED_RR/FIFO group scheduling
+			when CONFIG_RT_GROUP_SCHED=y. Defaults to
+			!CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED.
+			Format: <bool>
+
 	rw		[KNL] Mount root device read-write on boot
 
 	S		[KNL] Run init in single mode

diff --git a/init/Kconfig b/init/Kconfig
index a20e6efd3f0fb..7823e5ac0311d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1076,6 +1076,17 @@ config RT_GROUP_SCHED
 	  realtime bandwidth for them.
 	  See Documentation/scheduler/sched-rt-group.rst for more information.
 
+config RT_GROUP_SCHED_DEFAULT_DISABLED
+	bool "Require boot parameter to enable group scheduling for SCHED_RR/FIFO"
+	depends on RT_GROUP_SCHED
+	default n
+	help
+	  When set, RT group scheduling is disabled by default. The option is
+	  expressed in inverted form so that merely enabling RT_GROUP_SCHED
+	  still enables group scheduling by default.
+
+	  Say N if unsure.
+
 config EXT_GROUP_SCHED
 	bool
 	depends on SCHED_CLASS_EXT && CGROUP_SCHED

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c6d8232ad9eea..47898f895a5a3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9844,6 +9844,31 @@ static struct cftype cpu_legacy_files[] = {
 	{ }	/* Terminate */
 };
 
+#ifdef CONFIG_RT_GROUP_SCHED
+# ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
+DEFINE_STATIC_KEY_FALSE(rt_group_sched);
+# else
+DEFINE_STATIC_KEY_TRUE(rt_group_sched);
+# endif
+
+static int __init setup_rt_group_sched(char *str)
+{
+	long val;
+
+	if (kstrtol(str, 0, &val) || val < 0 || val > 1) {
+		pr_warn("Unable to set rt_group_sched\n");
+		return 1;
+	}
+	if (val)
+		static_branch_enable(&rt_group_sched);
+	else
+		static_branch_disable(&rt_group_sched);
+
+	return 1;
+}
+__setup("rt_group_sched=", setup_rt_group_sched);
+#endif /* CONFIG_RT_GROUP_SCHED */
+
 static int cpu_extra_stat_show(struct seq_file *sf,
 			       struct cgroup_subsys_state *css)
 {

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 38325bd32a0e0..1c457dc1472a3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1501,6 +1501,23 @@ static inline bool sched_group_cookie_match(struct rq *rq,
 }
 #endif /* !CONFIG_SCHED_CORE */
 
+#ifdef CONFIG_RT_GROUP_SCHED
+# ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
+DECLARE_STATIC_KEY_FALSE(rt_group_sched);
+static inline bool rt_group_sched_enabled(void)
+{
+	return static_branch_unlikely(&rt_group_sched);
+}
+# else
+DECLARE_STATIC_KEY_TRUE(rt_group_sched);
+static inline bool rt_group_sched_enabled(void)
+{
+	return static_branch_likely(&rt_group_sched);
+}
+# endif /* CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED */
+#else
+# define rt_group_sched_enabled()	false
+#endif /* CONFIG_RT_GROUP_SCHED */
 
 static inline void lockdep_assert_rq_held(struct rq *rq)
 {
-- 
2.47.1
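
For reference, with CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED=y the feature
has to be requested explicitly on the kernel command line, e.g.:

	... rt_group_sched=1 ...

while a default-enabled build can turn it off with rt_group_sched=0.
Anything that kstrtol() does not parse to 0 or 1 is rejected with the
pr_warn() above and the static key is left in its previous (initially
compiled-in) state.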
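
Since the wrapper is not wired in yet, here is a sketch of how a later
consumer might use it; rt_task_group_of() is an invented name for
illustration and is not part of this patch, though root_task_group and
task_group() are the existing sched.h symbols:

/*
 * Illustration only (hypothetical consumer of rt_group_sched_enabled()):
 * when RT group scheduling was disabled on the command line, behave as
 * if CONFIG_RT_GROUP_SCHED were off and attribute every SCHED_RR/FIFO
 * task to the root task group.
 */
static inline struct task_group *rt_task_group_of(struct task_struct *p)
{
	if (!rt_group_sched_enabled())
		return &root_task_group;

	return task_group(p);
}

The static_branch_likely()/static_branch_unlikely() split mirrors the
compiled-in default, so the straight-line code matches the expected
configuration, and the !CONFIG_RT_GROUP_SCHED case stays a compile-time
constant false that the compiler can eliminate.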