Thanks to the kernel cmdline being available early, before any cgroup
hierarchy exists, we can achieve the boot-time disabling goal for
RT_GROUP_SCHED by simply skipping any creation (and destruction) of
RT_GROUP data and its exposure via RT attributes. We can do this thanks
to previously placed runtime guards that redirect all operations to
root_task_group's data when RT_GROUP_SCHED is disabled.

Signed-off-by: Michal Koutný <mkoutny@xxxxxxxx>
---
 kernel/sched/core.c | 36 ++++++++++++++++++++++++------------
 kernel/sched/rt.c   |  9 +++++++++
 2 files changed, 33 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6e21e0885557d..300a1a83e1a3c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9819,18 +9819,6 @@ static struct cftype cpu_legacy_files[] = {
 		.seq_show = cpu_cfs_local_stat_show,
 	},
 #endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	{
-		.name = "rt_runtime_us",
-		.read_s64 = cpu_rt_runtime_read,
-		.write_s64 = cpu_rt_runtime_write,
-	},
-	{
-		.name = "rt_period_us",
-		.read_u64 = cpu_rt_period_read_uint,
-		.write_u64 = cpu_rt_period_write_uint,
-	},
-#endif
 #ifdef CONFIG_UCLAMP_TASK_GROUP
 	{
 		.name = "uclamp.min",
@@ -9849,6 +9837,20 @@ static struct cftype cpu_legacy_files[] = {
 };
 
 #ifdef CONFIG_RT_GROUP_SCHED
+static struct cftype rt_group_files[] = {
+	{
+		.name = "rt_runtime_us",
+		.read_s64 = cpu_rt_runtime_read,
+		.write_s64 = cpu_rt_runtime_write,
+	},
+	{
+		.name = "rt_period_us",
+		.read_u64 = cpu_rt_period_read_uint,
+		.write_u64 = cpu_rt_period_write_uint,
+	},
+	{ }	/* Terminate */
+};
+
 # ifdef RT_GROUP_SCHED_DEFAULT_DISABLED
 DEFINE_STATIC_KEY_FALSE(rt_group_sched);
 # else
@@ -9871,6 +9873,16 @@ static int __init setup_rt_group_sched(char *str)
 	return 1;
 }
 __setup("rt_group_sched=", setup_rt_group_sched);
+
+static int __init cpu_rt_group_init(void)
+{
+	if (!rt_group_sched_enabled())
+		return 0;
+
+	WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files));
+	return 0;
+}
+subsys_initcall(cpu_rt_group_init);
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 static int cpu_extra_stat_show(struct seq_file *sf,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 161d91f7479b4..db7cdc82003bd 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -195,6 +195,9 @@ static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 
 void unregister_rt_sched_group(struct task_group *tg)
 {
+	if (!rt_group_sched_enabled())
+		return;
+
 	if (tg->rt_se)
 		destroy_rt_bandwidth(&tg->rt_bandwidth);
 }
@@ -203,6 +206,9 @@ void free_rt_sched_group(struct task_group *tg)
 {
 	int i;
 
+	if (!rt_group_sched_enabled())
+		return;
+
 	for_each_possible_cpu(i) {
 		if (tg->rt_rq)
 			kfree(tg->rt_rq[i]);
@@ -247,6 +253,9 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 	struct sched_rt_entity *rt_se;
 	int i;
 
+	if (!rt_group_sched_enabled())
+		return 1;
+
 	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
 	if (!tg->rt_rq)
 		goto err;
-- 
2.47.1
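
For reference, the runtime guards mentioned in the changelog amount to a
fallback of roughly this shape (a minimal sketch only; the helper name
rt_effective_task_group() is hypothetical and not part of this series,
while rt_group_sched_enabled() and root_task_group are the identifiers
used above):

/*
 * Sketch of the assumed guard pattern: with rt_group_sched disabled,
 * per-cgroup RT operations fall back to root_task_group's data, so the
 * alloc/free/unregister paths above can bail out early without leaving
 * dangling per-cgroup RT state behind.
 */
static inline struct task_group *rt_effective_task_group(struct task_group *tg)
{
	if (!rt_group_sched_enabled())
		return &root_task_group;	/* shared RT bandwidth/data */

	return tg;
}

Assuming guards of this shape are already in place, as the changelog
states, RT data that alloc_rt_sched_group() never allocates is also
never dereferenced.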