These will be used by a new BPF extensible sched_class.

css_tg() will be used in the init and exit paths to visit all task_groups
by walking cgroups.

__setscheduler_prio() is used to pick the sched_class matching the current
prio of the task. For the new BPF extensible sched_class, the mapping from
the task configuration to sched_class isn't static and depends on a few
factors - e.g. whether the BPF progs implementing the scheduler are loaded
and in a serviceable state. That mapping logic will be added to
__setscheduler_prio().

When the BPF scheduler progs get loaded and unloaded, the mapping changes
and the new sched_class will walk the tasks applying the new mapping using
__setscheduler_prio().

v3: Dropped SCHED_CHANGE_BLOCK() as upstream is adding a more generic
    cleanup mechanism.

v2: Expose SCHED_CHANGE_BLOCK() too and update the description.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Reviewed-by: David Vernet <dvernet@xxxxxxxx>
Acked-by: Josh Don <joshdon@xxxxxxxxxx>
Acked-by: Hao Luo <haoluo@xxxxxxxxxx>
Acked-by: Barret Rhoden <brho@xxxxxxxxxx>
Reported-by: kernel test robot <lkp@xxxxxxxxx>
---
 kernel/sched/core.c  | 7 +------
 kernel/sched/sched.h | 7 +++++++
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9b60df944263..987209c0e672 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7098,7 +7098,7 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
 }
 EXPORT_SYMBOL(default_wake_function);
 
-static void __setscheduler_prio(struct task_struct *p, int prio)
+void __setscheduler_prio(struct task_struct *p, int prio)
 {
 	if (dl_prio(prio))
 		p->sched_class = &dl_sched_class;
@@ -10542,11 +10542,6 @@ void sched_move_task(struct task_struct *tsk)
 	}
 }
 
-static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-{
-	return css ? container_of(css, struct task_group, css) : NULL;
-}
-
 static struct cgroup_subsys_state *
 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 24b3d120700b..7e0de4cb5a52 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -477,6 +477,11 @@ static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 	return walk_tg_tree_from(&root_task_group, down, up, data);
 }
 
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+{
+	return css ? container_of(css, struct task_group, css) : NULL;
+}
+
 extern int tg_nop(struct task_group *tg, void *data);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2481,6 +2486,8 @@ extern void init_sched_dl_class(void);
 extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
 
+extern void __setscheduler_prio(struct task_struct *p, int prio);
+
 extern void resched_curr(struct rq *rq);
 extern void resched_cpu(int cpu);
-- 
2.44.0
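
For illustration only, not part of this patch: a rough sketch of how the
two newly exposed helpers are expected to be used. The function names
scx_reset_task_classes() and scx_visit_task_groups() are hypothetical,
and the dequeue/requeue handling around the sched_class switch (the part
SCHED_CHANGE_BLOCK() used to cover) is elided here.

/*
 * Sketch: re-pick every task's sched_class after the prio -> sched_class
 * mapping in __setscheduler_prio() has changed, e.g. when the BPF
 * scheduler progs are loaded or unloaded.  The dequeue/enqueue of queued
 * tasks around the class change is omitted.
 */
static void scx_reset_task_classes(void)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		struct rq_flags rf;
		struct rq *rq = task_rq_lock(p, &rf);

		/* let the updated mapping pick the sched_class for @p */
		__setscheduler_prio(p, p->prio);

		task_rq_unlock(rq, p, &rf);
	}
	read_unlock(&tasklist_lock);
}

/*
 * Sketch: visit every task_group in the init/exit paths by walking the
 * cpu controller's css hierarchy and translating each css with css_tg().
 */
static void scx_visit_task_groups(void)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css_for_each_descendant_pre(css, &root_task_group.css) {
		struct task_group *tg = css_tg(css);

		/* per-task_group init/exit work would go here */
		pr_debug("visiting task_group %p\n", tg);
	}
	rcu_read_unlock();
}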