These will be used by a new BPF extensible sched_class. css_tg() will be
used in the init and exit paths to visit all task_groups by walking
cgroups.

__setscheduler_prio() is used to pick the sched_class matching the
current prio of the task. For the new BPF extensible sched_class, the
mapping from the task configuration to sched_class isn't static and
depends on a few factors - e.g. whether the BPF progs implementing the
scheduler are loaded and in a serviceable state. That mapping logic will
be added to __setscheduler_prio().

When the BPF scheduler progs get loaded and unloaded, the mapping changes
and the new sched_class will walk the tasks applying the new mapping
using __setscheduler_prio().

v3: Dropped SCHED_CHANGE_BLOCK() as upstream is adding a more generic
    cleanup mechanism.

v2: Expose SCHED_CHANGE_BLOCK() too and update the description.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Reviewed-by: David Vernet <dvernet@xxxxxxxx>
Acked-by: Josh Don <joshdon@xxxxxxxxxx>
Acked-by: Hao Luo <haoluo@xxxxxxxxxx>
Acked-by: Barret Rhoden <brho@xxxxxxxxxx>
Reported-by: kernel test robot <lkp@xxxxxxxxx>
---
 kernel/sched/core.c  | 7 +------
 kernel/sched/sched.h | 7 +++++++
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 00d6751b0f8d..6818ed1a7a42 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7011,7 +7011,7 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
 }
 EXPORT_SYMBOL(default_wake_function);
 
-static void __setscheduler_prio(struct task_struct *p, int prio)
+void __setscheduler_prio(struct task_struct *p, int prio)
 {
 	if (dl_prio(prio))
 		p->sched_class = &dl_sched_class;
@@ -10523,11 +10523,6 @@ void sched_move_task(struct task_struct *tsk)
 	task_rq_unlock(rq, tsk, &rf);
 }
 
-static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-{
-	return css ? container_of(css, struct task_group, css) : NULL;
-}
-
 static struct cgroup_subsys_state *
 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2eb5759f4be9..71343472e6b1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -469,6 +469,11 @@ static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 	return walk_tg_tree_from(&root_task_group, down, up, data);
 }
 
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+{
+	return css ? container_of(css, struct task_group, css) : NULL;
+}
+
 extern int tg_nop(struct task_group *tg, void *data);
 
 extern void free_fair_sched_group(struct task_group *tg);
@@ -2390,6 +2395,8 @@ extern void init_sched_dl_class(void);
 extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
 
+extern void __setscheduler_prio(struct task_struct *p, int prio);
+
 extern void resched_curr(struct rq *rq);
 extern void resched_cpu(int cpu);
 
-- 
2.41.0
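
For illustration only, not part of this patch: once the BPF extensible
sched_class exists, the non-static mapping described above might look
roughly like the sketch below inside __setscheduler_prio(). The
CONFIG_SCHED_CLASS_EXT guard, ext_sched_class and the task_should_scx()
predicate (which would report whether the BPF scheduler progs are loaded
and serviceable for @p) are hypothetical placeholders for what later
patches would provide; none of them is defined here.

void __setscheduler_prio(struct task_struct *p, int prio)
{
	if (dl_prio(prio))
		p->sched_class = &dl_sched_class;
	else if (rt_prio(prio))
		p->sched_class = &rt_sched_class;
#ifdef CONFIG_SCHED_CLASS_EXT
	/*
	 * Non-static mapping: route @p to the BPF extensible class only
	 * while its progs are loaded and serviceable.
	 */
	else if (task_should_scx(p))
		p->sched_class = &ext_sched_class;
#endif
	else
		p->sched_class = &fair_sched_class;

	p->prio = prio;
}

When the BPF scheduler progs are loaded or unloaded, walking the affected
tasks and re-running __setscheduler_prio() with a mapping like this is
what would move them between fair_sched_class and the new class.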