Move tg_weight() upward and make cpu_shares_read_u64() use it too. This
makes the weight retrieval shared between cgroup v1 and v2 paths and
will be used to implement cgroup support for sched_ext.

No functional changes.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
---
 kernel/sched/core.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 49ddc7bc63f5..43a62e9ada84 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9170,6 +9170,11 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
 #endif /* CONFIG_UCLAMP_TASK_GROUP */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+static unsigned long tg_weight(struct task_group *tg)
+{
+	return scale_load_down(tg->shares);
+}
+
 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
 				struct cftype *cftype, u64 shareval)
 {
@@ -9181,9 +9186,7 @@ static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
 			       struct cftype *cft)
 {
-	struct task_group *tg = css_tg(css);
-
-	return (u64) scale_load_down(tg->shares);
+	return tg_weight(css_tg(css));
 }
 
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -9685,11 +9688,6 @@ static int cpu_local_stat_show(struct seq_file *sf,
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static unsigned long tg_weight(struct task_group *tg)
-{
-	return scale_load_down(tg->shares);
-}
-
 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
 			       struct cftype *cft)
 {
-- 
2.46.0
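
For context (a sketch, not part of the patch above): the last hunk sits
directly above cpu_weight_read_u64(), the cgroup v2 "cpu.weight" reader
that was previously the only tg_weight() caller. After the move, the v1
"cpu.shares" reader, the v2 "cpu.weight" reader, and later sched_ext's
cgroup support can all pull the group weight through the same helper.
The body below is a reconstruction of the v2 reader around this point in
mainline; the CGROUP_WEIGHT_DFL conversion is an assumption based on the
existing v2 interface, not something shown in this diff:

/*
 * cgroup v2 "cpu.weight": tg->shares uses the v1 scale where 1024
 * corresponds to the default v2 weight of 100, so map the shared
 * tg_weight() value onto the v2 range [1, 10000].
 */
static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return DIV_ROUND_CLOSEST_ULL(tg_weight(css_tg(css)) *
				     CGROUP_WEIGHT_DFL, 1024);
}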