- Fix a minor bug in yield (seen for CONFIG_FAIR_GROUP_SCHED) - Print nr_running and load information for cfs_rq in /proc/sched_debug - Print &rq->cfs statistics as well (useful for group scheduling) Signed-off-by: Srivatsa Vaddagiri <vatsa@xxxxxxxxxxxxxxxxxx> Signed-off-by: Dhaval Giani <dhaval@xxxxxxxxxxxxxxxxxx> --- kernel/sched_debug.c | 2 ++ kernel/sched_fair.c | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) Index: current/kernel/sched_debug.c =================================================================== --- current.orig/kernel/sched_debug.c +++ current/kernel/sched_debug.c @@ -136,6 +136,8 @@ void print_cfs_rq(struct seq_file *m, in SPLIT_NS(spread0)); SEQ_printf(m, " .%-30s: %ld\n", "spread0", cfs_rq->nr_sync_min_vruntime); + SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running); + SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); } static void print_cpu(struct seq_file *m, int cpu) Index: current/kernel/sched_fair.c =================================================================== --- current.orig/kernel/sched_fair.c +++ current/kernel/sched_fair.c @@ -726,7 +726,7 @@ static void dequeue_task_fair(struct rq */ static void yield_task_fair(struct rq *rq) { - struct cfs_rq *cfs_rq = &rq->cfs; + struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr); struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; struct sched_entity *rightmost, *se = &rq->curr->se; struct rb_node *parent; @@ -1025,6 +1025,7 @@ static void print_cfs_stats(struct seq_f { struct cfs_rq *cfs_rq; + print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs); for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) print_cfs_rq(m, cpu, cfs_rq); } -- Regards, vatsa _______________________________________________ Containers mailing list Containers@xxxxxxxxxxxxxxxxxxxxxxxxxx https://lists.linux-foundation.org/mailman/listinfo/containers