exec_clock already provides per-group cpu usage metrics, and can be
reused by cpuacct in case cpu and cpuacct are comounted. However, it
is only maintained for tasks in the fair class. Doing the same for rt
is easy, and can be done in an already existing hierarchy loop. This
is an improvement over the independent hierarchy walk executed by
cpuacct.

Signed-off-by: Glauber Costa <glommer@xxxxxxxxxxxxx>
CC: Dave Jones <davej@xxxxxxxxxx>
CC: Ben Hutchings <ben@xxxxxxxxxxxxxxx>
CC: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
CC: Paul Turner <pjt@xxxxxxxxxx>
CC: Lennart Poettering <lennart@xxxxxxxxxxxxxx>
CC: Kay Sievers <kay.sievers@xxxxxxxx>
CC: Tejun Heo <tj@xxxxxxxxxx>
---
 kernel/sched/rt.c    | 1 +
 kernel/sched/sched.h | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f7e05d87..7f6f6c6 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -945,6 +945,7 @@ static void update_curr_rt(struct rq *rq)
 
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
+		schedstat_add(rt_rq, exec_clock, delta_exec);
 
 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
 			raw_spin_lock(&rt_rq->rt_runtime_lock);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 84a339d..01ca8a4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -210,6 +210,7 @@ struct cfs_rq {
	unsigned int nr_running, h_nr_running;
 
 	u64 exec_clock;
+	u64 prev_exec_clock;
 	u64 min_vruntime;
 #ifndef CONFIG_64BIT
 	u64 min_vruntime_copy;
@@ -312,6 +313,8 @@ struct rt_rq {
 	struct plist_head pushable_tasks;
 #endif
 	int rt_throttled;
+	u64 exec_clock;
+	u64 prev_exec_clock;
 	u64 rt_time;
 	u64 rt_runtime;
 	/* Nests inside the rq lock: */
-- 
1.7.11.7
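
P.S.: for illustration only, a minimal sketch of how a comounted cpuacct
could consume these counters at read time instead of doing its own
hierarchy walk. The helper name task_group_exec_clock() is hypothetical
and not part of this patch; it assumes only the tg->cfs_rq[cpu] and
tg->rt_rq[cpu] per-cpu arrays already declared in kernel/sched/sched.h:

/*
 * Hypothetical read-side helper, not part of this patch: per-group
 * cpu usage on @cpu, taken straight from the scheduler's counters.
 */
static u64 task_group_exec_clock(struct task_group *tg, int cpu)
{
	u64 usage;

	/* fair-class runtime, accumulated by update_curr() */
	usage = tg->cfs_rq[cpu]->exec_clock;

	/* rt-class runtime, accumulated by update_curr_rt() above */
	usage += tg->rt_rq[cpu]->exec_clock;

	return usage;
}

Summing the two clocks at read time would keep the hot update_curr*()
paths free of any cpuacct-specific work, which is the point of reusing
exec_clock here.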