Update comments to ease understanding of RT throttling.

Signed-off-by: Michal Koutný <mkoutny@xxxxxxxx>
---
 kernel/sched/sched.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d8d28c3d1ac5f..5c32c23915810 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -812,17 +812,17 @@ struct rt_rq {

 #ifdef CONFIG_RT_GROUP_SCHED
 	int			rt_throttled;
-	u64			rt_time;
-	u64			rt_runtime;
+	u64			rt_time;	/* consumed RT time, goes up in update_curr_rt */
+	u64			rt_runtime;	/* allotted RT time, "slice" from rt_bandwidth, RT sharing/balancing */
 	/* Nests inside the rq lock: */
 	raw_spinlock_t		rt_runtime_lock;

 	unsigned int		rt_nr_boosted;

-	struct rq		*rq;
+	struct rq		*rq;	/* this is always top-level rq, cache? */
 #endif
 #ifdef CONFIG_CGROUP_SCHED
-	struct task_group	*tg;
+	struct task_group	*tg;	/* this tg has "this" rt_rq on given CPU for runnable entities */
 #endif
 };

--
2.47.1
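
Illustrative note, not part of the patch: the new comments summarize the
throttling mechanism in kernel/sched/rt.c. update_curr_rt() charges each
delta of RT execution to rt_rq->rt_time, and the throttling check compares
that against the rt_rq's allotted rt_runtime slice. Below is a condensed
sketch of that interaction, loosely based on update_curr_rt() and
sched_rt_runtime_exceeded(); the real functions also handle RUNTIME_INF,
runtime sharing/balancing across CPUs, and locking via rt_runtime_lock,
all omitted here.

	/*
	 * Simplified sketch: charge execution time, then throttle once
	 * the consumed rt_time exceeds the allotted rt_runtime slice.
	 * Assumes the kernel's struct rt_rq as declared in the hunk
	 * above; corner cases and locking omitted.
	 */
	static void sketch_charge_rt_time(struct rt_rq *rt_rq, u64 delta_exec)
	{
		/* update_curr_rt(): consumed RT time goes up */
		rt_rq->rt_time += delta_exec;

		/* sched_rt_runtime_exceeded(), condensed */
		if (rt_rq->rt_time > rt_rq->rt_runtime) {
			/*
			 * The slice for this period is spent: mark the
			 * rt_rq throttled so its entities stay dequeued
			 * until the periodic replenishment
			 * (do_sched_rt_period_timer()) refills rt_time
			 * and clears rt_throttled.
			 */
			rt_rq->rt_throttled = 1;
		}
	}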