This is a note to let you know that I've just added the patch titled

    sched: Remove vruntime from trace_sched_stat_runtime()

to the 6.6-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     sched-remove-vruntime-from-trace_sched_stat_runtime.patch
and it can be found in the queue-6.6 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 13751b8fb97f75c813b7992dba41d320a0f2489a
Author: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Date:   Mon Nov 6 13:41:43 2023 +0100

    sched: Remove vruntime from trace_sched_stat_runtime()

    [ Upstream commit 5fe6ec8f6ab549b6422e41551abb51802bd48bc7 ]

    Tracing the runtime delta makes sense, observer can sum over time.
    Tracing the absolute vruntime makes less sense, inconsistent:
    absolute-vs-delta, but also vruntime delta can be computed from
    runtime delta.

    Removing the vruntime thing also makes the two tracepoint sites
    identical, allowing to unify the code in a later patch.

    Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
    Stable-dep-of: 0664e2c311b9 ("sched/deadline: Fix warning in migrate_enable for boosted tasks")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 010ba1b7cb0ea..bdb1e838954af 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -493,33 +493,30 @@ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
  */
 DECLARE_EVENT_CLASS(sched_stat_runtime,

-	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+	TP_PROTO(struct task_struct *tsk, u64 runtime),

-	TP_ARGS(tsk, __perf_count(runtime), vruntime),
+	TP_ARGS(tsk, __perf_count(runtime)),

 	TP_STRUCT__entry(
 		__array( char,	comm,	TASK_COMM_LEN	)
 		__field( pid_t,	pid			)
 		__field( u64,	runtime			)
-		__field( u64,	vruntime		)
 	),

 	TP_fast_assign(
 		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
 		__entry->pid		= tsk->pid;
 		__entry->runtime	= runtime;
-		__entry->vruntime	= vruntime;
 	),

-	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
+	TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
 			__entry->comm, __entry->pid,
-			(unsigned long long)__entry->runtime,
-			(unsigned long long)__entry->vruntime)
+			(unsigned long long)__entry->runtime)
 );

 DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
-	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
-	     TP_ARGS(tsk, runtime, vruntime));
+	     TP_PROTO(struct task_struct *tsk, u64 runtime),
+	     TP_ARGS(tsk, runtime));

 /*
  * Tracepoint for showing priority inheritance modifying a tasks
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e9333466438c..062447861d8e6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1185,8 +1185,7 @@ s64 update_curr_common(struct rq *rq)
 	if (unlikely(delta_exec <= 0))
 		return delta_exec;

-	trace_sched_stat_runtime(curr, delta_exec, 0);
-
+	trace_sched_stat_runtime(curr, delta_exec);
 	account_group_exec_runtime(curr, delta_exec);
 	cgroup_account_cputime(curr, delta_exec);

@@ -1215,7 +1214,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	if (entity_is_task(curr)) {
 		struct task_struct *curtask = task_of(curr);

-		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
+		trace_sched_stat_runtime(curtask, delta_exec);
 		cgroup_account_cputime(curtask, delta_exec);
 		account_group_exec_runtime(curtask, delta_exec);
 	}
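
A brief, purely illustrative aside on the commit message's remark that the
vruntime delta can be computed from the runtime delta: in CFS, vruntime
advances as the runtime scaled by the nice-0 weight over the task's load
weight (this is what calc_delta_fair() in kernel/sched/fair.c does, ignoring
fixed-point rounding). The userspace sketch below is not part of the patch;
the helper name and sample weights are assumptions chosen for illustration.

/* Illustrative userspace sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

/*
 * Approximate CFS behaviour: a nice-0 task has load weight 1024, so its
 * vruntime advances 1:1 with runtime; heavier (higher-priority) tasks
 * accrue vruntime more slowly for the same runtime delta.
 */
static uint64_t vruntime_delta_ns(uint64_t runtime_delta_ns, uint64_t weight)
{
	return runtime_delta_ns * 1024 / weight;
}

int main(void)
{
	uint64_t delta = 2000000;	/* 2 ms, e.g. as reported by sched_stat_runtime */

	printf("weight 1024 (nice 0): %llu ns\n",
	       (unsigned long long)vruntime_delta_ns(delta, 1024));
	printf("weight 2048 (higher priority): %llu ns\n",
	       (unsigned long long)vruntime_delta_ns(delta, 2048));
	return 0;
}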