Convert runqslower to utilize raw_tp_xxx structs for accessing raw
tracepoint arguments.

Signed-off-by: Andrii Nakryiko <andriin@xxxxxx>
---
 tools/bpf/runqslower/runqslower.bpf.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c
index 48a39f72fadf..3931ef9c9a6c 100644
--- a/tools/bpf/runqslower/runqslower.bpf.c
+++ b/tools/bpf/runqslower/runqslower.bpf.c
@@ -40,41 +40,35 @@ static int trace_enqueue(u32 tgid, u32 pid)
 }
 
 SEC("tp_btf/sched_wakeup")
-int handle__sched_wakeup(u64 *ctx)
+int handle__sched_wakeup(struct raw_tp_sched_wakeup *ctx)
 {
 	/* TP_PROTO(struct task_struct *p) */
-	struct task_struct *p = (void *)ctx[0];
-
-	return trace_enqueue(p->tgid, p->pid);
+	return trace_enqueue(ctx->p->tgid, ctx->p->pid);
 }
 
 SEC("tp_btf/sched_wakeup_new")
-int handle__sched_wakeup_new(u64 *ctx)
+int handle__sched_wakeup_new(struct raw_tp_sched_wakeup_new *ctx)
 {
 	/* TP_PROTO(struct task_struct *p) */
-	struct task_struct *p = (void *)ctx[0];
-
-	return trace_enqueue(p->tgid, p->pid);
+	return trace_enqueue(ctx->p->tgid, ctx->p->pid);
 }
 
 SEC("tp_btf/sched_switch")
-int handle__sched_switch(u64 *ctx)
+int handle__sched_switch(struct raw_tp_sched_switch *ctx)
 {
 	/* TP_PROTO(bool preempt, struct task_struct *prev,
 	 *	    struct task_struct *next)
 	 */
-	struct task_struct *prev = (struct task_struct *)ctx[1];
-	struct task_struct *next = (struct task_struct *)ctx[2];
 	struct event event = {};
 	u64 *tsp, delta_us;
 	long state;
 	u32 pid;
 
 	/* ivcsw: treat like an enqueue event and store timestamp */
-	if (prev->state == TASK_RUNNING)
-		trace_enqueue(prev->tgid, prev->pid);
+	if (ctx->prev->state == TASK_RUNNING)
+		trace_enqueue(ctx->prev->tgid, ctx->prev->pid);
 
-	pid = next->pid;
+	pid = ctx->next->pid;
 
 	/* fetch timestamp and calculate delta */
 	tsp = bpf_map_lookup_elem(&start, &pid);
-- 
2.17.1
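
For reference, a minimal sketch of what the raw_tp_xxx context structs used
above are assumed to look like. They are expected to mirror each tracepoint's
TP_PROTO, with every argument occupying one 64-bit slot so that field access
lands on the same offsets the old ctx[N] indexing used by hand. The real
definitions would come from a BTF-generated header; the layouts and names
below are illustrative assumptions only, not part of the patch.

struct task_struct;

/* sched_wakeup and sched_wakeup_new: TP_PROTO(struct task_struct *p) */
struct raw_tp_sched_wakeup {
	struct task_struct *p;		/* formerly ctx[0] */
};

struct raw_tp_sched_wakeup_new {
	struct task_struct *p;		/* formerly ctx[0] */
};

/* sched_switch: TP_PROTO(bool preempt, struct task_struct *prev,
 *		 struct task_struct *next)
 */
struct raw_tp_sched_switch {
	unsigned long long preempt;	/* formerly ctx[0]; bool widened to a 64-bit slot */
	struct task_struct *prev;	/* formerly ctx[1] */
	struct task_struct *next;	/* formerly ctx[2] */
};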