On Thu, Sep 05, 2024 at 04:51:25PM +0200, Antonino Maniscalco wrote: > Add trace points corresponding to preemption being triggered and being > completed for latency measurement purposes. > > Signed-off-by: Antonino Maniscalco <antomani103@xxxxxxxxx> > Tested-by: Neil Armstrong <neil.armstrong@xxxxxxxxxx> # on SM8650-QRD > --- > drivers/gpu/drm/msm/adreno/a6xx_preempt.c | 7 +++++++ > drivers/gpu/drm/msm/msm_gpu_trace.h | 28 ++++++++++++++++++++++++++++ > 2 files changed, 35 insertions(+) > > diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c > index ec44f44d925f..ca9d36c107f2 100644 > --- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c > +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c > @@ -7,6 +7,7 @@ > #include "a6xx_gpu.h" > #include "a6xx_gmu.xml.h" > #include "msm_mmu.h" > +#include "msm_gpu_trace.h" > > /* > * Try to transition the preemption state from old to new. Return > @@ -143,6 +144,8 @@ void a6xx_preempt_irq(struct msm_gpu *gpu) > > set_preempt_state(a6xx_gpu, PREEMPT_NONE); > > + trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id); > + > /* > * Retrigger preemption to avoid a deadlock that might occur when preemption > * is skipped due to it being already in flight when requested. > @@ -264,6 +267,10 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu) > */ > ring->skip_inline_wptr = false; > > + trace_msm_gpu_preemption_trigger( > + a6xx_gpu->cur_ring ? a6xx_gpu->cur_ring->id : -1, Can't we avoid these NULL checks? `ring` is dereferenced unconditionally two lines below (spin_unlock_irqrestore(&ring->preempt_lock, flags)), so the `ring ? ... : -1` branch is dead code here; presumably the same reasoning applies to a6xx_gpu->cur_ring in this path — please confirm and drop the checks if so. -Akhil. > + ring ? 
ring->id : -1); > + > spin_unlock_irqrestore(&ring->preempt_lock, flags); > > gpu_write64(gpu, > diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h > index ac40d857bc45..7f863282db0d 100644 > --- a/drivers/gpu/drm/msm/msm_gpu_trace.h > +++ b/drivers/gpu/drm/msm/msm_gpu_trace.h > @@ -177,6 +177,34 @@ TRACE_EVENT(msm_gpu_resume, > TP_printk("%u", __entry->dummy) > ); > > +TRACE_EVENT(msm_gpu_preemption_trigger, > + TP_PROTO(int ring_id_from, int ring_id_to), > + TP_ARGS(ring_id_from, ring_id_to), > + TP_STRUCT__entry( > + __field(int, ring_id_from) > + __field(int, ring_id_to) > + ), > + TP_fast_assign( > + __entry->ring_id_from = ring_id_from; > + __entry->ring_id_to = ring_id_to; > + ), > + TP_printk("preempting %u -> %u", > + __entry->ring_id_from, > + __entry->ring_id_to) > +); > + > +TRACE_EVENT(msm_gpu_preemption_irq, > + TP_PROTO(u32 ring_id), > + TP_ARGS(ring_id), > + TP_STRUCT__entry( > + __field(u32, ring_id) > + ), > + TP_fast_assign( > + __entry->ring_id = ring_id; > + ), > + TP_printk("preempted to %u", __entry->ring_id) > +); > + > #endif > > #undef TRACE_INCLUDE_PATH > > -- > 2.46.0 >