Re: [PATCH v6 4/7] drm/sched: cleanup gpu_scheduler trace events

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 





Le 14/11/2024 à 12:50, Tvrtko Ursulin a écrit :

On 14/11/2024 10:01, Pierre-Eric Pelloux-Prayer wrote:
A fence uniquely identifies a job, so this commit updates the places
where a kernel pointer was used as an identifier by:

    "fence=(context:%llu, seqno:%lld)"

Any special reason for %lld? Planning to use like -1 for unknown or something?


Indeed, %llu is better.


Btw in i915 we use fence=%llu:%llu which is more compact and IMO still readable so perhaps something to consider.

I'll switch to "fence=%llu:%llu" unless others have concerns.

Thanks,
Pierre-Eric


Regards,

Tvrtko

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@xxxxxxx>
---
  .../gpu/drm/scheduler/gpu_scheduler_trace.h   | 39 +++++++++++--------
  1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index c4ec28540656..24358c4d5bbe 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -36,28 +36,29 @@ DECLARE_EVENT_CLASS(drm_sched_job,
          TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
          TP_ARGS(sched_job, entity),
          TP_STRUCT__entry(
-                 __field(struct drm_sched_entity *, entity)
-                 __field(struct dma_fence *, fence)
                   __string(name, sched_job->sched->name)
                   __field(uint64_t, id)
                   __field(u32, job_count)
                   __field(int, hw_job_count)
                   __string(dev, dev_name(sched_job->sched->dev))
+                 __field(uint64_t, fence_context)
+                 __field(uint64_t, fence_seqno)
                   ),
          TP_fast_assign(
-               __entry->entity = entity;
                 __entry->id = sched_job->id;
-               __entry->fence = &sched_job->s_fence->finished;
                 __assign_str(name);
                 __entry->job_count = spsc_queue_count(&entity->job_queue);
                 __entry->hw_job_count = atomic_read(
                     &sched_job->sched->credit_count);
                 __assign_str(dev);
+               __entry->fence_context = sched_job->s_fence->finished.context;
+               __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+
                 ),
-        TP_printk("dev=%s, entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
-              __get_str(dev), __entry->entity, __entry->id,
-              __entry->fence, __get_str(name),
+        TP_printk("dev=%s, id=%llu, fence=(context:%llu, seqno:%lld), ring=%s, job count:%u, hw job count:%d",
+              __get_str(dev), __entry->id,
+              __entry->fence_context, __entry->fence_seqno, __get_str(name),
                __entry->job_count, __entry->hw_job_count)
  );
@@ -75,37 +76,41 @@ TRACE_EVENT(drm_sched_process_job,
          TP_PROTO(struct drm_sched_fence *fence),
          TP_ARGS(fence),
          TP_STRUCT__entry(
-            __field(struct dma_fence *, fence)
+            __field(uint64_t, fence_context)
+            __field(uint64_t, fence_seqno)
              ),
          TP_fast_assign(
-            __entry->fence = &fence->finished;
+            __entry->fence_context = fence->finished.context;
+            __entry->fence_seqno = fence->finished.seqno;
              ),
-        TP_printk("fence=%p signaled", __entry->fence)
+        TP_printk("fence=(context:%llu, seqno:%lld) signaled",
+              __entry->fence_context, __entry->fence_seqno)
  );
  TRACE_EVENT(drm_sched_job_wait_dep,
          TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
          TP_ARGS(sched_job, fence),
          TP_STRUCT__entry(
-                 __string(name, sched_job->sched->name)
+                 __field(uint64_t, fence_context)
+                 __field(uint64_t, fence_seqno)
                   __field(uint64_t, id)
                   __field(struct dma_fence *, fence)
                   __field(uint64_t, ctx)
-                 __field(unsigned, seqno)
+                 __field(uint64_t, seqno)
                   ),
          TP_fast_assign(
-               __assign_str(name);
+               __entry->fence_context = sched_job->s_fence->finished.context;
+               __entry->fence_seqno = sched_job->s_fence->finished.seqno;
                 __entry->id = sched_job->id;
                 __entry->fence = fence;
                 __entry->ctx = fence->context;
                 __entry->seqno = fence->seqno;
                 ),
-        TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
-              __get_str(name), __entry->id,
-              __entry->fence, __entry->ctx,
-              __entry->seqno)
+        TP_printk("fence=(context:%llu, seqno:%lld), id=%llu, dependencies:{(context:%llu, seqno:%lld)}",
+              __entry->fence_context, __entry->fence_seqno, __entry->id,
+              __entry->ctx, __entry->seqno)
  );
  #endif



[Index of Archives]     [Linux DRI Users]     [Linux Intel Graphics]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]     [XFree86]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux