A unique id is useful for debugging and tracing. Intended to replace
pointers in ftrace output.

Signed-off-by: Andres Rodriguez <andresx7 at gmail.com>
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 2 ++
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 1bf83ed..9ee761e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -452,20 +452,21 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 /* init a sched_job with basic field */
 int amd_sched_job_init(struct amd_sched_job *job,
 		       struct amd_gpu_scheduler *sched,
 		       struct amd_sched_entity *entity,
 		       void *owner)
 {
 	job->sched = sched;
 	job->s_entity = entity;
 	job->s_fence = amd_sched_fence_create(entity, owner);
+	job->id = atomic64_inc_return(&sched->job_id_count);
 	if (!job->s_fence)
 		return -ENOMEM;
 
 	INIT_WORK(&job->finish_work, amd_sched_job_finish);
 	INIT_LIST_HEAD(&job->node);
 	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
 
 	return 0;
 }
 
@@ -609,20 +610,21 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	sched->name = name;
 	sched->timeout = timeout;
 	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
 		amd_sched_rq_init(&sched->sched_rq[i]);
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
+	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a seperate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
 	if (IS_ERR(sched->thread)) {
 		DRM_ERROR("Failed to create scheduler for %s.\n", name);
 		return PTR_ERR(sched->thread);
 	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index d8dc681..e037b1e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -73,20 +73,21 @@ struct amd_sched_fence {
 };
 
 struct amd_sched_job {
 	struct amd_gpu_scheduler	*sched;
 	struct amd_sched_entity		*s_entity;
 	struct amd_sched_fence		*s_fence;
 	struct dma_fence_cb		finish_cb;
 	struct work_struct		finish_work;
 	struct list_head		node;
 	struct delayed_work		work_tdr;
+	uint64_t			id;
 };
 
 extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
 extern const struct dma_fence_ops amd_sched_fence_ops_finished;
 static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
 {
 	if (f->ops == &amd_sched_fence_ops_scheduled)
 		return container_of(f, struct amd_sched_fence, scheduled);
 
 	if (f->ops == &amd_sched_fence_ops_finished)
@@ -117,20 +118,21 @@ enum amd_sched_priority {
  */
 struct amd_gpu_scheduler {
 	const struct amd_sched_backend_ops	*ops;
 	uint32_t			hw_submission_limit;
 	long				timeout;
 	const char			*name;
 	struct amd_sched_rq		sched_rq[AMD_SCHED_MAX_PRIORITY];
 	wait_queue_head_t		wake_up_worker;
 	wait_queue_head_t		job_scheduled;
 	atomic_t			hw_rq_count;
+	atomic64_t			job_id_count;
 	struct task_struct		*thread;
 	struct list_head		ring_mirror_list;
 	spinlock_t			job_list_lock;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
 		   const struct amd_sched_backend_ops *ops,
 		   uint32_t hw_submission, long timeout, const char *name);
 void amd_sched_fini(struct amd_gpu_scheduler *sched);
-- 
2.9.3
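
For illustration only, not part of the patch: a minimal userspace C sketch,
with hypothetical names, of the idea behind this change. Ids are handed out
from an atomic counter when a job is initialized, and a trace line prints the
id rather than the job pointer, so entries stay stable across a run and are
easy to grep for.

/* Userspace sketch with hypothetical names; mirrors the patch's pattern of
 * assigning ids from an atomic counter and tracing the id instead of a raw
 * pointer.
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct sched_job {
	uint64_t id;	/* stable identifier used in trace output */
};

static atomic_uint_fast64_t job_id_count;

static void job_init(struct sched_job *job)
{
	/* Userspace equivalent of atomic64_inc_return(): first job gets id 1. */
	job->id = atomic_fetch_add(&job_id_count, 1) + 1;
}

static void trace_job(const struct sched_job *job)
{
	/* An id is readable and greppable; a pointer value changes between runs. */
	printf("sched_job id=%" PRIu64 "\n", job->id);
}

int main(void)
{
	struct sched_job a, b;

	job_init(&a);
	job_init(&b);
	trace_job(&a);	/* prints: sched_job id=1 */
	trace_job(&b);	/* prints: sched_job id=2 */
	return 0;
}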