[PATCH 09/11] drm/amd: hw job list should be exact

The hardware job list should be exact, so delete the job node in the IRQ
handler (the fence callback) instead of in the work thread. Likewise, arm
the TDR timer for the next job immediately in the callback rather than
waiting for the worker to run.
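
In short, after this patch the fence callback (IRQ context) does only the
non-sleeping work, and the worker keeps everything that may sleep. A
condensed sketch of the resulting split (the authoritative change is the
diff below; locals and struct definitions are elided):

	/* fence callback: IRQ context, must not sleep */
	static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
	{
		spin_lock_irqsave(&sched->job_list_lock, flags);
		list_del_init(&s_job->node);	/* keep ring_mirror_list exact */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);
		spin_unlock_irqrestore(&sched->job_list_lock, flags);
		if (sched->timeout != MAX_SCHEDULE_TIMEOUT && next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
		schedule_work(&s_job->finish_work);	/* defer the sleeping part */
	}

	/* worker: process context, may sleep */
	static void amd_sched_job_finish(struct work_struct *work)
	{
		if (sched->timeout != MAX_SCHEDULE_TIMEOUT)
			cancel_delayed_work_sync(&s_job->work_tdr);	/* sleeps */
		sched->ops->free_job(s_job);
	}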

Change-Id: I6d2686d84be3e7077300df7181c2a284fbcda9eb
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Edward O'Callaghan <funfunctor@folklore1984.net>
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 38 +++++++++++++--------------
 1 file changed, 18 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9f4fa6e..0444df0 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -367,34 +367,32 @@ static void amd_sched_job_finish(struct work_struct *work)
 	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
 						   finish_work);
 	struct amd_gpu_scheduler *sched = s_job->sched;
-	unsigned long flags;
-
-	/* remove job from ring_mirror_list */
-	spin_lock_irqsave(&sched->job_list_lock, flags);
-	list_del_init(&s_job->node);
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-		struct amd_sched_job *next;
 
-		spin_unlock_irqrestore(&sched->job_list_lock, flags);
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT)
 		cancel_delayed_work_sync(&s_job->work_tdr);
-		spin_lock_irqsave(&sched->job_list_lock, flags);
-
-		/* queue TDR for next job */
-		next = list_first_entry_or_null(&sched->ring_mirror_list,
-						struct amd_sched_job, node);
 
-		if (next)
-			schedule_delayed_work(&next->work_tdr, sched->timeout);
-	}
-	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 	sched->ops->free_job(s_job);
 }
 
 static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
 {
-	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
-						 finish_cb);
-	schedule_work(&job->finish_work);
+	struct amd_sched_job *s_job = container_of(cb, struct amd_sched_job,
+						   finish_cb);
+	struct amd_gpu_scheduler *sched = s_job->sched;
+	struct amd_sched_job *next;
+	unsigned long flags;
+
+	/* remove job from ring_mirror_list */
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	list_del_init(&s_job->node);
+	/* queue TDR for next job */
+	next = list_first_entry_or_null(&sched->ring_mirror_list,
+					struct amd_sched_job, node);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT) && next)
+		schedule_delayed_work(&next->work_tdr, sched->timeout);
+
+	schedule_work(&s_job->finish_work);
 }
 
 static void amd_sched_job_begin(struct amd_sched_job *s_job)
-- 
1.9.1


