Remove recover_entity from recover_rq when resetting jobs, and add
recover_entity back when recovering jobs.

Change-Id: Ic2e5cb6ab79d2abc49374e1770299487e327efe9
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 0444df0..191437c 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -417,9 +417,10 @@ static void amd_sched_job_timedout(struct work_struct *work)
 	job->sched->ops->timedout_job(job);
 }
 
+/* scheduler must be parked before job reset */
 void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *s_job;
+	struct amd_sched_job *s_job, *tmp;
 
 	spin_lock(&sched->job_list_lock);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
@@ -429,14 +430,6 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 		}
 	}
 	atomic_set(&sched->hw_rq_count, 0);
-	spin_unlock(&sched->job_list_lock);
-}
-
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
-{
-	struct amd_sched_job *s_job, *tmp;
-
-	spin_lock(&sched->job_list_lock);
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		list_del_init(&s_job->node);
 		spin_unlock(&sched->job_list_lock);
@@ -444,6 +437,14 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 		spin_lock(&sched->job_list_lock);
 	}
 	spin_unlock(&sched->job_list_lock);
+	amd_sched_rq_remove_entity(&sched->sched_rq[AMD_SCHED_PRIORITY_RECOVER],
+				   &sched->recover_entity);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+	amd_sched_rq_add_entity(&sched->sched_rq[AMD_SCHED_PRIORITY_RECOVER],
+				&sched->recover_entity);
 }
 
 /**
-- 
1.9.1
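
For reference, a minimal caller-side sketch (not part of this patch) of how
the two entry points are meant to be used around a hardware reset. It assumes
a hypothetical my_device/my_ring wrapper and a my_hw_reset() helper; only the
ordering is the point: park the scheduler thread, reset jobs (which now also
drops recover_entity from the recover run queue), reset the hardware, then
recover (which re-adds recover_entity) and unpark.

#include <linux/kthread.h>
#include "gpu_scheduler.h"

/* my_device, my_ring and my_hw_reset() are illustrative stand-ins,
 * not part of the scheduler API.
 */
static int my_gpu_reset(struct my_device *mdev)
{
	int i, r;

	for (i = 0; i < mdev->num_rings; ++i) {
		struct amd_gpu_scheduler *sched = &mdev->rings[i].sched;

		/* the scheduler must be parked before job reset */
		kthread_park(sched->thread);
		/* resets in-flight jobs and, with this patch, removes
		 * recover_entity from the recover run queue */
		amd_sched_hw_job_reset(sched);
	}

	r = my_hw_reset(mdev);		/* hypothetical ASIC reset */
	if (r)
		return r;

	for (i = 0; i < mdev->num_rings; ++i) {
		struct amd_gpu_scheduler *sched = &mdev->rings[i].sched;

		/* adds recover_entity back so queued recovery work can run */
		amd_sched_job_recovery(sched);
		kthread_unpark(sched->thread);
	}

	return 0;
}

Splitting the remove/add between amd_sched_hw_job_reset() and
amd_sched_job_recovery() keeps recover_entity out of the recover run queue for
the whole window while the hardware is down, so nothing can be picked from it
until recovery is explicitly requested.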