Add a dedicated recover entity to each GPU scheduler. It will be used to
recover hw jobs.

Change-Id: I5508f5ffa04909b480ddd669dfb297e5059eba04
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 24 ++++++++++++++++++++----
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
 2 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index a15fd88..36f5805 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -635,7 +635,7 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 		   const struct amd_sched_backend_ops *ops,
 		   unsigned hw_submission, long timeout, const char *name)
 {
-	int i;
+	int i, r;
 	sched->ops = ops;
 	sched->hw_submission_limit = hw_submission;
 	sched->name = name;
@@ -648,22 +648,37 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
+	r = amd_sched_entity_init(sched, &sched->recover_entity,
+				  &sched->sched_rq[AMD_SCHED_PRIORITY_RECOVER],
+				  hw_submission);
+	if (r)
+		return r;
 	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
 		sched_fence_slab = kmem_cache_create(
 			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
 			SLAB_HWCACHE_ALIGN, NULL);
-		if (!sched_fence_slab)
-			return -ENOMEM;
+		if (!sched_fence_slab) {
+			r = -ENOMEM;
+			goto err1;
+		}
 	}
 
 	/* Each scheduler will run on a seperate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
 	if (IS_ERR(sched->thread)) {
 		DRM_ERROR("Failed to create scheduler for %s.\n", name);
-		return PTR_ERR(sched->thread);
+		r = PTR_ERR(sched->thread);
+		goto err2;
 	}
 
 	return 0;
+err2:
+	if (atomic_dec_and_test(&sched_fence_slab_ref))
+		kmem_cache_destroy(sched_fence_slab);
+
+err1:
+	amd_sched_entity_fini(sched, &sched->recover_entity);
+	return r;
 }
 
 /**
@@ -677,4 +692,5 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 		kthread_stop(sched->thread);
 	if (atomic_dec_and_test(&sched_fence_slab_ref))
 		kmem_cache_destroy(sched_fence_slab);
+	amd_sched_entity_fini(sched, &sched->recover_entity);
 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index cd87bc7..8245316 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -133,6 +133,7 @@ struct amd_gpu_scheduler {
 	struct task_struct		*thread;
 	struct list_head		ring_mirror_list;
 	spinlock_t			job_list_lock;
+	struct amd_sched_entity		recover_entity;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
-- 
1.9.1
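
For context, recover_entity is an ordinary amd_sched_entity: it is bound to
the run queue selected by AMD_SCHED_PRIORITY_RECOVER (the priority level
itself is introduced elsewhere in this series) and sized to the hardware
submission limit, so recovery work reuses the normal entity/run-queue
machinery rather than a special submission path. Below is a minimal,
illustrative sketch of how a driver-side reset path might use it.
amdgpu_submit_recovery_job() is a hypothetical helper, and the signatures of
amd_sched_job_init() and amd_sched_entity_push_job() are assumed to match
the regular submission path in this version of the scheduler.

/*
 * Illustrative sketch only, not part of this patch: queue work needed for
 * recovery on the scheduler's dedicated recover_entity so it does not
 * compete with application-owned entities.
 */
static int amdgpu_submit_recovery_job(struct amd_gpu_scheduler *sched,
				      struct amd_sched_job *job,
				      void *owner)
{
	int r;

	/* Bind the job to the recover entity created in amd_sched_init();
	 * the amd_sched_job_init() signature is assumed here. */
	r = amd_sched_job_init(job, sched, &sched->recover_entity, owner);
	if (r)
		return r;

	/* Hand the job to the scheduler thread, which serves it from the
	 * AMD_SCHED_PRIORITY_RECOVER run queue. */
	amd_sched_entity_push_job(job);
	return 0;
}

Note also the error handling introduced in amd_sched_init(): err2 drops the
fence slab reference and falls through to err1, which tears down the recover
entity, i.e. the reverse of the initialization order, and amd_sched_fini()
performs the matching amd_sched_entity_fini() call on the normal teardown
path.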