Looks like for correct debugging we need to know the scheduler even
earlier. So move picking an rq for an entity into job creation.

Signed-off-by: Christian König <christian.koenig@xxxxxxx>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 50 ++++++++++++++++++++-----------
 drivers/gpu/drm/scheduler/sched_fence.c   |  2 +-
 2 files changed, 33 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index bd7883d1b964..bb2bd4c07e85 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -552,6 +552,34 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 	return sched_job;
 }
 
+/**
+ * drm_sched_entity_select_rq - select a new rq for the entity
+ *
+ * @entity: scheduler entity
+ *
+ * Check all prerequisites and select a new rq for the entity for load
+ * balancing.
+ */
+static void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
+{
+	struct dma_fence *fence;
+	struct drm_sched_rq *rq;
+
+	if (spsc_queue_count(&entity->job_queue) ||
+	    entity->num_rq_list <= 1)
+		return;
+
+	fence = READ_ONCE(entity->last_scheduled);
+	if (fence && !dma_fence_is_signaled(fence))
+		return;
+
+	rq = drm_sched_entity_get_free_sched(entity);
+	spin_lock(&entity->rq_lock);
+	drm_sched_rq_remove_entity(entity->rq, entity);
+	entity->rq = rq;
+	spin_unlock(&entity->rq_lock);
+}
+
 /**
  * drm_sched_entity_push_job - Submit a job to the entity's job queue
  *
@@ -567,25 +595,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity)
 {
-	struct drm_sched_rq *rq = entity->rq;
 	bool first;
 
-	first = spsc_queue_count(&entity->job_queue) == 0;
-	if (first && (entity->num_rq_list > 1)) {
-		struct dma_fence *fence;
-
-		fence = READ_ONCE(entity->last_scheduled);
-		if (fence == NULL || dma_fence_is_signaled(fence)) {
-			rq = drm_sched_entity_get_free_sched(entity);
-			spin_lock(&entity->rq_lock);
-			drm_sched_rq_remove_entity(entity->rq, entity);
-			entity->rq = rq;
-			spin_unlock(&entity->rq_lock);
-		}
-	}
-
-	sched_job->sched = entity->rq->sched;
-	sched_job->s_fence->sched = entity->rq->sched;
 	trace_drm_sched_job(sched_job, entity);
 	atomic_inc(&entity->rq->sched->num_jobs);
 	WRITE_ONCE(entity->last_user, current->group_leader);
@@ -790,7 +801,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
-	struct drm_gpu_scheduler *sched = entity->rq->sched;
+	struct drm_gpu_scheduler *sched;
+
+	drm_sched_entity_select_rq(entity);
+	sched = entity->rq->sched;
 
 	job->sched = sched;
 	job->entity = entity;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 6dab18d288d7..4029312fdd81 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -172,7 +172,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
 		return NULL;
 
 	fence->owner = owner;
-	fence->sched = NULL;
+	fence->sched = entity->rq->sched;
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&entity->fence_seq);
-- 
2.14.1
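
For context, below is a minimal sketch of a driver submission path under
the new ordering. The driver side names (my_job, my_driver_submit) are
hypothetical; only drm_sched_job_init() and drm_sched_entity_push_job()
are the scheduler entry points this patch touches:

#include <drm/gpu_scheduler.h>

/* Hypothetical driver job, embedding the scheduler job as usual. */
struct my_job {
	struct drm_sched_job base;
	/* ... driver specific payload ... */
};

static int my_driver_submit(struct my_job *mjob,
			    struct drm_sched_entity *entity,
			    void *owner)
{
	int r;

	/*
	 * drm_sched_job_init() now selects the rq, so mjob->base.sched
	 * and the scheduler fence's sched pointer are valid from here
	 * on, early enough for debugging the submission path.
	 */
	r = drm_sched_job_init(&mjob->base, entity, owner);
	if (r)
		return r;

	/* ... driver specific setup; tracing can rely on job->sched ... */

	/* Pushing the job no longer does any load balancing itself. */
	drm_sched_entity_push_job(&mjob->base, entity);
	return 0;
}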