RE: [PATCH 1/2] drm/sched: adding a new scheduling policy

Hi Christian,

-----Original Message-----
From: Koenig, Christian <Christian.Koenig@xxxxxxx>
Sent: Friday, October 11, 2024 4:40 PM
To: Zhang, Jesse(Jie) <Jesse.Zhang@xxxxxxx>; dri-devel@xxxxxxxxxxxxxxxxxxxxx; amd-gfx@xxxxxxxxxxxxxxxxxxxxx
Cc: Deucher, Alexander <Alexander.Deucher@xxxxxxx>
Subject: Re: [PATCH 1/2] drm/sched: adding a new scheduling policy

On 11.10.24 08:21, jesse.zhang@xxxxxxx wrote:
> From: "Jesse.zhang@xxxxxxx" <jesse.zhang@xxxxxxx>
>
> Add ring ID scheduling.
> In some cases, userspace needs to run a job on a specific ring,
> instead of letting the scheduler select the best ring based on the
> ring score. For example, the user may want to run a bad job on a
> specific ring to check whether that ring can recover from a queue
> reset.

Absolutely clearly a NAK: we don't want to expose the different HW rings directly to userspace.

Thanks for the confirmation. It was a bit confusing, since userspace can already get the number of hardware rings directly via the amdgpu INFO ioctl.
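For reference, a minimal userspace sketch of that query, assuming libdrm_amdgpu and the GFX IP block; the ring count is the number of bits set in the available_rings mask returned by the INFO ioctl:

    #include <amdgpu.h>
    #include <amdgpu_drm.h>

    /* Count the GFX rings exposed to userspace via the amdgpu INFO
     * ioctl; available_rings has one bit set per usable ring. */
    static int gfx_ring_count(amdgpu_device_handle dev)
    {
        struct drm_amdgpu_info_hw_ip info = {};
        int r;

        r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &info);
        if (r)
            return r;

        return __builtin_popcount(info.available_rings);
    }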

Regards,
Jesse

Regards,
Christian.

>
> Signed-off-by: Jesse Zhang <Jesse.Zhang@xxxxxxx>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   |  2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  |  2 +-
>   drivers/gpu/drm/etnaviv/etnaviv_sched.c  |  2 +-
>   drivers/gpu/drm/imagination/pvr_queue.c  |  2 +-
>   drivers/gpu/drm/lima/lima_sched.c        |  2 +-
>   drivers/gpu/drm/msm/msm_gem_submit.c     |  2 +-
>   drivers/gpu/drm/nouveau/nouveau_sched.c  |  2 +-
>   drivers/gpu/drm/panfrost/panfrost_job.c  |  2 +-
>   drivers/gpu/drm/scheduler/sched_entity.c | 11 +++++++++--
>   drivers/gpu/drm/scheduler/sched_main.c   |  4 ++--
>   drivers/gpu/drm/v3d/v3d_submit.c         |  2 +-
>   include/drm/gpu_scheduler.h              |  4 ++--
>   12 files changed, 22 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index d891ab779ca7..18887128a973 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -1286,7 +1286,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>       int r;
>
>       for (i = 0; i < p->gang_size; ++i)
> -             drm_sched_job_arm(&p->jobs[i]->base);
> +             drm_sched_job_arm(&p->jobs[i]->base, -1);
>
>       for (i = 0; i < p->gang_size; ++i) {
>               struct dma_fence *fence;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 717adcedf096..8d75ffa9a097 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -320,7 +320,7 @@ struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
>   {
>       struct dma_fence *f;
>
> -     drm_sched_job_arm(&job->base);
> +     drm_sched_job_arm(&job->base, -1);
>       f = dma_fence_get(&job->base.s_fence->finished);
>       amdgpu_job_free_resources(job);
>       drm_sched_entity_push_job(&job->base);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> index 62dcfdc7894d..98d003757af1 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> @@ -107,7 +107,7 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
>        */
>       mutex_lock(&gpu->sched_lock);
>
> -     drm_sched_job_arm(&submit->sched_job);
> +     drm_sched_job_arm(&submit->sched_job, -1);
>
>       submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
>       ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
> diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
> index 5ed9c98fb599..ed7398a0ff21 100644
> --- a/drivers/gpu/drm/imagination/pvr_queue.c
> +++ b/drivers/gpu/drm/imagination/pvr_queue.c
> @@ -1115,7 +1115,7 @@ int pvr_queue_job_init(struct pvr_job *job)
>    */
>   struct dma_fence *pvr_queue_job_arm(struct pvr_job *job)
>   {
> -     drm_sched_job_arm(&job->base);
> +     drm_sched_job_arm(&job->base, -1);
>
>       return &job->base.s_fence->finished;
>   }
> diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
> index bbf3f8feab94..cc83b2aab9ce 100644
> --- a/drivers/gpu/drm/lima/lima_sched.c
> +++ b/drivers/gpu/drm/lima/lima_sched.c
> @@ -130,7 +130,7 @@ int lima_sched_task_init(struct lima_sched_task *task,
>               return err;
>       }
>
> -     drm_sched_job_arm(&task->base);
> +     drm_sched_job_arm(&task->base, -1);
>
>       task->num_bos = num_bos;
>       task->vm = lima_vm_get(vm);
> diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
> index fba78193127d..74c4e1b4df78 100644
> --- a/drivers/gpu/drm/msm/msm_gem_submit.c
> +++ b/drivers/gpu/drm/msm/msm_gem_submit.c
> @@ -831,7 +831,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
>               goto out;
>       }
>
> -     drm_sched_job_arm(&submit->base);
> +     drm_sched_job_arm(&submit->base, -1);
>
>       submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
>
> diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
> index 32fa2e273965..3ff8142b5370 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_sched.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
> @@ -309,7 +309,7 @@ nouveau_job_submit(struct nouveau_job *job)
>       list_add(&job->entry, &sched->job.list.head);
>       spin_unlock(&sched->job.list.lock);
>
> -     drm_sched_job_arm(&job->base);
> +     drm_sched_job_arm(&job->base, -1);
>       job->done_fence = dma_fence_get(&job->base.s_fence->finished);
>       if (job->sync)
>               done_fence = dma_fence_get(job->done_fence);
>
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> index a61ef0af9a4e..cc937420cd35 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -301,7 +301,7 @@ int panfrost_job_push(struct panfrost_job *job)
>               return ret;
>
>       mutex_lock(&pfdev->sched_lock);
> -     drm_sched_job_arm(&job->base);
> +     drm_sched_job_arm(&job->base, -1);
>
>       job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
>
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index 58c8161289fe..f4669422b3f9 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -525,7 +525,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>       return sched_job;
>   }
>
> -void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
> +void drm_sched_entity_select_rq(struct drm_sched_entity *entity, int ring)
>   {
>       struct dma_fence *fence;
>       struct drm_gpu_scheduler *sched;
> @@ -554,7 +554,14 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
>               return;
>
>       spin_lock(&entity->rq_lock);
> -     sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
> +     if (ring >= 0 && ring < entity->num_sched_list) {
> +             if (entity->sched_list[ring] && entity->sched_list[ring]->ready)
> +                     sched = entity->sched_list[ring];
> +             else
> +                     sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
> +     } else {
> +             sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
> +     }
>       rq = sched ? sched->sched_rq[entity->priority] : NULL;
>       if (rq != entity->rq) {
>               drm_sched_rq_remove_entity(entity->rq, entity);
>
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 7e90c9f95611..356adf510670 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -833,13 +833,13 @@ EXPORT_SYMBOL(drm_sched_job_init);
>    *
>    * This can only be called if drm_sched_job_init() succeeded.
>    */
> -void drm_sched_job_arm(struct drm_sched_job *job)
> +void drm_sched_job_arm(struct drm_sched_job *job, int ring)
>   {
>       struct drm_gpu_scheduler *sched;
>       struct drm_sched_entity *entity = job->entity;
>
>       BUG_ON(!entity);
> -     drm_sched_entity_select_rq(entity);
> +     drm_sched_entity_select_rq(entity, ring);
>       sched = entity->rq->sched;
>
>       job->sched = sched;
> diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
> index 88f63d526b22..d33749017f93 100644
> --- a/drivers/gpu/drm/v3d/v3d_submit.c
> +++ b/drivers/gpu/drm/v3d/v3d_submit.c
> @@ -211,7 +211,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
>   static void
>   v3d_push_job(struct v3d_job *job)
>   {
> -     drm_sched_job_arm(&job->base);
> +     drm_sched_job_arm(&job->base, -1);
>
>       job->done_fence = dma_fence_get(&job->base.s_fence->finished);
>
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index 5acc64954a88..0eab405a2683 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -553,7 +553,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
>   int drm_sched_job_init(struct drm_sched_job *job,
>                      struct drm_sched_entity *entity,
>                      u32 credits, void *owner);
> -void drm_sched_job_arm(struct drm_sched_job *job);
> +void drm_sched_job_arm(struct drm_sched_job *job, int ring);
>   int drm_sched_job_add_dependency(struct drm_sched_job *job,
>                                struct dma_fence *fence);
>   int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
> @@ -603,7 +603,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
>   long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
>   void drm_sched_entity_fini(struct drm_sched_entity *entity);
>   void drm_sched_entity_destroy(struct drm_sched_entity *entity);
> -void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
> -void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
> +void drm_sched_entity_select_rq(struct drm_sched_entity *entity, int ring);
>   struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
>   void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
>   void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
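
Purely as illustration (and moot given the NAK above): a driver-side debug path could have pinned a test submission to one scheduler instance with the extended interface. amdgpu_job_submit_on_ring() below is a hypothetical helper modeled on the amdgpu_job_submit() hunk above, not existing code:

    /* Hypothetical debug-only submission path: pin the job to the
     * scheduler instance selected by `ring` instead of letting
     * drm_sched_pick_best() choose; ring == -1 keeps the existing
     * load-balancing behaviour. */
    static struct dma_fence *amdgpu_job_submit_on_ring(struct amdgpu_job *job,
                                                       int ring)
    {
        struct dma_fence *f;

        drm_sched_job_arm(&job->base, ring);
        f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        drm_sched_entity_push_job(&job->base);

        return f;
    }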
