From: Rob Clark <robdclark@xxxxxxxxxxxx>

Container fences have burner contexts, which makes the trick to store at
most one fence per context somewhat useless if we don't unwrap array or
chain fences.

Signed-off-by: Rob Clark <robdclark@xxxxxxxxxxxx>
---
tbh, I'm not sure why we weren't doing this already, unless there is
something I'm overlooking

 drivers/gpu/drm/scheduler/sched_main.c | 43 +++++++++++++++++---------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index c2ee44d6224b..f59e5335afbb 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -41,20 +41,21 @@
  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
  *    the hardware.
  *
  * The jobs in a entity are always scheduled in the order that they were pushed.
  */

 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/dma-fence-unwrap.h>
 #include <linux/dma-resv.h>
 #include <uapi/linux/sched/types.h>

 #include <drm/drm_print.h>
 #include <drm/drm_gem.h>
 #include <drm/gpu_scheduler.h>
 #include <drm/spsc_queue.h>

 #define CREATE_TRACE_POINTS
 #include "gpu_scheduler_trace.h"
@@ -665,41 +666,27 @@ void drm_sched_job_arm(struct drm_sched_job *job)
 	sched = entity->rq->sched;

 	job->sched = sched;
 	job->s_priority = entity->rq - sched->sched_rq;
 	job->id = atomic64_inc_return(&sched->job_id_count);

 	drm_sched_fence_init(job->s_fence, job->entity);
 }
 EXPORT_SYMBOL(drm_sched_job_arm);

-/**
- * drm_sched_job_add_dependency - adds the fence as a job dependency
- * @job: scheduler job to add the dependencies to
- * @fence: the dma_fence to add to the list of dependencies.
- *
- * Note that @fence is consumed in both the success and error cases.
- *
- * Returns:
- * 0 on success, or an error on failing to expand the array.
- */
-int drm_sched_job_add_dependency(struct drm_sched_job *job,
-				 struct dma_fence *fence)
+static int _add_dependency(struct drm_sched_job *job, struct dma_fence *fence)
 {
 	struct dma_fence *entry;
 	unsigned long index;
 	u32 id = 0;
 	int ret;

-	if (!fence)
-		return 0;
-
 	/* Deduplicate if we already depend on a fence from the same context.
 	 * This lets the size of the array of deps scale with the number of
 	 * engines involved, rather than the number of BOs.
 	 */
 	xa_for_each(&job->dependencies, index, entry) {
 		if (entry->context != fence->context)
 			continue;

 		if (dma_fence_is_later(fence, entry)) {
 			dma_fence_put(entry);
@@ -709,20 +696,46 @@ int drm_sched_job_add_dependency(struct drm_sched_job *job,
 		}
 		return 0;
 	}

 	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
 	if (ret != 0)
 		dma_fence_put(fence);

 	return ret;
 }
+
+/**
+ * drm_sched_job_add_dependency - adds the fence as a job dependency
+ * @job: scheduler job to add the dependencies to
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Note that @fence is consumed in both the success and error cases.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+				 struct dma_fence *fence)
+{
+	struct dma_fence_unwrap iter;
+	struct dma_fence *f;
+	int ret = 0;
+
+	dma_fence_unwrap_for_each (f, &iter, fence) {
+		ret = _add_dependency(job, f);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
 EXPORT_SYMBOL(drm_sched_job_add_dependency);

 /**
  * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
  * @job: scheduler job to add the dependencies to
  * @resv: the dma_resv object to get the fences from
  * @usage: the dma_resv_usage to use to filter the fences
  *
  * This adds all fences matching the given usage from @resv to @job.
  * Must be called with the @resv lock held.
-- 
2.39.2
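
For readers who haven't hit this before, here is a minimal caller-side
sketch of the situation the unwrap loop addresses. This is illustrative
code only, not part of the patch: example_add_in_fences() is a made-up
driver helper, but dma_fence_array_create() and dma_fence_context_alloc()
are the real kernel APIs. The point is that the container fence itself is
created with a freshly allocated, one-off ("burner") context:

/* Hypothetical driver path, for illustration only: collecting several
 * in-fences into a dma_fence_array and handing it to the scheduler as
 * a single dependency.
 */
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <drm/gpu_scheduler.h>

static int example_add_in_fences(struct drm_sched_job *job,
				 struct dma_fence **fences,
				 unsigned int count)
{
	struct dma_fence_array *array;

	/* dma_fence_array_create() keys the container with its own
	 * freshly allocated context, distinct from the contexts of the
	 * fences it wraps.
	 */
	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       1, false);
	if (!array)
		return -ENOMEM;

	/* Without unwrapping, the container is stored in the dependency
	 * xarray under its one-off context; with the unwrap loop, each
	 * wrapped fence is deduplicated against its real context.
	 */
	return drm_sched_job_add_dependency(job, &array->base);
}

Since every such container carries a unique context, the xa_for_each()
dedup in _add_dependency() can never match two of them against each
other, so the dependency array grows with the number of BOs rather than
the number of engines. Iterating the unwrapped fences restores the
scaling the dedup trick was designed around.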