On 06.12.19 at 18:33, Nirmoy Das wrote:
> The entity should not keep its own copy of the sched list and
> maintain it itself.
That is a good step, but we need to take this further.

The sched_list is static for the whole device and we shouldn't allocate
it inside the context at all.

Christian.
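To make that concrete: the scheduler list could be built once per device
and per IP type, with each context only pointing at it. Below is a rough
sketch of that direction; it is illustrative only, and the sched_list /
num_scheds fields on struct amdgpu_device as well as the helper name are
assumptions, not existing code.

/*
 * Illustrative sketch, not part of the patch. Fields like these would
 * be added to struct amdgpu_device (names assumed):
 *
 *        struct drm_gpu_scheduler *sched_list[AMDGPU_HW_IP_NUM][AMDGPU_MAX_RINGS];
 *        unsigned num_scheds[AMDGPU_HW_IP_NUM];
 *
 * Filled once at device init, so amdgpu_ctx_init_entity() can pass
 * adev->sched_list[hw_ip] straight to drm_sched_entity_init() with no
 * per-context allocation and no kfree() in amdgpu_ctx_fini().
 */
static void amdgpu_device_build_sched_list(struct amdgpu_device *adev,
                                           u32 hw_ip,
                                           struct amdgpu_ring **rings,
                                           unsigned num_rings)
{
        unsigned i;

        adev->num_scheds[hw_ip] = 0;
        for (i = 0; i < num_rings; ++i) {
                /* Skip rings which were never initialized for this device. */
                if (!rings[i]->adev)
                        continue;

                adev->sched_list[hw_ip][adev->num_scheds[hw_ip]++] =
                        &rings[i]->sched;
        }
}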
> Signed-off-by: Nirmoy Das <nirmoy.das@xxxxxxx>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 11 ++++++++---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h   |  1 +
>  drivers/gpu/drm/scheduler/sched_entity.c  | 12 +-----------
>  3 files changed, 10 insertions(+), 14 deletions(-)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index c7643af8827f..1939b414d23b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -71,7 +71,6 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
>  static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip)
>  {
>          struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
> -        struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
>          struct amdgpu_device *adev = ctx->adev;
>          unsigned num_rings = 0;
>          unsigned num_scheds = 0;
> @@ -129,16 +128,21 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip)
>                  break;
>          }
> 
> +        ctx->sched_list = kcalloc(num_rings,
> +                                  sizeof(struct drm_gpu_scheduler *), GFP_KERNEL);
> +        if (!ctx->sched_list)
> +                return -ENOMEM;
> +
>          for (i = 0; i < num_rings; ++i) {
>                  if (!rings[i]->adev)
>                          continue;
> 
> -                sched_list[num_scheds++] = &rings[i]->sched;
> +                ctx->sched_list[num_scheds++] = &rings[i]->sched;
>          }
> 
>          for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i)
>                  r = drm_sched_entity_init(&ctx->entities[hw_ip][i].entity,
> -                        ctx->init_priority, sched_list, num_scheds, &ctx->guilty);
> +                        ctx->init_priority, ctx->sched_list, num_scheds, &ctx->guilty);
> 
>          if (r)
>                  goto error_cleanup_entities;
> @@ -228,6 +232,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
>                  for (j = 0; j < amdgpu_sched_jobs; ++j)
>                          dma_fence_put(ctx->entities[0][i].fences[j]);
>          kfree(ctx->fences);
> +        kfree(ctx->sched_list);
>          kfree(ctx->entities[0]);
> 
>          mutex_destroy(&ctx->lock);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> index da808633732b..9fd02cc47352 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> @@ -44,6 +44,7 @@ struct amdgpu_ctx {
>          spinlock_t                      ring_lock;
>          struct dma_fence                **fences;
>          struct amdgpu_ctx_entity        *entities[AMDGPU_HW_IP_NUM];
> +        struct drm_gpu_scheduler        **sched_list;
>          bool                            preamble_presented;
>          enum drm_sched_priority         init_priority;
>          enum drm_sched_priority         override_priority;
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index f9b6ce29c58f..c84a9a66f7f0 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -56,8 +56,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
>                            unsigned int num_sched_list,
>                            atomic_t *guilty)
>  {
> -        int i;
> -
>          if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
>                  return -EINVAL;
> 
> @@ -67,17 +65,10 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
>          entity->guilty = guilty;
>          entity->num_sched_list = num_sched_list;
>          entity->priority = priority;
> -        entity->sched_list = kcalloc(num_sched_list,
> -                sizeof(struct drm_gpu_scheduler *), GFP_KERNEL);
> -
> -        if(!entity->sched_list)
> -                return -ENOMEM;
> +        entity->sched_list = sched_list;
> 
>          init_completion(&entity->entity_idle);
> 
> -        for (i = 0; i < num_sched_list; i++)
> -                entity->sched_list[i] = sched_list[i];
> -
>          if (num_sched_list)
>                  entity->rq = &entity->sched_list[0]->sched_rq[entity->priority];
> 
> @@ -312,7 +303,6 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
> 
>          dma_fence_put(entity->last_scheduled);
>          entity->last_scheduled = NULL;
> -        kfree(entity->sched_list);
>  }
>  EXPORT_SYMBOL(drm_sched_entity_fini);
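One consequence of the sched_entity.c change is worth spelling out:
drm_sched_entity_init() now borrows the caller's sched_list array instead
of copying it, so the array must stay alive until drm_sched_entity_fini().
A minimal sketch of that contract follows; the caller and its names are
hypothetical, only drm_sched_entity_init() itself is real.

/*
 * Hypothetical caller illustrating the new ownership rule: the array
 * passed to drm_sched_entity_init() is borrowed, not copied, so it has
 * to outlive the entity (static here; per-device storage works too).
 */
static struct drm_gpu_scheduler *my_sched_list[1];

int my_init_entity(struct drm_sched_entity *entity,
                   struct drm_gpu_scheduler *sched)
{
        my_sched_list[0] = sched;

        /* After this call, entity->sched_list points at my_sched_list. */
        return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                     my_sched_list, 1, NULL);
}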
_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx