Acked-by: Luben Tuikov <luben.tuikov@xxxxxxx>

Regards,
Luben

On 2023-01-09 08:01, Christian König wrote:
> This fixes a potential memory leak of dma_fence objects in the CS code
> as well as glitches in Firefox because of missing pipeline sync.
>
> v2: use the scheduler instead of the fence context for the test
>
> Signed-off-by: Christian König <christian.koenig@xxxxxxx>
> Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2323
> Fixes: 1b2d5eda5ad7 ("drm/amdgpu: move explicit sync check into the CS")
> Tested-by: Michal Kubecek <mkubecek@xxxxxxx>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 46 +++++++++++++++++---------
>  1 file changed, 30 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 47763ac0d14a..7b5ce00f0602 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -61,6 +61,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
>  		amdgpu_ctx_put(p->ctx);
>  		return -ECANCELED;
>  	}
> +
> +	amdgpu_sync_create(&p->sync);
>  	return 0;
>  }
>
> @@ -452,18 +454,6 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
>  	}
>
>  	r = amdgpu_sync_fence(&p->sync, fence);
> -	if (r)
> -		goto error;
> -
> -	/*
> -	 * When we have an explicit dependency it might be necessary to insert a
> -	 * pipeline sync to make sure that all caches etc are flushed and the
> -	 * next job actually sees the results from the previous one.
> -	 */
> -	if (fence->context == p->gang_leader->base.entity->fence_context)
> -		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
> -
> -error:
>  	dma_fence_put(fence);
>  	return r;
>  }
>
> @@ -1188,10 +1178,19 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
>  static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
>  {
>  	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
> +	struct drm_gpu_scheduler *sched;
>  	struct amdgpu_bo_list_entry *e;
> +	struct dma_fence *fence;
>  	unsigned int i;
>  	int r;
>
> +	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
> +	if (r) {
> +		if (r != -ERESTARTSYS)
> +			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
> +		return r;
> +	}
> +
>  	list_for_each_entry(e, &p->validated, tv.head) {
>  		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
>  		struct dma_resv *resv = bo->tbo.base.resv;
> @@ -1211,10 +1210,24 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
>  		return r;
>  	}
>
> -	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
> -	if (r && r != -ERESTARTSYS)
> -		DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
> -	return r;
> +	sched = p->gang_leader->base.entity->rq->sched;
> +	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
> +		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
> +
> +		/*
> +		 * When we have a dependency it might be necessary to insert a
> +		 * pipeline sync to make sure that all caches etc are flushed and the
> +		 * next job actually sees the results from the previous one
> +		 * before we start executing on the same scheduler ring.
> +		 */
> +		if (!s_fence || s_fence->sched != sched)
> +			continue;
> +
> +		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
> +		if (r)
> +			return r;
> +	}
> +	return 0;
>  }
>
>  static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
> @@ -1347,6 +1360,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
>  {
>  	unsigned i;
>
> +	amdgpu_sync_free(&parser->sync);
>  	for (i = 0; i < parser->num_post_deps; i++) {
>  		drm_syncobj_put(parser->post_deps[i].syncobj);
>  		kfree(parser->post_deps[i].chain);
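For anyone following along: the heart of v2 is that the pipeline-sync test
now keys off the scheduler ring rather than the fence context, so it also
catches dependencies from other contexts that were submitted to the same
ring. A minimal sketch of that test, pulled out of the loop above (the
helper name is illustrative, not part of the patch):

	/* Illustrative helper, not in the patch: returns true when a
	 * dependency fence was produced on the same scheduler ring as
	 * the gang leader and therefore needs an explicit pipeline
	 * sync before the new job starts executing. */
	static bool needs_pipeline_sync(struct dma_fence *fence,
					struct drm_gpu_scheduler *sched)
	{
		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

		/* Fences not issued by the GPU scheduler (s_fence is
		 * NULL) and fences from other rings don't need it. */
		return s_fence && s_fence->sched == sched;
	}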