With this we can now terminate jobs enqueued into the SW queue the moment the
task is killed, instead of waiting for the last user of the drm file to
release it.

Also stop checking for kref_read(&ctx->refcount) == 1 when calling
drm_sched_entity_do_release, since another task might still hold a reference
to this entity; we don't care, because KILL means terminating job submission
regardless of what other tasks are doing.

v2: Use the returned remaining timeout as the wait parameter for the next
call. Rebase.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 17 +++++++++++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 ++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  1 -
 3 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c5bb362..cbf2fcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -449,26 +449,31 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
 	uint32_t id, i;
+	unsigned ret;
+	unsigned max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY_MS;
 
 	idp = &mgr->ctx_handles;
 
+	mutex_lock(&mgr->lock);
 	idr_for_each_entry(idp, ctx, id) {
 
-		if (!ctx->adev)
+		if (!ctx->adev) {
+			mutex_unlock(&mgr->lock);
 			return;
+		}
 
 		for (i = 0; i < ctx->adev->num_rings; i++) {
 
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;
 
-			if (kref_read(&ctx->refcount) == 1)
-				drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
-						  &ctx->rings[i].entity);
-			else
-				DRM_ERROR("ctx %p is still alive\n", ctx);
+			ret = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+					  &ctx->rings[i].entity, max_wait);
+
+			max_wait = max_wait >= ret ? max_wait - ret : 0;
 		}
 	}
+	mutex_unlock(&mgr->lock);
 }
 
 void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b0bf2f2..a549483 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -855,9 +855,21 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
 	.runtime_idle = amdgpu_pmops_runtime_idle,
 };
 
+static int amdgpu_flush(struct file *f, fl_owner_t id)
+{
+	struct drm_file *file_priv = f->private_data;
+	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+	amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
+
+	return 0;
+}
+
+
 static const struct file_operations amdgpu_driver_kms_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
+	.flush = amdgpu_flush,
 	.release = drm_release,
 	.unlocked_ioctl = amdgpu_drm_ioctl,
 	.mmap = amdgpu_mmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index ca21549..1239384 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -930,7 +930,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		return;
 
 	pm_runtime_get_sync(dev->dev);
-	amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
 
 	if (adev->asic_type != CHIP_RAVEN) {
 		amdgpu_uvd_free_handles(adev, file_priv);
-- 
2.7.4

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel