That's a bit trickier since we really need hardware access for UVD and VCE,
and potentially when the VM was in use.

Signed-off-by: Christian König <christian.koenig@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 1e8c8d9c0c67..1163ece2a757 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1395,18 +1395,26 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_bo_list *list;
+	struct amdgpu_ip_block *uvd;
+	struct amdgpu_ip_block *vce;
 	struct amdgpu_bo *pd;
+	bool vm_ready;
 	u32 pasid;
 	int handle;
 
 	if (!fpriv)
 		return;
 
-	pm_runtime_get_sync(dev->dev);
+	vm_ready = amdgpu_vm_ready(&fpriv->vm);
+	uvd = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
+	vce = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
 
-	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
+	if (vm_ready || uvd || vce)
+		pm_runtime_get_sync(dev->dev);
+
+	if (uvd)
 		amdgpu_uvd_free_handles(adev, file_priv);
-	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
+	if (vce)
 		amdgpu_vce_free_handles(adev, file_priv);
 
 	if (fpriv->csa_va) {
@@ -1442,8 +1450,10 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	kfree(fpriv);
 	file_priv->driver_priv = NULL;
 
-	pm_runtime_mark_last_busy(dev->dev);
-	pm_runtime_put_autosuspend(dev->dev);
+	if (vm_ready || uvd || vce) {
+		pm_runtime_mark_last_busy(dev->dev);
+		pm_runtime_put_autosuspend(dev->dev);
+	}
 }
-- 
2.34.1
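
For context, the change follows the usual conditional runtime-PM pattern: take a
runtime PM reference only when the close path will actually touch the hardware,
and drop it only if one was taken, so get and put stay balanced. A minimal
sketch of that pattern, with a hypothetical needs_hw_access flag standing in
for the vm_ready || uvd || vce checks in the patch above:

#include <linux/pm_runtime.h>

/*
 * Illustrative only: needs_hw_access is a stand-in for the
 * vm_ready || uvd || vce condition in amdgpu_driver_postclose_kms().
 */
static void example_postclose(struct device *dev, bool needs_hw_access)
{
	/* Wake the device only if teardown will touch hardware. */
	if (needs_hw_access)
		pm_runtime_get_sync(dev);

	/* ... free handles, tear down the VM, release buffers ... */

	/* Drop the reference only if one was taken above. */
	if (needs_hw_access) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}
}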