On 27.02.2018 at 09:47, Monk Liu wrote:
> SR-IOV doesn't give the VF the cg/pg feature, so the MM idle_work
> is skipped for SR-IOV.
>
> v2:
> remove superfluous changes; since idle_work is not scheduled for SR-IOV,
> the SR-IOV check inside idle_work can also be dropped
>
> Change-Id: I6dd7ea48d23b0fee74ecb9e93b53bfe36b0e8164
> Signed-off-by: Monk Liu <Monk.Liu at amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 12 ++++++------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c |  9 ++++-----
>  2 files changed, 10 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 9cd5517..85dc7fc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -297,7 +297,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
>  	if (adev->uvd.vcpu_bo == NULL)
>  		return 0;
>
> -	cancel_delayed_work_sync(&adev->uvd.idle_work);
> +	if (!amdgpu_sriov_vf(adev))
> +		cancel_delayed_work_sync(&adev->uvd.idle_work);

Drop that; canceling a work item which was never scheduled is harmless,
and this code isn't performance critical.

>
>  	for (i = 0; i < adev->uvd.max_handles; ++i)
>  		if (atomic_read(&adev->uvd.handles[i]))
> @@ -1116,9 +1117,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
>  		container_of(work, struct amdgpu_device, uvd.idle_work.work);
>  	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
>
> -	if (amdgpu_sriov_vf(adev))
> -		return;
> -
>  	if (fences == 0) {
>  		if (adev->pm.dpm_enabled) {
>  			amdgpu_dpm_enable_uvd(adev, false);
> @@ -1138,11 +1136,12 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
>  void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
>  {
>  	struct amdgpu_device *adev = ring->adev;
> -	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
> +	bool set_clocks;
>
>  	if (amdgpu_sriov_vf(adev))
>  		return;
>
> +	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
>  	if (set_clocks) {
>  		if (adev->pm.dpm_enabled) {
>  			amdgpu_dpm_enable_uvd(adev, true);
> @@ -1158,7 +1157,8 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
>
>  void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
>  {
> -	schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
> +	if (!amdgpu_sriov_vf(ring->adev))
> +		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
>  }
>
>  /**
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> index d274ae5..a5239b8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> @@ -241,7 +241,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
>  	if (i == AMDGPU_MAX_VCE_HANDLES)
>  		return 0;
>
> -	cancel_delayed_work_sync(&adev->vce.idle_work);
> +	if (!amdgpu_sriov_vf(adev))
> +		cancel_delayed_work_sync(&adev->vce.idle_work);

Ditto.

Apart from that the patch looks good to me,
Christian.

>  	/* TODO: suspending running encoding sessions isn't supported */
>  	return -EINVAL;
>  }
> @@ -300,9 +301,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
>  		container_of(work, struct amdgpu_device, vce.idle_work.work);
>  	unsigned i, count = 0;
>
> -	if (amdgpu_sriov_vf(adev))
> -		return;
> -
>  	for (i = 0; i < adev->vce.num_rings; i++)
>  		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
>
> @@ -362,7 +360,8 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
>   */
>  void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
>  {
> -	schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
> +	if (!amdgpu_sriov_vf(ring->adev))
> +		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
>  }
>
>  /**
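For readers less familiar with the workqueue API, here is a minimal, hypothetical
sketch (not part of the patch; the demo_* names are made up for illustration) of
the behaviour the review comment relies on: cancel_delayed_work_sync() on a
delayed work item that was never queued simply returns false and does nothing
beyond its synchronisation, which is why the extra !amdgpu_sriov_vf() guards in
the suspend paths buy nothing.

/*
 * Hypothetical standalone module sketch, not part of the amdgpu patch:
 * shows that cancelling an unscheduled delayed work is a harmless no-op.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_idle_work_handler(struct work_struct *work)
{
        pr_info("demo: idle work ran\n");
}

static DECLARE_DELAYED_WORK(demo_idle_work, demo_idle_work_handler);

static int __init demo_init(void)
{
        bool was_pending;

        /* Never scheduled: safe to cancel, returns false. */
        was_pending = cancel_delayed_work_sync(&demo_idle_work);
        pr_info("demo: cancel of unscheduled work returned %d\n", was_pending);

        /* Scheduled and then cancelled: returns true, handler never runs. */
        schedule_delayed_work(&demo_idle_work, HZ);
        was_pending = cancel_delayed_work_sync(&demo_idle_work);
        pr_info("demo: cancel of pending work returned %d\n", was_pending);

        return 0;
}

static void __exit demo_exit(void)
{
        cancel_delayed_work_sync(&demo_idle_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This is also why the patch only needs the SR-IOV guard around
schedule_delayed_work(): on a VF the idle work is then never queued in the
first place, and the unconditional cancel in the suspend paths remains a no-op.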