Use LRU-based queue mapping for SDMA rings instead of a fixed identity mapping, so that userspace queues are distributed across the available SDMA engines. Spreading the load across multiple SDMA engines can increase memory transfer performance.