Spreading the load across multiple SDMA engines can increase memory
transfer performance.

Signed-off-by: Andres Rodriguez <andresx7 at gmail.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 5a7c691..e8984df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -241,38 +241,38 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 		return -EINVAL;
 	}
 
 	if (ring >= ip_num_rings) {
 		DRM_ERROR("Ring index:%d exceeds maximum:%d for ip:%d\n",
 			  ring, ip_num_rings, hw_ip);
 		return -EINVAL;
 	}
 
 	mutex_lock(&mapper->lock);
 
 	*out_ring = amdgpu_get_cached_map(mapper, ring);
 	if (*out_ring) {
 		/* cache hit */
 		r = 0;
 		goto out_unlock;
 	}
 
 	switch (mapper->hw_ip) {
 	case AMDGPU_HW_IP_GFX:
-	case AMDGPU_HW_IP_DMA:
 	case AMDGPU_HW_IP_UVD:
 	case AMDGPU_HW_IP_VCE:
 		r = amdgpu_identity_map(adev, mapper, ring, out_ring);
 		break;
+	case AMDGPU_HW_IP_DMA:
 	case AMDGPU_HW_IP_COMPUTE:
 		r = amdgpu_lru_map(adev, mapper, ring, out_ring);
 		break;
 	default:
 		*out_ring = NULL;
 		r = -EINVAL;
 		DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
 	}
 
 out_unlock:
 	mutex_unlock(&mapper->lock);
 	return r;
 }
-- 
2.9.3