v2: move the sync wait so it happens only when a flush is needed

Change-Id: I64da2701c9fdcf986afb90ba1492a78d5bef1b6c
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 61 ++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f7113b9..2c0b453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -408,6 +408,63 @@ static bool amdgpu_vm_dedicated_vmid_ready(struct amdgpu_vm *vm)
 	return true;
 }
 
+static int amdgpu_vm_grab_dedicated_vmid(struct amdgpu_vm *vm,
+					 struct amdgpu_ring *ring,
+					 struct amdgpu_sync *sync,
+					 struct fence *fence,
+					 struct amdgpu_job *job)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vm_id *id = vm->dedicated_vmid[vmhub];
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct fence *updates = sync->last_vm_update;
+	int r = 0;
+	struct fence *flushed, *tmp;
+	bool needs_flush = false;
+
+	mutex_lock(&id_mgr->lock);
+	if (amdgpu_vm_had_gpu_reset(adev, id))
+		needs_flush = true;
+
+	flushed = id->flushed_updates;
+	if (updates && (!flushed || updates->context != flushed->context ||
+			fence_is_later(updates, flushed)))
+		needs_flush = true;
+	if (needs_flush) {
+		tmp = amdgpu_sync_get_fence(&id->active);
+		if (tmp) {
+			r = amdgpu_sync_fence(adev, sync, tmp);
+			fence_put(tmp);
+			mutex_unlock(&id_mgr->lock);
+			return r;
+		}
+	}
+
+	/* Good, we can use this VMID. Remember this submission as
+	 * user of the VMID.
+	 */
+	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+	if (r)
+		goto out;
+
+	if (updates && (!flushed || updates->context != flushed->context ||
+			fence_is_later(updates, flushed))) {
+		fence_put(id->flushed_updates);
+		id->flushed_updates = fence_get(updates);
+	}
+	id->pd_gpu_addr = job->vm_pd_addr;
+	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
+	atomic64_set(&id->owner, vm->client_id);
+	job->vm_needs_flush = needs_flush;
+
+	job->vm_id = id - id_mgr->ids;
+	trace_amdgpu_vm_grab_id(vm, ring, job);
+out:
+	mutex_unlock(&id_mgr->lock);
+	return r;
+}
+
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
@@ -432,6 +489,10 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	unsigned i;
 	int r = 0;
 
+	if (amdgpu_vm_dedicated_vmid_ready(vm))
+		return amdgpu_vm_grab_dedicated_vmid(vm, ring, sync,
+						     fence, job);
+
 	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
 	if (!fences)
 		return -ENOMEM;
-- 
1.9.1
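
A note for readers: the heart of this patch is the needs_flush test, which decides whether the dedicated VMID still has pending page-table updates to flush, and therefore whether the job must first wait on the VMID's active fences. Below is a small standalone userspace sketch of just that decision. The names model_fence and vmid_needs_flush are simplified, hypothetical stand-ins for the kernel's struct fence machinery, not the real amdgpu API; only the comparison logic mirrors the patch.

/*
 * Standalone model of the needs_flush test in
 * amdgpu_vm_grab_dedicated_vmid(). model_fence is a simplified
 * stand-in for the kernel's struct fence, not a real kernel type.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_fence {
	unsigned context;	/* timeline the fence belongs to */
	unsigned seqno;		/* position on that timeline */
};

/* Models the kernel's fence_is_later(): later on the same timeline. */
static bool fence_is_later(const struct model_fence *a,
			   const struct model_fence *b)
{
	return a->seqno > b->seqno;
}

/*
 * A flush is needed when the VM has page-table updates ("updates")
 * that the VMID has not yet flushed ("flushed"): nothing was ever
 * flushed, the two fences live on different timelines and cannot be
 * ordered, or the update fence is strictly later than the flushed
 * one. A GPU reset always invalidates the VMID state.
 */
static bool vmid_needs_flush(const struct model_fence *updates,
			     const struct model_fence *flushed,
			     bool had_gpu_reset)
{
	if (had_gpu_reset)
		return true;
	return updates && (!flushed ||
			   updates->context != flushed->context ||
			   fence_is_later(updates, flushed));
}

int main(void)
{
	struct model_fence flushed = { .context = 1, .seqno = 10 };
	struct model_fence older   = { .context = 1, .seqno = 9  };
	struct model_fence newer   = { .context = 1, .seqno = 11 };

	/* Already flushed past this update: no flush, no sync wait. */
	printf("older update: %d\n", vmid_needs_flush(&older, &flushed, false));
	/* Newer update on the same timeline: flush (and wait) required. */
	printf("newer update: %d\n", vmid_needs_flush(&newer, &flushed, false));
	/* After a GPU reset the VMID state is stale: always flush. */
	printf("after reset : %d\n", vmid_needs_flush(&newer, &flushed, true));
	return 0;
}

Built with any C compiler, this prints 0, 1, 1 for the three cases: an already-flushed update needs no wait, while a newer update or a GPU reset takes the flush path, which is exactly when v2 of the patch now does the sync wait.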