From: Christian König <christian.koenig@xxxxxxx>

Otherwise we can run into the following situation:

1. Process A grabs ID 1 for ring 0.
2. Process B grabs ID 1 for ring 0.
3. Process A grabs ID 1 for ring 1.
4. Process A tries to reuse ID 1 for ring 0, but thinks it doesn't need to flush.

Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f8615a4..7ac79d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -256,7 +256,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (amdgpu_vm_is_gpu_reset(adev, id))
 			continue;
 
-		if (atomic64_read(&id->owner) != vm->client_id)
+		if (atomic64_read(&id->owner) != (vm->client_id + i))
 			continue;
 
 		if (job->vm_pd_addr != id->pd_gpu_addr)
@@ -310,7 +310,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	id->pd_gpu_addr = job->vm_pd_addr;
 	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
 	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-	atomic64_set(&id->owner, vm->client_id);
+	atomic64_set(&id->owner, vm->client_id + ring->idx);
 
 	vm->ids[ring->idx] = id;
 	job->vm_id = id - adev->vm_manager.ids;
@@ -1496,7 +1496,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		vm->ids[i] = NULL;
 	vm->va = RB_ROOT;
-	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
+	vm->client_id = atomic64_add_return(AMDGPU_MAX_RINGS,
+					    &adev->vm_manager.client_counter);
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
-- 
2.5.0
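
For context, here is a minimal standalone sketch (not part of the patch; MAX_RINGS, owner_for and the numbers are made-up stand-ins) of the owner-encoding idea: client IDs are handed out in steps of the ring count, so each (VM, ring) pair compares against its own owner value and the stale match from step 4 of the scenario above no longer succeeds.

#include <stdio.h>
#include <stdint.h>

#define MAX_RINGS 16	/* stand-in for AMDGPU_MAX_RINGS */

/* owner value a VM would write into an ID when grabbing it for a ring */
static uint64_t owner_for(uint64_t client_id, unsigned int ring)
{
	return client_id + ring;	/* client_id is allocated in steps of MAX_RINGS */
}

int main(void)
{
	/* what an allocator like atomic64_add_return(MAX_RINGS, ...) might hand out */
	uint64_t client_a = 1 * MAX_RINGS;
	uint64_t id_owner;

	/* step 3: process A grabs the ID for ring 1 */
	id_owner = owner_for(client_a, 1);

	/* step 4: A asks for ring 0 again; the owner check now fails, forcing a flush */
	if (id_owner != owner_for(client_a, 0))
		printf("owner mismatch -> ID is not silently reused\n");

	return 0;
}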