On 01.02.2018 06:44, Chunming Zhou wrote:
>
>
> On 2018-01-31 23:47, Christian König wrote:
>> The variable is protected by the VMID mutex anyway.
>>
>> Signed-off-by: Christian König <christian.koenig at amd.com>
>> ---
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 10 +++++-----
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h |  2 +-
>>  2 files changed, 6 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> index fbe958f7cb5b..8374fe870e8c 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> @@ -267,7 +267,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>>      flushed = id->flushed_updates;
>>      if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
>> -       (atomic64_read(&id->owner) != vm->entity.fence_context) ||
>> +       (id->owner != vm->entity.fence_context) ||
>>          (job->vm_pd_addr != id->pd_gpu_addr) ||
>>          (updates && (!flushed || updates->context != flushed->context ||
>>              dma_fence_is_later(updates, flushed))) ||
>> @@ -296,7 +296,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>>          id->flushed_updates = dma_fence_get(updates);
>>      }
>>      id->pd_gpu_addr = job->vm_pd_addr;
>> -   atomic64_set(&id->owner, vm->entity.fence_context);
>> +   id->owner = vm->entity.fence_context;
>>      job->vm_needs_flush = needs_flush;
>>      if (needs_flush) {
>>          dma_fence_put(id->last_flush);
>> @@ -353,7 +353,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>>          if (amdgpu_vmid_had_gpu_reset(adev, id))
>>              continue;
>>
>> -       if (atomic64_read(&id->owner) != vm->entity.fence_context)
>> +       if (id->owner != vm->entity.fence_context)
>>              continue;
>>
>>          if (job->vm_pd_addr != id->pd_gpu_addr)
>> @@ -402,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>>      id->pd_gpu_addr = job->vm_pd_addr;
>>      dma_fence_put(id->flushed_updates);
>>      id->flushed_updates = dma_fence_get(updates);
>> -   atomic64_set(&id->owner, vm->entity.fence_context);
>> +   id->owner = vm->entity.fence_context;
>>
>>  needs_flush:
>>      job->vm_needs_flush = true;
>> @@ -482,7 +482,7 @@ void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
>>      struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
>>      struct amdgpu_vmid *id = &id_mgr->ids[vmid];
>>
>> -   atomic64_set(&id->owner, 0);
>> +   id->owner = 0;
> This is not protected by the id_mgr mutex; you can make another patch to
> add the mutex to this function.

That function is only called in case of suspend/resume and GPU reset, but
adding the extra mutex here probably won't hurt and is indeed cleaner.

Christian.

>
> Regards,
> David Zhou
>>      id->gds_base = 0;
>>      id->gds_size = 0;
>>      id->gws_base = 0;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
>> index 38f37c16fc5e..20d4eca6cd6a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
>> @@ -43,7 +43,7 @@ struct amdgpu_vmid {
>>      struct list_head   list;
>>      struct amdgpu_sync   active;
>>      struct dma_fence   *last_flush;
>> -   atomic64_t       owner;
>> +   uint64_t       owner;
>>
>>      uint64_t       pd_gpu_addr;
>>      /* last flushed PD/PT update */
>
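
For illustration, the extra locking David suggests for amdgpu_vmid_reset()
could look roughly like the sketch below. This is only a sketch, not the
actual follow-up patch: it assumes the per-hub mutex is the id_mgr->lock
already taken by the VMID grab path, and it only spells out the fields
quoted above; the remaining per-ID state would be cleared as before.

    /* Sketch only, in the context of amdgpu_ids.c: take the per-hub VMID
     * mutex so the now plain uint64_t owner field is written under the
     * same lock as in the grab path.  The id_mgr->lock name is assumed.
     */
    void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
                           unsigned vmid)
    {
            struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
            struct amdgpu_vmid *id = &id_mgr->ids[vmid];

            mutex_lock(&id_mgr->lock);

            id->owner = 0;
            id->gds_base = 0;
            id->gds_size = 0;
            id->gws_base = 0;
            /* ...the rest of the per-ID state is cleared here as before... */

            mutex_unlock(&id_mgr->lock);
    }

With the reset path serialized like this, every writer of id->owner runs
under the same VMID mutex, which is what makes dropping the atomic64_t in
favour of a plain uint64_t safe even across suspend/resume and GPU reset.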