On 2017/10/23 22:26, Christian König wrote:
>> The reason I prefer to keep is_bound in the name is that we can only
>> get the GPU offset after the GTT is bound, not by checking whether the
>> node is allocated.
> No, it is just the other way around.
>
> When the TTM is bound it doesn't mean we can get the GPU offset,
> because it can be that there wasn't any GART space assigned.

Sorry, I didn't say it clearly. I don't mean TTM bound but GART bound: we
can only get the GPU offset after calling amdgpu_gart_bind(), not merely
after the node is allocated, so I thought amdgpu_gtt_is_bound makes more
sense than the old amdgpu_ttm_is_bound.

> But if the GART space was allocated, we can be sure that we also have
> a GPU offset to use.

I know. In all current use cases we always do the GART bind when the GART
node is allocated, so I'd like amdgpu_gtt_is_bound() to be a wrapper
around amdgpu_gtt_mgr_is_allocated().
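
Something like the below is what I have in mind (just an untested sketch;
note I switched the parameter to the ttm_mem_reg, since that is what
amdgpu_gtt_mgr_is_allocated() already takes, rather than the ttm_tt from
my earlier snippet):

    /* Sketch only: in the current code a GTT node is only allocated
     * together with the GART bind, so "allocated" implies "bound" and
     * a usable GPU offset.
     */
    static inline bool amdgpu_gtt_is_bound(struct ttm_mem_reg *bo_mem)
    {
            return amdgpu_gtt_mgr_is_allocated(bo_mem);
    }

That keeps the name describing what callers actually want to know (can the
GPU offset be used?), and if the allocated == bound assumption ever
changes we only have to adjust this one helper.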

Regards,
David Zhou

> Regards,
> Christian.
>
> On 23.10.2017 at 10:34, Chunming Zhou wrote:
>> Yes, I see the check in backend_bind:
>>
>>        if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
>>                return 0;
>>
>> OK, then my only concern is that your "instead" assumes
>> amdgpu_gtt_mgr_is_allocated means the GART is bound. How about:
>>
>> bool amdgpu_gtt_is_bound(struct ttm_tt *ttm)
>> {
>>    return amdgpu_gtt_mgr_is_allocated(bo_mem);
>> }
>>
>> The reason I prefer to keep is_bound in the name is that we can only
>> get the GPU offset after the GTT is bound, not by checking whether the
>> node is allocated.
>>
>> Regards,
>> David Zhou
>> On 2017/10/23 16:11, Christian König wrote:
>>> On 23.10.2017 at 09:33, Chunming Zhou wrote:
>>>> Using ttm->state == tt_bound seems to make more sense.
>>>
>>> That won't work. We set ttm->state = tt_bound even when we haven't
>>> allocated GART space.
>>>
>>> That's also a reason why I wanted to remove it; the name is confusing.
>>>
>>> Christian.
>>>>
>>>> On 2017/10/20 17:20, Christian König wrote:
>>>>> From: Christian König <christian.koenig at amd.com>
>>>>>
>>>>> use amdgpu_gtt_mgr_is_allocated() instead.
>>>>>
>>>>> Signed-off-by: Christian König <christian.koenig at amd.com>
>>>>> ---
>>>>>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  2 +-
>>>>>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |  2 +-
>>>>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 15 +++++----------
>>>>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |  1 -
>>>>>  4 files changed, 7 insertions(+), 13 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>>>>> index 8b4ed8a98a18..71a0c1a477ae 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>>>>> @@ -980,7 +980,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
>>>>>  {
>>>>>      WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
>>>>>      WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
>>>>> -            !amdgpu_ttm_is_bound(bo->tbo.ttm));
>>>>> +            !amdgpu_gtt_mgr_is_allocated(&bo->tbo.mem));
>>>>>      WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
>>>>>               !bo->pin_count);
>>>>>      WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
>>>>> index 428aae048f4b..3affcc497be6 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
>>>>> @@ -187,7 +187,7 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
>>>>>  static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
>>>>>  {
>>>>>      switch (bo->tbo.mem.mem_type) {
>>>>> -   case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
>>>>> +   case TTM_PL_TT: return amdgpu_gtt_mgr_is_allocated(&bo->tbo.mem);
>>>>>      case TTM_PL_VRAM: return true;
>>>>>      default: return false;
>>>>>      }
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>>>>> index 32c822f3db11..1c9faa1a53ce 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>>>>> @@ -861,8 +861,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
>>>>>          bo_mem->mem_type == AMDGPU_PL_OA)
>>>>>          return -EINVAL;
>>>>>
>>>>> -   if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
>>>>> +   if (!amdgpu_gtt_mgr_is_allocated(bo_mem)) {
>>>>> +       gtt->offset = AMDGPU_BO_INVALID_OFFSET;
>>>>>          return 0;
>>>>> +   }
>>>>>
>>>>>      spin_lock(&gtt->adev->gtt_list_lock);
>>>>>      flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
>>>>> @@ -882,13 +884,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
>>>>>      return r;
>>>>>  }
>>>>>
>>>>> -bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
>>>>> -{
>>>>> -   struct amdgpu_ttm_tt *gtt = (void *)ttm;
>>>>> -
>>>>> -   return gtt && !list_empty(&gtt->list);
>>>>> -}
>>>>> -
>>>>>  int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
>>>>>  {
>>>>>      struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
>>>>> @@ -899,7 +894,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
>>>>>      struct ttm_place placements;
>>>>>      int r;
>>>>>
>>>>> -   if (!ttm || amdgpu_ttm_is_bound(ttm))
>>>>> +   if (!ttm || amdgpu_gtt_mgr_is_allocated(bo_mem))
>>>>>          return 0;
>>>>>
>>>>>      tmp = bo->mem;
>>>>> @@ -960,7 +955,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
>>>>>      if (gtt->userptr)
>>>>>          amdgpu_ttm_tt_unpin_userptr(ttm);
>>>>>
>>>>> -   if (!amdgpu_ttm_is_bound(ttm))
>>>>> +   if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
>>>>>          return 0;
>>>>>
>>>>>      /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>>>>> index abd4084982a3..89a71abc71e0 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>>>>> @@ -90,7 +90,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
>>>>>              struct dma_fence **fence);
>>>>>
>>>>>  int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
>>>>> -bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
>>>>>  int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
>>>>>  int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
>>>>
>>>
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx at lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
>