On 2017/10/27 22:43, Christian König wrote:
> From: Christian König <christian.koenig at amd.com>
>
> Rename amdgpu_gtt_mgr_is_allocated() to amdgpu_gtt_mgr_has_gart_addr() and use
> that instead.
>
> v2: rename the function as well.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>

Reviewed-by: Chunming Zhou <david1.zhou at amd.com>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c |  6 +++---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c     | 24 +++++++++---------------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h     |  3 +--
>  5 files changed, 15 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> index 33535d3..29c5c3e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> @@ -85,13 +85,13 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
>  }
>  
>  /**
> - * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
> + * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
>   *
>   * @mem: the mem object to check
>   *
>   * Check if a mem object has already address space allocated.
>   */
> -bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
> +bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
>  {
>          struct drm_mm_node *node = mem->mm_node;
>  
> @@ -120,7 +120,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
>          unsigned long fpfn, lpfn;
>          int r;
>  
> -        if (amdgpu_gtt_mgr_is_allocated(mem))
> +        if (amdgpu_gtt_mgr_has_gart_addr(mem))
>                  return 0;
>  
>          if (place)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index b3351dc21..e44b880 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -982,7 +982,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
>  {
>          WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
>          WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
> -                     !amdgpu_ttm_is_bound(bo->tbo.ttm));
> +                     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
>          WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
>                       !bo->pin_count);
>          WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
> index 428aae04..33615e2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
> @@ -187,7 +187,7 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
>  static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
>  {
>          switch (bo->tbo.mem.mem_type) {
> -        case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
> +        case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
>          case TTM_PL_VRAM: return true;
>          default: return false;
>          }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 52cab7a..632bfe3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -282,8 +282,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
>  {
>          uint64_t addr = 0;
>  
> -        if (mem->mem_type != TTM_PL_TT ||
> -            amdgpu_gtt_mgr_is_allocated(mem)) {
> +        if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
>                  addr = mm_node->start << PAGE_SHIFT;
>                  addr += bo->bdev->man[mem->mem_type].gpu_offset;
>          }
> @@ -369,7 +368,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
>           * dst to window 1
>           */
>          if (src->mem->mem_type == TTM_PL_TT &&
> -            !amdgpu_gtt_mgr_is_allocated(src->mem)) {
> +            !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
>                  r = amdgpu_map_buffer(src->bo, src->mem,
>                                        PFN_UP(cur_size + src_page_offset),
>                                        src_node_start, 0, ring,
> @@ -383,7 +382,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
>          }
>  
>          if (dst->mem->mem_type == TTM_PL_TT &&
> -            !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
> +            !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
>                  r = amdgpu_map_buffer(dst->bo, dst->mem,
>                                        PFN_UP(cur_size + dst_page_offset),
>                                        dst_node_start, 1, ring,
> @@ -861,8 +860,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
>              bo_mem->mem_type == AMDGPU_PL_OA)
>                  return -EINVAL;
>  
> -        if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
> +        if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
> +                gtt->offset = AMDGPU_BO_INVALID_OFFSET;
>                  return 0;
> +        }
>  
>          spin_lock(&gtt->adev->gtt_list_lock);
>          flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
> @@ -882,24 +883,17 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
>          return r;
>  }
>  
> -bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
> -{
> -        struct amdgpu_ttm_tt *gtt = (void *)ttm;
> -
> -        return gtt && !list_empty(&gtt->list);
> -}
> -
>  int amdgpu_ttm_bind(struct ttm_buffer_object *bo)
>  {
>          struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
> -        struct ttm_tt *ttm = bo->ttm;
>          struct ttm_mem_reg tmp;
>  
>          struct ttm_placement placement;
>          struct ttm_place placements;
>          int r;
>  
> -        if (!ttm || amdgpu_ttm_is_bound(ttm))
> +        if (bo->mem.mem_type != TTM_PL_TT ||
> +            amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
>                  return 0;
>  
>          tmp = bo->mem;
> @@ -960,7 +954,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
>          if (gtt->userptr)
>                  amdgpu_ttm_tt_unpin_userptr(ttm);
>  
> -        if (!amdgpu_ttm_is_bound(ttm))
> +        if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
>                  return 0;
>  
>          /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index 0108a59..016d2af 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -67,7 +67,7 @@ struct amdgpu_copy_mem {
>  extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
>  extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
>  
> -bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
> +bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
>  uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
>  
>  uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
> @@ -90,7 +90,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
>                         struct dma_fence **fence);
>  
>  int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
> -bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
>  int amdgpu_ttm_bind(struct ttm_buffer_object *bo);
>  int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
> 
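
The functional piece of the patch, beyond the rename, is that amdgpu_ttm_backend_bind() now records AMDGPU_BO_INVALID_OFFSET in gtt->offset when the placement has no GART address yet, so amdgpu_ttm_backend_unbind() can test that sentinel instead of the old gtt->list based amdgpu_ttm_is_bound() check. Below is a minimal stand-alone C sketch of that sentinel pattern only; struct fake_gtt, bind_bo(), unbind_bo() and BO_INVALID_OFFSET are illustrative stand-ins, not the driver's types or values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's invalid-offset sentinel. */
#define BO_INVALID_OFFSET ((uint64_t)-1)

/* Illustrative stand-in for the per-ttm GART state. */
struct fake_gtt {
        uint64_t offset;  /* GART offset, or BO_INVALID_OFFSET if none assigned */
};

/* Bind only records an offset when address space was actually allocated;
 * otherwise it stores the sentinel, mirroring the new backend_bind() path. */
static void bind_bo(struct fake_gtt *gtt, bool has_gart_addr, uint64_t addr)
{
        gtt->offset = has_gart_addr ? addr : BO_INVALID_OFFSET;
}

/* Unbind bails out early when no GART address was ever set, mirroring the
 * new "gtt->offset == AMDGPU_BO_INVALID_OFFSET" early return. */
static void unbind_bo(struct fake_gtt *gtt)
{
        if (gtt->offset == BO_INVALID_OFFSET) {
                puts("unbind: no GART address, nothing to clear");
                return;
        }
        printf("unbind: clearing GART mapping at 0x%llx\n",
               (unsigned long long)gtt->offset);
        gtt->offset = BO_INVALID_OFFSET;
}

int main(void)
{
        struct fake_gtt gtt;

        bind_bo(&gtt, false, 0);       /* placement without address space yet */
        unbind_bo(&gtt);               /* takes the early-out path */

        bind_bo(&gtt, true, 0x100000); /* placement with a GART address */
        unbind_bo(&gtt);               /* takes the real unbind path */
        return 0;
}

Keeping the bound/unbound state in the offset itself is what lets the separate list bookkeeping behind amdgpu_ttm_is_bound() go away.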