On Fri, 30 Apr 2021 at 10:25, Christian König
<ckoenig.leichtzumerken@xxxxxxxxx> wrote:
>
> Init all fields in ttm_resource_alloc() when we create a new resource.
>
> Signed-off-by: Christian König <christian.koenig@xxxxxxx>
> Reviewed-by: Matthew Auld <matthew.auld@xxxxxxxxx>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |  2 --
>  drivers/gpu/drm/ttm/ttm_bo.c            | 26 ++++---------------------
>  drivers/gpu/drm/ttm/ttm_bo_util.c       |  4 ++--
>  drivers/gpu/drm/ttm/ttm_resource.c      |  9 +++++++++
>  4 files changed, 15 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 7ba761e833ba..2f7580d2ee71 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -1015,8 +1015,6 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
>  	} else {
>
>  		/* allocate GART space */
> -		tmp = bo->mem;
> -		tmp.mm_node = NULL;
>  		placement.num_placement = 1;
>  		placement.placement = &placements;
>  		placement.num_busy_placement = 1;
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index df63a07a70de..55f1ddcf22b6 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -507,11 +507,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
>  		return ttm_tt_create(bo, false);
>  	}
>
> -	evict_mem = bo->mem;
> -	evict_mem.mm_node = NULL;
> -	evict_mem.bus.offset = 0;
> -	evict_mem.bus.addr = NULL;
> -
>  	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
>  	if (ret) {
>  		if (ret != -ERESTARTSYS) {
> @@ -867,12 +862,8 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
>  				     struct ttm_place *hop)
>  {
>  	struct ttm_placement hop_placement;
> +	struct ttm_resource hop_mem;
>  	int ret;
> -	struct ttm_resource hop_mem = *mem;
> -
> -	hop_mem.mm_node = NULL;
> -	hop_mem.mem_type = TTM_PL_SYSTEM;
> -	hop_mem.placement = 0;
>
>  	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
>  	hop_placement.placement = hop_placement.busy_placement = hop;
> @@ -894,19 +885,14 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
>  			      struct ttm_placement *placement,
>  			      struct ttm_operation_ctx *ctx)
>  {
> -	int ret = 0;
>  	struct ttm_place hop;
>  	struct ttm_resource mem;
> +	int ret;
>
>  	dma_resv_assert_held(bo->base.resv);
>
>  	memset(&hop, 0, sizeof(hop));
>
> -	mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
> -	mem.bus.offset = 0;
> -	mem.bus.addr = NULL;
> -	mem.mm_node = NULL;
> -
>  	/*
>  	 * Determine where to move the buffer.
>  	 *
> @@ -1027,6 +1013,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
>  			 struct dma_resv *resv,
>  			 void (*destroy) (struct ttm_buffer_object *))
>  {
> +	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
>  	bool locked;
>  	int ret = 0;
>
> @@ -1038,13 +1025,8 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
>  	bo->bdev = bdev;
>  	bo->type = type;
>  	bo->page_alignment = page_alignment;
> -	bo->mem.mem_type = TTM_PL_SYSTEM;
> -	bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
> -	bo->mem.mm_node = NULL;
> -	bo->mem.bus.offset = 0;
> -	bo->mem.bus.addr = NULL;
> +	ttm_resource_alloc(bo, &sys_mem, &bo->mem);
>  	bo->moving = NULL;
> -	bo->mem.placement = 0;
>  	bo->pin_count = 0;
>  	bo->sg = sg;
>  	if (resv) {
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
> index efb7e9c34ab4..ae8b61460724 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> @@ -664,6 +664,7 @@ EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
>
>  int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
>  {
> +	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
>  	struct ttm_buffer_object *ghost;
>  	int ret;
>
> @@ -676,8 +677,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
>  	if (ret)
>  		ttm_bo_wait(bo, false, false);
>
> -	memset(&bo->mem, 0, sizeof(bo->mem));
> -	bo->mem.mem_type = TTM_PL_SYSTEM;
> +	ttm_resource_alloc(bo, &sys_mem, &bo->mem);
>  	bo->ttm = NULL;
>
>  	dma_resv_unlock(&ghost->base._resv);
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index fc351700d035..cec2e6fb1c52 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -33,6 +33,15 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
>  		ttm_manager_type(bo->bdev, res->mem_type);

Hmm, should this not be place->mem_type when fishing out the man? For
example, in the pipeline_gutting case above we previously nuked bo->mem
and then set its mem_type to SYS, but that is dropped now, so what even
is the value of res->mem_type here in that case? (Rough sketch of what
I mean at the bottom of this mail.)

>
>  	res->mm_node = NULL;
> +	res->start = 0;
> +	res->num_pages = PFN_UP(bo->base.size);
> +	res->mem_type = place->mem_type;
> +	res->placement = place->flags;
> +	res->bus.addr = NULL;
> +	res->bus.offset = 0;
> +	res->bus.is_iomem = false;
> +	res->bus.caching = ttm_cached;
> +
>  	return man->func->alloc(man, bo, place, res);
>  }
>
> --
> 2.25.1
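Roughly what I have in mind, as an untested sketch on top of this patch
(the initialisation is exactly what the patch already adds, only the
manager lookup is switched over to the requested placement):

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	/* Sketch only: fish the manager out of the requested placement
	 * instead of whatever stale mem_type is still sitting in *res.
	 */
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);

	res->mm_node = NULL;
	res->start = 0;
	res->num_pages = PFN_UP(bo->base.size);
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;

	return man->func->alloc(man, bo, place, res);
}

With something like that the ttm_bo_init_reserved() and
ttm_bo_pipeline_gutting() callers wouldn't have to care what was
previously left in bo->mem.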