From: Dave Airlie <airlied@xxxxxxxxxx>

The two accel cleanup paths were mostly the same once refactored.

Just pass a bool to say if the evictions are to be pipelined.

Signed-off-by: Dave Airlie <airlied@xxxxxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |  5 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c    |  2 +-
 drivers/gpu/drm/radeon/radeon_ttm.c     |  2 +-
 drivers/gpu/drm/ttm/ttm_bo_util.c       | 88 +++++++++----------------
 include/drm/ttm/ttm_bo_driver.h         | 17 +----
 5 files changed, 37 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a57aaf666340..1ea58ce7c559 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -499,10 +499,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	}
 
 	/* Always block for VM page tables before committing the new location */
-	if (bo->type == ttm_bo_type_kernel)
-		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
-	else
-		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+	r = ttm_bo_move_accel_cleanup(bo, fence, true, bo->type != ttm_bo_type_kernel, new_mem);
 	dma_fence_put(fence);
 	return r;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 56f974c28eb5..2ee75646ad6f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -824,7 +824,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		if (ret == 0) {
 			ret = ttm_bo_move_accel_cleanup(bo,
 							&fence->base,
-							evict,
+							evict, false,
 							new_reg);
 			nouveau_fence_unref(&fence);
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 99d9ca1087b7..36150b7f31a9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -200,7 +200,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	if (IS_ERR(fence))
 		return PTR_ERR(fence);
 
-	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
+	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 502d334786d2..777f843cdb98 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -581,81 +581,57 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
 	return 0;
 }
 
-int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-			      struct dma_fence *fence,
-			      bool evict,
-			      struct ttm_resource *new_mem)
+static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
+				       struct dma_fence *fence)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
-	int ret;
+	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
 
-	dma_resv_add_excl_fence(bo->base.resv, fence);
-	if (evict)
-		ret = ttm_bo_wait_free_node(bo, man->use_tt);
-	else
-		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
-	if (ret)
-		return ret;
+	/**
+	 * BO doesn't have a TTM we need to bind/unbind. Just remember
+	 * this eviction and free up the allocation
+	 */
+	spin_lock(&from->move_lock);
+	if (!from->move || dma_fence_is_later(fence, from->move)) {
+		dma_fence_put(from->move);
+		from->move = dma_fence_get(fence);
+	}
+	spin_unlock(&from->move_lock);
 
-	ttm_bo_assign_mem(bo, new_mem);
+	ttm_bo_free_old_node(bo);
 
-	return 0;
+	dma_fence_put(bo->moving);
+	bo->moving = dma_fence_get(fence);
 }
-EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct dma_fence *fence, bool evict,
-			 struct ttm_resource *new_mem)
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+			      struct dma_fence *fence,
+			      bool evict,
+			      bool pipeline,
+			      struct ttm_resource *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
-	struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type);
-
-	int ret;
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+	int ret = 0;
 
 	dma_resv_add_excl_fence(bo->base.resv, fence);
+	if (!evict)
+		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
+	else if (!from->use_tt && pipeline)
+		ttm_bo_move_pipeline_evict(bo, fence);
+	else
+		ret = ttm_bo_wait_free_node(bo, man->use_tt);
 
-	if (!evict) {
-		ret = ttm_bo_move_to_ghost(bo, fence, to->use_tt);
-		if (ret)
-			return ret;
-	} else if (!from->use_tt) {
-
-		/**
-		 * BO doesn't have a TTM we need to bind/unbind. Just remember
-		 * this eviction and free up the allocation
-		 */
-
-		spin_lock(&from->move_lock);
-		if (!from->move || dma_fence_is_later(fence, from->move)) {
-			dma_fence_put(from->move);
-			from->move = dma_fence_get(fence);
-		}
-		spin_unlock(&from->move_lock);
-
-		ttm_bo_free_old_node(bo);
-
-		dma_fence_put(bo->moving);
-		bo->moving = dma_fence_get(fence);
-
-	} else {
-		/**
-		 * Last resort, wait for the move to be completed.
-		 *
-		 * Should never happen in pratice.
-		 */
-		ret = ttm_bo_wait_free_node(bo, to->use_tt);
-		if (ret)
-			return ret;
-	}
+	if (ret)
+		return ret;
 
 	ttm_bo_assign_mem(bo, new_mem);
 
 	return 0;
 }
-EXPORT_SYMBOL(ttm_bo_pipeline_move);
+EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 {
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 25cc932d63f1..864afa8f6f18 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -642,6 +642,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  * @bo: A pointer to a struct ttm_buffer_object.
  * @fence: A fence object that signals when moving is complete.
  * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @pipeline: evictions are to be pipelined.
  * @new_mem: struct ttm_resource indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
@@ -653,23 +654,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  */
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      struct dma_fence *fence, bool evict,
+			      bool pipeline,
 			      struct ttm_resource *new_mem);
 
-/**
- * ttm_bo_pipeline_move.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Function for pipelining accelerated moves. Either free the memory
- * immediately or hang it on a temporary buffer object.
- */
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct dma_fence *fence, bool evict,
-			 struct ttm_resource *new_mem);
-
 /**
  * ttm_bo_pipeline_gutting.
  *
-- 
2.27.0

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel
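
[Editor's note, not part of the patch] For readers following the API change, here is a minimal sketch of how a driver-side blit move could call the merged helper after this patch. mydrv_move_blit() and mydrv_copy_buffer() are hypothetical placeholders, not part of this series or of any real driver; only the ttm_bo_move_accel_cleanup() signature and the bo->type check are taken from the hunks above.

/* Minimal sketch only -- mydrv_copy_buffer() is a hypothetical placeholder
 * for whatever schedules the blit on the hardware and returns its fence.
 */
#include <linux/dma-fence.h>
#include <linux/err.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

struct dma_fence *mydrv_copy_buffer(struct ttm_buffer_object *bo,
				    struct ttm_resource *old_mem,
				    struct ttm_resource *new_mem);

static int mydrv_move_blit(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_resource *new_mem)
{
	struct dma_fence *fence;
	int r;

	/* Kick off the copy; the fence signals when the blit has finished. */
	fence = mydrv_copy_buffer(bo, &bo->mem, new_mem);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/*
	 * One call now covers both cleanup paths: pipeline=false keeps the
	 * old ttm_bo_move_accel_cleanup() behaviour (what radeon and nouveau
	 * pass above), pipeline=true allows an eviction to be pipelined
	 * instead of waited on, which amdgpu requests for everything except
	 * kernel BOs (VM page tables).
	 */
	r = ttm_bo_move_accel_cleanup(bo, fence, evict,
				      bo->type != ttm_bo_type_kernel,
				      new_mem);
	dma_fence_put(fence);
	return r;
}

Note that, per the ttm_bo_util.c hunk, the pipeline bool only takes effect on the eviction path when the source manager has no TT; in every other case ttm_bo_move_accel_cleanup() falls back to the ghost-object or wait paths exactly as before.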