We tried this once before, but that turned out to be more complicated than we
thought. With all the right prerequisites in place it looks like we can do
this now.

Signed-off-by: Christian König <christian.koenig@xxxxxxx>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 128 ++++++++++++++++++-----------------
 1 file changed, 67 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1fe302dee1a8..ec0bcc0241a8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -969,13 +969,12 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
  * space, or we've evicted everything and there isn't enough space.
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
-				  uint32_t mem_type,
-				  const struct ttm_place *place,
-				  struct ttm_mem_reg *mem,
-				  struct ttm_operation_ctx *ctx)
+				  const struct ttm_place *place,
+				  struct ttm_mem_reg *mem,
+				  struct ttm_operation_ctx *ctx)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 
 	do {
@@ -984,11 +983,12 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			return ret;
 		if (mem->mm_node)
 			break;
-		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx, bo->resv);
+		ret = ttm_mem_evict_first(bdev, mem->mem_type, place,
+					  ctx, bo->resv);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
-	mem->mem_type = mem_type;
+
 	return ttm_bo_add_move_fence(bo, man, mem);
 }
 
@@ -1036,6 +1036,51 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 	return true;
 }
 
+/**
+ * ttm_bo_mem_placement - check if placement is compatible
+ * @bo: BO to find memory for
+ * @place: where to search
+ * @mem: the memory object to fill in
+ * @ctx: operation context
+ *
+ * Check if placement is compatible and fill in mem structure.
+ * Returns -EBUSY if placement won't work or negative error code.
+ * 0 when placement can be used.
+ */
+static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
+				const struct ttm_place *place,
+				struct ttm_mem_reg *mem,
+				struct ttm_operation_ctx *ctx)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	uint32_t mem_type = TTM_PL_SYSTEM;
+	struct ttm_mem_type_manager *man;
+	uint32_t cur_flags = 0;
+	int ret;
+
+	ret = ttm_mem_type_from_place(place, &mem_type);
+	if (ret)
+		return ret;
+
+	man = &bdev->man[mem_type];
+	if (!man->has_type || !man->use_type)
+		return -EBUSY;
+
+	if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
+		return -EBUSY;
+
+	cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
+	/*
+	 * Use the access and other non-mapping-related flag bits from
+	 * the memory placement flags to the current flags
+	 */
+	ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);
+
+	mem->mem_type = mem_type;
+	mem->placement = cur_flags;
+	return 0;
+}
+
 /**
  * Creates space for memory region @mem according to its type.
  *
@@ -1050,11 +1095,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man;
-	uint32_t mem_type = TTM_PL_SYSTEM;
-	uint32_t cur_flags = 0;
 	bool type_found = false;
-	bool type_ok = false;
 	int i, ret;
 
 	ret = reservation_object_reserve_shared(bo->resv, 1);
@@ -1064,37 +1105,20 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	mem->mm_node = NULL;
 	for (i = 0; i < placement->num_placement; ++i) {
 		const struct ttm_place *place = &placement->placement[i];
+		struct ttm_mem_type_manager *man;
 
-		ret = ttm_mem_type_from_place(place, &mem_type);
+		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+		if (ret == -EBUSY)
+			continue;
 		if (ret)
 			return ret;
-		man = &bdev->man[mem_type];
-		if (!man->has_type || !man->use_type)
-			continue;
-
-		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
-					       &cur_flags);
-
-		if (!type_ok)
-			continue;
 
 		type_found = true;
-		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
-						  cur_flags);
-		/*
-		 * Use the access and other non-mapping-related flag bits from
-		 * the memory placement flags to the current flags
-		 */
-		ttm_flag_masked(&cur_flags, place->flags,
-				~TTM_PL_MASK_MEMTYPE);
-
-		if (mem_type == TTM_PL_SYSTEM) {
-			mem->mem_type = mem_type;
-			mem->placement = cur_flags;
-			mem->mm_node = NULL;
+		mem->mm_node = NULL;
+		if (mem->mem_type == TTM_PL_SYSTEM)
 			return 0;
-		}
 
+		man = &bdev->man[mem->mem_type];
 		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret))
 			return ret;
@@ -1105,8 +1129,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 				(*man->func->put_node)(man, mem);
 				return ret;
 			}
-			mem->mem_type = mem_type;
-			mem->placement = cur_flags;
 			return 0;
 		}
 	}
@@ -1114,37 +1136,21 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	for (i = 0; i < placement->num_busy_placement; ++i) {
 		const struct ttm_place *place = &placement->busy_placement[i];
 
-		ret = ttm_mem_type_from_place(place, &mem_type);
+		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+		if (ret == -EBUSY)
+			continue;
 		if (ret)
 			return ret;
-		man = &bdev->man[mem_type];
-		if (!man->has_type || !man->use_type)
-			continue;
-		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
-			continue;
 
 		type_found = true;
-		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
-						  cur_flags);
-		/*
-		 * Use the access and other non-mapping-related flag bits from
-		 * the memory placement flags to the current flags
-		 */
-		ttm_flag_masked(&cur_flags, place->flags,
-				~TTM_PL_MASK_MEMTYPE);
-
-		if (mem_type == TTM_PL_SYSTEM) {
-			mem->mem_type = mem_type;
-			mem->placement = cur_flags;
-			mem->mm_node = NULL;
+		mem->mm_node = NULL;
+		if (mem->mem_type == TTM_PL_SYSTEM)
 			return 0;
-		}
 
-		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
-		if (ret == 0 && mem->mm_node) {
-			mem->placement = cur_flags;
+		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
+		if (ret == 0 && mem->mm_node)
 			return 0;
-		}
+
 		if (ret && ret != -EBUSY)
 			return ret;
 	}
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
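
For reviewers skimming the diff, here is a condensed sketch of how the
fast-path placement loop in ttm_bo_mem_space() reads with this patch applied.
It only restates what the hunks above already show (the busy-placement loop
and most error handling are elided); it is an illustration of the resulting
control flow, not a literal copy of the file:

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_mem_type_manager *man;

		/* The new helper validates the placement and fills in
		 * mem->mem_type and mem->placement; -EBUSY simply means
		 * "this placement can't be used, try the next one". */
		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
		if (ret == -EBUSY)
			continue;
		if (ret)
			return ret;

		type_found = true;
		mem->mm_node = NULL;
		if (mem->mem_type == TTM_PL_SYSTEM)
			return 0;

		/* Non-system domains still allocate through the manager
		 * and take the move fence, exactly as before. */
		man = &bdev->man[mem->mem_type];
		ret = (*man->func->get_node)(man, bo, place, mem);
		...
	}

The busy-placement loop ends up with the same shape, just with
ttm_bo_mem_force_space() (which now takes mem->mem_type from the already
filled-in mem) called instead of get_node().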