+ .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+ };
int r, i;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -907,7 +915,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
*/
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
- return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
+ return amdgpu_bo_pin_restricted(bo, domain, 0, 0, NULL);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index c430e8259038..41e7fff7f3f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -247,7 +247,8 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset);
+ u64 min_offset, u64 max_offset,
+ struct ww_acquire_ctx *ticket);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
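Taken together, the .c and .h hunks above only thread a new parameter through: the final argument to amdgpu_bo_pin_restricted() is the ww_acquire_ctx of an already-started reservation sequence, and passing NULL keeps the old behaviour, as the amdgpu_bo_pin() wrapper shows. A minimal sketch of the two calling styles (the helper name, domain and offsets here are illustrative, not part of the patch):

    /* hypothetical helper, sketching the post-patch call sites */
    static int pin_with_optional_ticket(struct amdgpu_bo *bo,
                                        struct ww_acquire_ctx *ticket)
    {
            /* ticket == NULL: legacy callers, unchanged semantics.
             * ticket != NULL: eviction triggered by this pin can
             * recognize reservations held by the same sequence. */
            return amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM,
                                            0, 0, ticket);
    }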
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 22bd21efe6b1..dd31ce1a09e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1150,7 +1150,7 @@ static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
r = amdgpu_bo_pin_restricted(bo,
AMDGPU_GEM_DOMAIN_VRAM,
offset,
- offset + size);
+ offset + size, NULL);
if (r)
goto error_pin;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index afccca5b1f5f..e63af0debc7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1661,7 +1661,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
AMDGPU_GEM_DOMAIN_VRAM,
adev->fw_vram_usage.start_offset,
(adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size));
+ adev->fw_vram_usage.size), NULL);
if (r)
goto error_pin;
r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a5cacf846e1b..bdbbc3891585 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4101,6 +4101,9 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
struct amdgpu_device *adev;
struct amdgpu_bo *rbo;
struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
+ struct list_head list, duplicates;
+ struct ttm_validate_buffer tv;
+ struct ww_acquire_ctx ticket;
uint64_t tiling_flags;
uint32_t domain;
int r;
@@ -4119,32 +4122,41 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
- r = amdgpu_bo_reserve(rbo, false);
- if (unlikely(r != 0))
+ INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
+
+ tv.bo = &rbo->tbo;
+ tv.num_shared = 1;
+ list_add(&tv.head, &list);
+
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
+ if (r) {
+ dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
+ }
if (plane->type != DRM_PLANE_TYPE_CURSOR)
domain = amdgpu_display_supported_domains(adev);
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
- r = amdgpu_bo_pin(rbo, domain);
+ r = amdgpu_bo_pin_restricted(rbo, domain, 0, 0, &ticket);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
- amdgpu_bo_unreserve(rbo);
+ ttm_eu_backoff_reservation(&ticket, &list);
return r;
}
r = amdgpu_ttm_alloc_gart(&rbo->tbo);
if (unlikely(r != 0)) {
amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ ttm_eu_backoff_reservation(&ticket, &list);
DRM_ERROR("%p bind failed\n", rbo);
return r;
}
amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
- amdgpu_bo_unreserve(rbo);
+ ttm_eu_backoff_reservation(&ticket, &list);
afb->address = amdgpu_bo_gpu_offset(rbo);
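Stripped of the pin, GART and tiling steps, the reservation pattern this hunk switches to looks as follows; a hedged sketch reusing the hunk's own names, with error handling trimmed and the duplicates list omitted. The list machinery is incidental for a single BO; the point is that ttm_eu_reserve_buffers() initializes a ticket that the pin can forward into TTM's eviction path:

    struct ttm_validate_buffer tv;
    struct ww_acquire_ctx ticket;
    struct list_head list;
    int r;

    INIT_LIST_HEAD(&list);
    tv.bo = &rbo->tbo;              /* the framebuffer BO to lock */
    tv.num_shared = 1;              /* reserve one shared fence slot */
    list_add(&tv.head, &list);

    /* locks every BO on the list and starts the ww ticket */
    r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
    if (r)
            return r;

    /* ... amdgpu_bo_pin_restricted(rbo, domain, 0, 0, &ticket) ... */

    /* unlocks all reserved BOs and finishes the ticket */
    ttm_eu_backoff_reservation(&ticket, &list);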
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8502b3ed2d88..112fea8f21f9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -766,11 +766,12 @@ EXPORT_SYMBOL(ttm_bo_eviction_valuable);
* b. Otherwise, trylock it.
*/
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx, bool *locked)
+ struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
{
bool ret = false;
*locked = false;
+ *busy = false;
if (bo->resv == ctx->resv) {
reservation_object_assert_held(bo->resv);
if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
@@ -779,6 +780,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
} else {
*locked = reservation_object_trylock(bo->resv);
ret = *locked;
+ if (!ret)
+ *busy = true;
}
return ret;
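With the corrected test, *busy reports contention rather than success: it becomes true exactly when the trylock fails because another task holds the reservation. From the caller's side the contract now looks like this; a hedged sketch that paraphrases, rather than quotes, the helper:

    bool locked, busy;

    if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked, &busy)) {
            /* evictable; if 'locked' we took the reservation via
             * trylock and must drop it after evicting */
    } else if (busy) {
            /* trylock failed: another task holds the reservation,
             * so the list had a contended, potentially evictable BO */
    } else {
            /* not evictable for other reasons, e.g. a shared resv
             * without TTM_OPT_FLAG_ALLOW_RES_EVICT */
    }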
@@ -791,16 +794,20 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_buffer_object *bo = NULL;
- bool locked = false;
+ struct ttm_buffer_object *bo = NULL, *first_bo = NULL;
+ bool locked = false, list_busy = false;
unsigned i;
int ret;
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
+ bool busy = false;
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
+ &busy)) {
+ list_busy |= busy;
continue;
+ }
if (place && !bdev->driver->eviction_valuable(bo,
place)) {
@@ -819,8 +826,44 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
}
if (!bo) {
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+ if (list_empty(&man->lru[i]))
+ continue;
+ bo = list_first_entry(&man->lru[i],
+ struct ttm_buffer_object,
+ lru);
+
+ break;
+ }
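The quoted hunk ends here, but the visible half of the fallback is straightforward: if the scan found nothing it could evict, take the first BO on the highest-priority non-empty LRU list as the victim. In isolation (a sketch under that reading; what the function goes on to do with that BO lies outside this excerpt):

    struct ttm_buffer_object *victim = NULL;
    unsigned i;

    /* highest priority first; victim stays NULL if all lists are empty */
    for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
            if (list_empty(&man->lru[i]))
                    continue;
            victim = list_first_entry(&man->lru[i],
                                      struct ttm_buffer_object, lru);
            break;
    }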