---
.../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 9 ++--
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 +-
.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
drivers/gpu/drm/qxl/qxl_release.c | 4 +-
drivers/gpu/drm/radeon/radeon_gem.c | 2 +-
drivers/gpu/drm/radeon/radeon_object.c | 2 +-
drivers/gpu/drm/ttm/ttm_bo.c | 48 +++++--------------
drivers/gpu/drm/ttm/ttm_execbuf_util.c | 25 ++--------
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 3 +-
drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 2 +-
include/drm/ttm/ttm_bo_api.h | 24 ----------
include/drm/ttm/ttm_bo_driver.h | 20 ++------
include/drm/ttm/ttm_execbuf_util.h | 2 +-
15 files changed, 33 insertions(+), 118 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b9bb35d1699e..ad01c741caaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -586,7 +586,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else {
@@ -659,7 +659,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
}
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else
@@ -1796,8 +1796,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
/* Reserve all BOs and page tables for validation */
- ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
- true);
+ ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
WARN(!list_empty(&duplicates), "Duplicates should be empty");
if (ret)
goto out_free;
@@ -1995,7 +1994,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
- false, &duplicate_save, true);
+ false, &duplicate_save);
if (ret) {
pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
goto ttm_reserve_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 95ec965fcc2d..82f239ac4050 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -650,7 +650,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates, false);
+ &duplicates);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 605f83046039..b1608d47508f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b25a59c4bec6..cbfb3e84f352 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -611,7 +611,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5df2ee1e10d8..760078b9925b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4439,7 +4439,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
tv.num_shared = 1;
list_add(&tv.head, &list);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index aa7a28795645..71f1b51a1350 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -260,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL, true);
+ !no_intr, NULL);
if (ret)
return ret;
@@ -459,7 +459,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c48c2fb35456..879eac67cdd7 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -564,7 +564,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 653fd7937b39..a94d827ba976 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
if (unlikely(r != 0)) {
return r;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 02f301e1ff29..ab9967b84e0b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -192,18 +192,12 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
}
}
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
-{
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
-}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
-
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
}
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
bool notify = false;
@@ -223,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
bdev->driver->del_from_lru_notify(bo);
}
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_global *glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
-}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
-
static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
struct ttm_buffer_object *bo)
{
@@ -247,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
ttm_bo_del_from_lru(bo);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
switch (bo->mem.mem_type) {
@@ -511,7 +495,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
}
dma_resv_unlock(bo->base.resv);
@@ -892,17 +876,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
- ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
ret = ttm_bo_evict(bo, ctx);
- if (locked) {
+ if (locked)
ttm_bo_unreserve(bo);
- } else {
- spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&glob->lru_lock);
- }
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
@@ -1064,12 +1042,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->mem_type = mem_type;
mem->placement = cur_flags;
- if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, mem);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
+ spin_lock(&bo->bdev->glob->lru_lock);
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, mem);
+ spin_unlock(&bo->bdev->glob->lru_lock);
return 0;
}
@@ -1375,11 +1351,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return ret;
}
- if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bdev->glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&bdev->glob->lru_lock);
- }
+ spin_lock(&bdev->glob->lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&bdev->glob->lru_lock);
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 71148c83cc4f..b09c2c8caf13 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -33,16 +33,6 @@
#include <linux/sched.h>
#include <linux/module.h>
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_del_from_lru(bo);
- }
-}
-
void ttm_eu_backoff_reservation(struct dma_resv_ctx *ticket,
struct list_head *list)
{
@@ -59,8 +49,7 @@ void ttm_eu_backoff_reservation(struct dma_resv_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
if (!ticket)
dma_resv_unlock(bo->base.resv);
}
@@ -87,7 +76,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
int ttm_eu_reserve_buffers(struct dma_resv_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups, bool del_lru)
+ struct list_head *dups)
{
struct ttm_validate_buffer *entry;
struct ttm_bo_global *glob;
@@ -142,11 +131,6 @@ int ttm_eu_reserve_buffers(struct dma_resv_ctx *ticket,
}
}
- if (del_lru) {
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- }
return 0;
error:
@@ -186,10 +170,7 @@ void ttm_eu_fence_buffer_objects(struct dma_resv_ctx *ticket,
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
- else
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, NULL);
if (!ticket)
dma_resv_unlock(bo->base.resv);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 2d7c5ad25359..1d53a69819e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -464,8 +464,7 @@ vmw_resource_check_buffer(struct dma_resv_ctx *ticket,
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
- true);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 43f48df3844f..ebb5b68b5c1f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -169,7 +169,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
- NULL, true);
+ NULL);
}
/**
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 43c4929a2171..288fd64b7219 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -367,30 +367,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
void ttm_bo_put(struct ttm_buffer_object *bo);
-/**
- * ttm_bo_add_to_lru
- *
- * @bo: The buffer object.
- *
- * Add this bo to the relevant mem type lru and, if it's backed by
- * system pages (ttms) to the swap list.
- * This function must be called with struct ttm_bo_global::lru_lock held, and
- * is typically called immediately prior to unreserving a bo.
- */
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_del_from_lru
- *
- * @bo: The buffer object.
- *
- * Remove this bo from all lru lists used to lookup and reserve an object.
- * This function must be called with struct ttm_bo_global::lru_lock held,
- * and is usually called just immediately after the bo has been reserved to
- * avoid recursive reservation from lru lists.
- */
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
-
/**
* ttm_bo_move_to_lru_tail
*
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4e307f65f497..4332cc036483 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -621,9 +621,6 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
/**
* __ttm_bo_reserve:
*
@@ -717,15 +714,9 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
- int ret;
-
WARN_ON(!kref_read(&bo->kref));
- ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
- if (likely(ret == 0))
- ttm_bo_del_sub_from_lru(bo);
-
- return ret;
+ return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
}
/**
@@ -752,9 +743,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
else
dma_resv_lock_slow(bo->base.resv, ticket);
- if (likely(ret == 0))
- ttm_bo_del_sub_from_lru(bo);
- else if (ret == -EINTR)
+ if (ret == -EINTR)
ret = -ERESTARTSYS;
return ret;
@@ -770,10 +759,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
spin_lock(&bo->bdev->glob->lru_lock);
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
- else
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bo->bdev->glob->lru_lock);
dma_resv_unlock(bo->base.resv);
}
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 4e86b6fd6c57..1a3553ac53b1 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -100,7 +100,7 @@ extern void ttm_eu_backoff_reservation(struct dma_resv_ctx *ticket,
extern int ttm_eu_reserve_buffers(struct dma_resv_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups, bool del_lru);
+ struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.