};
/* provided by hw blocks that can write ptes, e.g., sdma */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 88de9f0d4728..e6f6d7e6368f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -37,6 +37,8 @@ struct amdgpu_vm_pt_cursor {
unsigned int level;
};
+static void amdgpu_vm_pt_free_work(struct work_struct *work);
+
/**
* amdgpu_vm_pt_level_shift - return the addr shift for each level
*
@@ -607,6 +609,7 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
pt_bo = &pt->bo;
pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
amdgpu_vm_bo_base_init(entry, vm, pt_bo);
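+ /* used to free this PT from a worker on unlocked updates */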
+ INIT_WORK(&entry->free_work, amdgpu_vm_pt_free_work);
r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
if (r)
goto error_free_pt;
@@ -624,23 +627,46 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
*
* @entry: PDE to free
*/
-static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
+static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry, bool unlocked)
{
struct amdgpu_bo *shadow;

if (!entry->bo)
return;
+
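+ /* unlocked updates (e.g. from the page fault path) don't hold the
+ * root PD reservation, so hand the actual free off to a worker that
+ * can take it
+ */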
+ if (unlocked) {
+ schedule_work(&entry->free_work);
+ return;
+ }
+
shadow = amdgpu_bo_shadowed(entry->bo);
if (shadow) {
ttm_bo_set_bulk_move(&shadow->tbo, NULL);
amdgpu_bo_unref(&shadow);
}
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
+
entry->bo->vm_bo = NULL;
list_del(&entry->vm_status);
amdgpu_bo_unref(&entry->bo);
}
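+
+/**
+ * amdgpu_vm_pt_free_work - free a page table from process context
+ *
+ * @work: the free_work item embedded in a PT's amdgpu_vm_bo_base
+ *
+ * Takes the root PD reservation which the unlocked update path could
+ * not take and then frees the page table for real.
+ */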
+static void amdgpu_vm_pt_free_work(struct work_struct *work)
+{
+ struct amdgpu_vm_bo_base *entry;
+ struct amdgpu_bo *root;
+
+ entry = container_of(work, struct amdgpu_vm_bo_base, free_work);
+
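+ /* keep the root PD alive and reserved while the entry is removed
+ * from the VM state lists
+ */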
+ root = amdgpu_bo_ref(entry->vm->root.bo);
+ amdgpu_bo_reserve(root, true);
+
+ amdgpu_vm_pt_free(entry, true);
+
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+}
+
/**
* amdgpu_vm_pt_free_dfs - free PD/PT levels
*
@@ -652,16 +678,17 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
*/
static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *start)
+ struct amdgpu_vm_pt_cursor *start,
+ bool unlocked)
{
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_bo_base *entry;
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
- amdgpu_vm_pt_free(entry);
+ amdgpu_vm_pt_free(entry, unlocked);
if (start)
- amdgpu_vm_pt_free(start->entry);
+ amdgpu_vm_pt_free(start->entry, unlocked);
}
/**
@@ -673,7 +700,7 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
*/
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- amdgpu_vm_pt_free_dfs(adev, vm, NULL);
+ amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
}
/**
@@ -966,7 +993,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
if (cursor.entry->bo) {
params->table_freed = true;
amdgpu_vm_pt_free_dfs(adev, params->vm,
- &cursor);
+ &cursor,
+ params->unlocked);
}
amdgpu_vm_pt_next(adev, &cursor);
}