+ dma_resv_unlock(bo->tbo.base.resv);
}
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
@@ -1203,8 +1203,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
uncached = false;
}
- if (clear || (bo && bo->tbo.base.resv ==
- vm->root.bo->tbo.base.resv))
+ if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
last_update = &vm->last_update;
else
last_update = &bo_va->last_pt_update;
@@ -1246,7 +1245,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
* the evicted list so that it gets validated again on the
* next command submission.
*/
- if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
+ if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
uint32_t mem_type = bo->tbo.resource->mem_type;
if (!(bo->preferred_domains &
@@ -1640,10 +1639,9 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
if (mapping->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
- if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
- !bo_va->base.moved) {
+ if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
amdgpu_vm_bo_moved(&bo_va->base);
- }
+
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
@@ -1942,7 +1940,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
if (before->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
- if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+ if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
!before->bo_va->base.moved)
amdgpu_vm_bo_moved(&before->bo_va->base);
} else {
@@ -1957,7 +1955,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
if (after->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
- if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+ if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
!after->bo_va->base.moved)
amdgpu_vm_bo_moved(&after->bo_va->base);
} else {
@@ -2037,7 +2035,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
if (bo) {
dma_resv_assert_held(bo->tbo.base.resv);
- if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
+ if (amdgpu_vm_is_bo_always_valid(vm, bo))
ttm_bo_set_bulk_move(&bo->tbo, NULL);
for (base = &bo_va->base.bo->vm_bo; *base;
@@ -2131,7 +2129,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
+ if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
amdgpu_vm_bo_evicted(bo_base);
continue;
}
@@ -2142,7 +2140,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
if (bo->tbo.type == ttm_bo_type_kernel)
amdgpu_vm_bo_relocated(bo_base);
- else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
+ else if (amdgpu_vm_is_bo_always_valid(vm, bo))
amdgpu_vm_bo_moved(bo_base);
else
amdgpu_vm_bo_invalidated(bo_base);
@@ -3006,3 +3004,16 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
}
+/**
+ * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
+ *
+ * @vm: VM to test against.
+ * @bo: BO to be tested.
+ *
+ * Returns true if the BO shares the dma_resv object with the root PD and is
+ * always guaranteed to be valid inside the VM.
+ */
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
+{
+ return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
+}
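
For reference, a minimal sketch (not part of the patch) of the caller pattern
the helper consolidates, condensed from the amdgpu_vm_bo_update() hunk above;
the pick_last_update() wrapper is hypothetical and exists only for
illustration:

/*
 * Hypothetical illustration, not kernel code: per-VM BOs share the
 * root PD's dma_resv object, so their page-table updates are ordered
 * against the VM-wide vm->last_update fence, while all other BOs
 * track their own bo_va->last_pt_update fence instead.
 */
static struct dma_fence **
pick_last_update(struct amdgpu_vm *vm, struct amdgpu_bo_va *bo_va,
		 struct amdgpu_bo *bo, bool clear)
{
	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
		return &vm->last_update;

	return &bo_va->last_pt_update;
}
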
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 54d7da396de0..ec688a47dec1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -561,6 +561,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo);
+
/**
* amdgpu_vm_tlb_seq - return tlb flush sequence number
* @vm: the amdgpu_vm structure to query