The premise here is simply to avoid having to acquire the vm->mutex inside
vma create/destroy to update the vm->unbound_list, so as to avoid some nasty
lock recursions later.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Matthew Auld <matthew.auld@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c           | 23 ++++-------------------
 drivers/gpu/drm/i915/i915_gem_gtt.h           |  5 -----
 drivers/gpu/drm/i915/i915_vma.c               | 12 ++----------
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |  2 +-
 5 files changed, 8 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index bfbc3e3daf92..e45eb8721850 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -692,7 +692,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	__i915_vma_set_map_and_fenceable(vma);
 
 	mutex_lock(&ggtt->vm.mutex);
-	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
+	list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
 	mutex_unlock(&ggtt->vm.mutex);
 
 	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e62e9d1a1307..ad9eb2d68f3f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -505,19 +505,12 @@ static void i915_address_space_fini(struct i915_address_space *vm)
 
 static void ppgtt_destroy_vma(struct i915_address_space *vm)
 {
-	struct list_head *phases[] = {
-		&vm->bound_list,
-		&vm->unbound_list,
-		NULL,
-	}, **phase;
+	struct i915_vma *vma, *vn;
 
 	mutex_lock(&vm->i915->drm.struct_mutex);
-	for (phase = phases; *phase; phase++) {
-		struct i915_vma *vma, *vn;
-
-		list_for_each_entry_safe(vma, vn, *phase, vm_link)
-			i915_vma_destroy(vma);
-	}
+	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link)
+		i915_vma_destroy(vma);
+	GEM_BUG_ON(!list_empty(&vm->bound_list));
 	mutex_unlock(&vm->i915->drm.struct_mutex);
 }
 
@@ -528,9 +521,6 @@ static void __i915_vm_release(struct work_struct *work)
 
 	ppgtt_destroy_vma(vm);
 
-	GEM_BUG_ON(!list_empty(&vm->bound_list));
-	GEM_BUG_ON(!list_empty(&vm->unbound_list));
-
 	vm->cleanup(vm);
 	i915_address_space_fini(vm);
 
@@ -569,7 +559,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
 
 	stash_init(&vm->free_pages);
 
-	INIT_LIST_HEAD(&vm->unbound_list);
 	INIT_LIST_HEAD(&vm->bound_list);
 }
 
@@ -1887,10 +1876,6 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->closed_link);
 
-	mutex_lock(&vma->vm->mutex);
-	list_add(&vma->vm_link, &vma->vm->unbound_list);
-	mutex_unlock(&vma->vm->mutex);
-
 	return vma;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8fd2234ba0bf..bbdc735466c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -320,11 +320,6 @@ struct i915_address_space {
 	 */
 	struct list_head bound_list;
 
-	/**
-	 * List of vma that are not unbound.
-	 */
-	struct list_head unbound_list;
-
 	struct pagestash free_pages;
 
 	/* Global GTT */
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 68c34b1a20e4..d097f77890ba 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -218,10 +218,6 @@ vma_create(struct drm_i915_gem_object *obj,
 
 	spin_unlock(&obj->vma.lock);
 
-	mutex_lock(&vm->mutex);
-	list_add(&vma->vm_link, &vm->unbound_list);
-	mutex_unlock(&vm->mutex);
-
 	return vma;
 
 err_vma:
@@ -657,7 +653,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
 
 	mutex_lock(&vma->vm->mutex);
-	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
 	mutex_unlock(&vma->vm->mutex);
 
 	if (vma->obj) {
@@ -685,7 +681,7 @@ i915_vma_remove(struct i915_vma *vma)
 
 	mutex_lock(&vma->vm->mutex);
 	drm_mm_remove_node(&vma->node);
-	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+	list_del(&vma->vm_link);
 	mutex_unlock(&vma->vm->mutex);
 
 	/*
@@ -798,10 +794,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->fence);
 
-	mutex_lock(&vma->vm->mutex);
-	list_del(&vma->vm_link);
-	mutex_unlock(&vma->vm->mutex);
-
 	if (vma->obj) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 0d40e0b42923..84d94eabc7dd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1242,7 +1242,7 @@ static void track_vma_bind(struct i915_vma *vma)
 	vma->pages = obj->mm.pages;
 
 	mutex_lock(&vma->vm->mutex);
-	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
 	mutex_unlock(&vma->vm->mutex);
 }
 
-- 
2.23.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
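To make the tracking model easier to follow outside the kernel tree, here is a
minimal, standalone userspace sketch of what the patch moves to. It is not the
i915 code: the toy_* names and the pthread mutex are invented purely for
illustration. The point it demonstrates is that vma create/destroy no longer
touch any per-vm list (so they need no vm->mutex), while bind adds to the
bound list and unbind deletes from it, each under the lock.

```c
/* Toy model only: toy_* names and pthread locking are illustrative, not i915. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_list { struct toy_list *prev, *next; };

static void toy_list_init(struct toy_list *l) { l->prev = l->next = l; }

static void toy_list_add_tail(struct toy_list *n, struct toy_list *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void toy_list_del(struct toy_list *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = NULL;
}

struct toy_vm {
	pthread_mutex_t mutex;
	struct toy_list bound_list;	/* only *bound* vmas are tracked */
};

struct toy_vma {
	struct toy_vm *vm;
	struct toy_list vm_link;
};

/* Create/destroy never touch vm->bound_list, so no vm->mutex is needed here. */
static struct toy_vma *toy_vma_create(struct toy_vm *vm)
{
	struct toy_vma *vma = calloc(1, sizeof(*vma));
	vma->vm = vm;
	return vma;
}

static void toy_vma_destroy(struct toy_vma *vma)
{
	free(vma);	/* caller must have unbound it first */
}

/* Bind/unbind are the only places the list is updated, under vm->mutex. */
static void toy_vma_bind(struct toy_vma *vma)
{
	pthread_mutex_lock(&vma->vm->mutex);
	toy_list_add_tail(&vma->vm_link, &vma->vm->bound_list);
	pthread_mutex_unlock(&vma->vm->mutex);
}

static void toy_vma_unbind(struct toy_vma *vma)
{
	pthread_mutex_lock(&vma->vm->mutex);
	toy_list_del(&vma->vm_link);
	pthread_mutex_unlock(&vma->vm->mutex);
}

int main(void)
{
	struct toy_vm vm;

	pthread_mutex_init(&vm.mutex, NULL);
	toy_list_init(&vm.bound_list);

	struct toy_vma *vma = toy_vma_create(&vm);	/* no vm.mutex taken */
	toy_vma_bind(vma);				/* add_tail under vm.mutex */
	toy_vma_unbind(vma);				/* del under vm.mutex */
	toy_vma_destroy(vma);				/* no vm.mutex taken */

	printf("bound_list empty: %d\n", vm.bound_list.next == &vm.bound_list);
	pthread_mutex_destroy(&vm.mutex);
	return 0;
}
```

Because unbound vmas are never on a vm list in this model, destruction can run
without taking vm->mutex, which is exactly the lock recursion the commit
message is trying to avoid.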