Re: [PATCH 26/39] drm/i915: Consolidate the bound/unbound vma lists into one

On 02/01/2019 09:41, Chris Wilson wrote:

Commit text to explain why, please.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
  drivers/gpu/drm/i915/i915_gem.c               |  4 +--
  drivers/gpu/drm/i915/i915_gem_evict.c         | 12 ++++++---
  drivers/gpu/drm/i915/i915_gem_gtt.c           | 27 ++++++-------------
  drivers/gpu/drm/i915/i915_gem_gtt.h           | 10 +------
  drivers/gpu/drm/i915/i915_gem_shrinker.c      |  3 +--
  drivers/gpu/drm/i915/i915_gem_stolen.c        |  4 ---
  drivers/gpu/drm/i915/i915_gpu_error.c         | 11 ++++----
  drivers/gpu/drm/i915/i915_vma.c               |  7 +----
  .../gpu/drm/i915/selftests/i915_gem_evict.c   | 11 +++-----
  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |  4 ---
  10 files changed, 32 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 66445e34c93c..a954e15c0315 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -254,7 +254,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
  	mutex_lock(&ggtt->vm.mutex);
pinned = ggtt->vm.reserved;
-	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+	list_for_each_entry(vma, &ggtt->vm.vma_list, vm_link)
  		if (i915_vma_is_pinned(vma))
  			pinned += vma->node.size;
@@ -1692,7 +1692,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
  		if (!drm_mm_node_allocated(&vma->node))
  			continue;
-		list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+		list_move_tail(&vma->vm_link, &i915->ggtt.vm.vma_list);
  	}
  	mutex_unlock(&i915->ggtt.vm.mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7c2a396a63f..8ccde5761c2c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -170,7 +170,10 @@ i915_gem_evict_something(struct i915_address_space *vm,
  search_again:
  	active = NULL;
  	INIT_LIST_HEAD(&eviction_list);
-	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
+	list_for_each_entry_safe(vma, next, &vm->vma_list, vm_link) {
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;

Kick them to the end (even after active)? Would need a temporary list head I think, but that shouldn't be too bad. Maybe another temporary list head would simplify the active vma logic below as well. It would basically mean a sorting step from vm->vma_list into local active/inactive/unbound lists, then a tri-phase loop as before, and then joining all the lists back together to replace vm->vma_list. The downside is wasted sorting effort if the first few inactive vma found already satisfy the eviction pass... so maybe it is not worth it. Or we could check as we build. No idea whether more order after the eviction pass would be beneficial though... this all only affects the GGTT in practice, right?
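
Completely untested sketch, just to illustrate the idea (the local list names are invented here):

	struct i915_vma *vma, *next;
	LIST_HEAD(unbound);
	LIST_HEAD(inactive);
	LIST_HEAD(active);

	/* Partition vm->vma_list into unbound/inactive/active. */
	list_for_each_entry_safe(vma, next, &vm->vma_list, vm_link) {
		if (!drm_mm_node_allocated(&vma->node))
			list_move_tail(&vma->vm_link, &unbound);
		else if (i915_vma_is_active(vma))
			list_move_tail(&vma->vm_link, &active);
		else
			list_move_tail(&vma->vm_link, &inactive);
	}

	/* ... eviction scan over &inactive first, then &active ... */

	/* Rebuild vm->vma_list with the unbound vma kicked to the end. */
	list_splice(&inactive, &vm->vma_list);
	list_splice_tail(&active, &vm->vma_list);
	list_splice_tail(&unbound, &vm->vma_list);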

+
  		if (i915_vma_is_active(vma)) {
  			if (vma == active) {
  				if (flags & PIN_NONBLOCK)
@@ -183,7 +186,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
  				if (!active)
  					active = vma;
-				list_move_tail(&vma->vm_link, &vm->bound_list);
+				list_move_tail(&vma->vm_link, &vm->vma_list);
  				continue;
  			}
  		}
@@ -419,7 +422,10 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
INIT_LIST_HEAD(&eviction_list);
  	mutex_lock(&vm->mutex);
-	list_for_each_entry(vma, &vm->bound_list, vm_link) {
+	list_for_each_entry(vma, &vm->vma_list, vm_link) {
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
+
  		if (i915_vma_is_pinned(vma))
  			continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7ae10fbb2ee0..17b752f9a1e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -492,8 +492,7 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
  	stash_init(&vm->free_pages);

-	INIT_LIST_HEAD(&vm->unbound_list);
-	INIT_LIST_HEAD(&vm->bound_list);
+	INIT_LIST_HEAD(&vm->vma_list);
  }
static void i915_address_space_fini(struct i915_address_space *vm)
@@ -1934,7 +1933,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
  	INIT_LIST_HEAD(&vma->obj_link);
mutex_lock(&vma->vm->mutex);
-	list_add(&vma->vm_link, &vma->vm->unbound_list);
+	list_add(&vma->vm_link, &vma->vm->vma_list);
  	mutex_unlock(&vma->vm->mutex);
return vma;
@@ -2113,19 +2112,12 @@ void i915_ppgtt_close(struct i915_address_space *vm)
static void ppgtt_destroy_vma(struct i915_address_space *vm)
  {
-	struct list_head *phases[] = {
-		&vm->bound_list,
-		&vm->unbound_list,
-		NULL,
-	}, **phase;
+	struct i915_vma *vma, *vn;
vm->closed = true;
-	for (phase = phases; *phase; phase++) {
-		struct i915_vma *vma, *vn;
-
-		list_for_each_entry_safe(vma, vn, *phase, vm_link)
-			i915_vma_destroy(vma);
-	}
+	list_for_each_entry_safe(vma, vn, &vm->vma_list, vm_link)
+		i915_vma_destroy(vma);
+	GEM_BUG_ON(!list_empty(&vm->vma_list));
  }
void i915_ppgtt_release(struct kref *kref)
@@ -2137,9 +2129,6 @@ void i915_ppgtt_release(struct kref *kref)
  	ppgtt_destroy_vma(&ppgtt->vm);

-	GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
-	GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
-
  	ppgtt->vm.cleanup(&ppgtt->vm);
  	i915_address_space_fini(&ppgtt->vm);
  	kfree(ppgtt);
@@ -2802,7 +2791,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
  	mutex_lock(&dev_priv->drm.struct_mutex);
  	i915_gem_fini_aliasing_ppgtt(dev_priv);
-	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
+	list_for_each_entry_safe(vma, vn, &ggtt->vm.vma_list, vm_link)
  		WARN_ON(i915_vma_unbind(vma));
if (drm_mm_node_allocated(&ggtt->error_capture))
@@ -3514,7 +3503,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
  	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
-	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+	list_for_each_entry_safe(vma, vn, &ggtt->vm.vma_list, vm_link) {
  		struct drm_i915_gem_object *obj = vma->obj;
if (!(vma->flags & I915_VMA_GLOBAL_BIND))

It looks like this check now prevents re-binding of vma which were never bound. But why was this check here when walking the bound_list? Should it have been a GEM_BUG_ON asserting that the vma is bound instead?
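
Just to illustrate the question, i.e. an untested one-liner in place of the skip, if that was indeed the expectation:

	/* Everything on the old bound_list was expected to have a GGTT binding. */
	GEM_BUG_ON(!(vma->flags & I915_VMA_GLOBAL_BIND));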

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index bd679c8c56dd..ac11f2bce2d5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -298,15 +298,7 @@ struct i915_address_space {
  	struct i915_page_directory *scratch_pd;
  	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
-	/**
-	 * List of vma currently bound.
-	 */
-	struct list_head bound_list;
-
-	/**
-	 * List of vma that are not unbound.
-	 */
-	struct list_head unbound_list;
+	struct list_head vma_list;
  	struct pagestash free_pages;

diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 9d04c26c04f9..f4bc72d4f44f 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -491,8 +491,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
/* We also want to clear any cached iomaps as they wrap vmap */
  	mutex_lock(&i915->ggtt.vm.mutex);
-	list_for_each_entry_safe(vma, next,
-				 &i915->ggtt.vm.bound_list, vm_link) {
+	list_for_each_entry_safe(vma, next, &i915->ggtt.vm.vma_list, vm_link) {
  		unsigned long count = vma->node.size >> PAGE_SHIFT;

Skip unbound vma here? Or are you proxying that check via vma->iomap? That could use a comment.
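
For example, an explicit (untested) skip plus a comment would spell out the intent:

		/*
		 * Unbound vma have no iomap, so they are implicitly skipped
		 * below anyway; make it explicit rather than relying on that.
		 */
		if (!drm_mm_node_allocated(&vma->node))
			continue;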

if (!vma->iomap || i915_vma_is_active(vma))
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 21de3a5e9910..7c8725f12cef 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -703,10 +703,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
  	vma->flags |= I915_VMA_GLOBAL_BIND;
  	__i915_vma_set_map_and_fenceable(vma);
-	mutex_lock(&ggtt->vm.mutex);
-	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
-	mutex_unlock(&ggtt->vm.mutex);
-
  	spin_lock(&dev_priv->mm.obj_lock);
  	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
  	obj->bind_count++;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 61ae07993ae7..8cd7e444f38f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1609,7 +1609,7 @@ static void gem_capture_vm(struct i915_gpu_state *error,
  	int count;
count = 0;
-	list_for_each_entry(vma, &vm->bound_list, vm_link)
+	list_for_each_entry(vma, &vm->vma_list, vm_link)
  		if (i915_vma_is_active(vma))
  			count++;
@@ -1617,7 +1617,7 @@ static void gem_capture_vm(struct i915_gpu_state *error,
  	if (count)
  		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
  	if (active_bo)
-		count = capture_error_bo(active_bo, count, &vm->bound_list,
+		count = capture_error_bo(active_bo, count, &vm->vma_list,
  					 true, false);
  	else
  		count = 0;
@@ -1659,8 +1659,9 @@ static void capture_pinned_buffers(struct i915_gpu_state *error)
  	int count;
count = 0;
-	list_for_each_entry(vma, &vm->bound_list, vm_link)
-		count++;
+	list_for_each_entry(vma, &vm->vma_list, vm_link)
+		if (i915_vma_is_pinned(vma))
+			count++;
bo = NULL;
  	if (count)
@@ -1669,7 +1670,7 @@ static void capture_pinned_buffers(struct i915_gpu_state *error)
  		return;
error->pinned_bo_count =
-		capture_error_bo(bo, count, &vm->bound_list, false, true);
+		capture_error_bo(bo, count, &vm->vma_list, false, true);
  	error->pinned_bo = bo;
  }
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index dcbd0d345c72..ad76a3309830 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -215,7 +215,7 @@ vma_create(struct drm_i915_gem_object *obj,
  	rb_insert_color(&vma->obj_node, &obj->vma_tree);
mutex_lock(&vm->mutex);
-	list_add(&vma->vm_link, &vm->unbound_list);
+	list_add_tail(&vma->vm_link, &vm->vma_list);
  	mutex_unlock(&vm->mutex);
return vma;
@@ -659,10 +659,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
  	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
  	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
-	mutex_lock(&vma->vm->mutex);
-	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
-	mutex_unlock(&vma->vm->mutex);
-
  	if (vma->obj) {
  		struct drm_i915_gem_object *obj = vma->obj;
@@ -696,7 +692,6 @@ i915_vma_remove(struct i915_vma *vma)

  	mutex_lock(&vma->vm->mutex);
  	drm_mm_remove_node(&vma->node);
-	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
  	mutex_unlock(&vma->vm->mutex);
/*
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index eaefba7470f7..8750cfbf1486 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -57,11 +57,6 @@ static int populate_ggtt(struct drm_i915_private *i915)
  		return -EINVAL;
  	}
-	if (list_empty(&i915->ggtt.vm.bound_list)) {
-		pr_err("No objects on the GGTT inactive list!\n");
-		return -EINVAL;
-	}
-
  	return 0;
  }
@@ -71,8 +66,10 @@ static void unpin_ggtt(struct drm_i915_private *i915)
  	struct i915_vma *vma;
mutex_lock(&ggtt->vm.mutex);
-	list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
-		i915_vma_unpin(vma);
+	list_for_each_entry(vma, &i915->ggtt.vm.vma_list, vm_link) {
+		if (i915_vma_is_pinned(vma))
+			i915_vma_unpin(vma);
+	}
  	mutex_unlock(&ggtt->vm.mutex);
  }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 850f9eff6029..550cb72f1e2b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1237,10 +1237,6 @@ static void track_vma_bind(struct i915_vma *vma)
  	__i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
-
-	mutex_lock(&vma->vm->mutex);
-	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
-	mutex_unlock(&vma->vm->mutex);
  }
static int exercise_mock(struct drm_i915_private *i915,


Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx



