Re: [PATCH 10/11] drm/i915: Mark the context and address space as closed

On 14/12/15 11:36, Chris Wilson wrote:
When the user closes the context, mark it and the dependent address space
as closed. As we use an asynchronous destruct method, this has two purposes.
First, it allows us to flag the closed context and detect internal errors if
we try to create any new objects for it (as it is removed from the user's
namespace, these should be internal bugs only). Second, it allows us to
immediately reap stale VMAs.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
  drivers/gpu/drm/i915/i915_drv.h         |  4 ++++
  drivers/gpu/drm/i915/i915_gem.c         | 15 ++++++++-----
  drivers/gpu/drm/i915/i915_gem_context.c | 39 +++++++++++++++++++++++++++++----
  drivers/gpu/drm/i915/i915_gem_gtt.c     | 11 +++++++---
  drivers/gpu/drm/i915/i915_gem_gtt.h     |  9 ++++++++
  drivers/gpu/drm/i915/i915_gem_stolen.c  |  2 +-
  6 files changed, 66 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 696469a06715..66ecd6b3df95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -892,6 +892,8 @@ struct intel_context {
  	} engine[I915_NUM_RINGS];

  	struct list_head link;
+
+	bool closed:1;
  };

  enum fb_op_origin {
@@ -2720,6 +2722,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
   * _guarantee_ VMA in question is _not in use_ anywhere.
   */
  int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+
  int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
  void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
  void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7c13c27a6470..08ea0b7eda8b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2367,12 +2367,13 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
  	return 0;
  }

-static void i915_vma_close(struct i915_vma *vma)
+void i915_vma_close(struct i915_vma *vma)
  {
  	RQ_BUG_ON(vma->closed);
  	vma->closed = true;

  	list_del_init(&vma->obj_link);
+	list_del_init(&vma->vm_link);
  	if (!vma->active)
  		WARN_ON(i915_vma_unbind(vma));
  }
@@ -2620,12 +2621,13 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
  			return ret;
  	}

-	trace_i915_vma_unbind(vma);
-
-	vma->vm->unbind_vma(vma);
+	if (likely(!vma->vm->closed)) {
+		trace_i915_vma_unbind(vma);
+		vma->vm->unbind_vma(vma);
+	}
  	vma->bound = 0;

-	list_del_init(&vma->vm_link);
+	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
  	if (vma->is_ggtt) {
  		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
  			obj->map_and_fenceable = false;
@@ -2882,7 +2884,7 @@ search_free:
  		goto err_remove_node;

  	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&vma->vm_link, &vm->inactive_list);
+	list_move_tail(&vma->vm_link, &vm->inactive_list);

  	return vma;

@@ -3890,6 +3892,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
  	if (!list_empty(&vma->exec_list))
  		return;

+	list_del(&vma->vm_link);
  	if (!vma->is_ggtt)
  		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c4a8a64cd1b2..9669547c7c2d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -153,6 +153,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
  	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

  	trace_i915_context_free(ctx);
+	RQ_BUG_ON(!ctx->closed);

  	if (i915.enable_execlists)
  		intel_lr_context_free(ctx);
@@ -209,6 +210,36 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
  	return obj;
  }

+static void i915_ppgtt_close(struct i915_address_space *vm)
+{
+	struct list_head *phases[] = {
+		&vm->active_list,
+		&vm->inactive_list,
+		&vm->unbound_list,
+		NULL,
+	}, **phase;
+
+	RQ_BUG_ON(vm->is_ggtt);
+	RQ_BUG_ON(vm->closed);
+	vm->closed = true;
+
+	for (phase = phases; *phase; phase++) {
+		struct i915_vma *vma, *vn;
+
+		list_for_each_entry_safe(vma, vn, *phase, vm_link)
+			i915_vma_close(vma);
+	}
+}
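
To check that I follow the intent, here is a minimal userspace model of
the closed semantics as I read them -- illustrative only, not the driver
code. The names mirror the patch, assert() stands in for RQ_BUG_ON(),
and the real locking, lists and refcounting are omitted:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct vm {
	bool closed;		/* set when the owning context is closed */
};

struct vma {
	struct vm *vm;
	bool active;		/* still referenced by an in-flight request */
	bool closed;
};

/* Purpose 1: creating new objects for a closed context is an internal bug. */
static void vma_create(struct vma *vma, struct vm *vm)
{
	assert(!vm->closed);
	vma->vm = vm;
	vma->active = false;
	vma->closed = false;
}

/* Once the address space is closed there is no point touching the PTEs. */
static void vma_unbind(struct vma *vma)
{
	if (!vma->vm->closed)
		printf("unbind_vma: tearing down PTEs\n");
	else
		printf("vm closed: skipping PTE teardown\n");
}

/* Purpose 2: closing allows idle (stale) vmas to be reaped immediately. */
static void vma_close(struct vma *vma)
{
	assert(!vma->closed);
	vma->closed = true;
	if (!vma->active)
		vma_unbind(vma);	/* reap now, don't wait for retire */
}

int main(void)
{
	struct vm vm = { .closed = false };
	struct vma vma;

	vma_create(&vma, &vm);
	vm.closed = true;	/* the user closed the context */
	vma_close(&vma);	/* idle vma is reaped on the spot */
	return 0;
}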

Hm, so VMAs get unlinked from everywhere, but then on retire they go back to the inactive list. Is that not a bit weird?

Why is it necessary to unlink VMAs from everywhere when marking them as closed?

And actually, on retire, objects are ahead of VMAs in req->active_list, so the last object unreference happens before the last VMA is retired, which is even weirder.

Am I missing something?
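
For concreteness, here is a toy userspace reproduction of the list
motion I mean, with the kernel list helpers reimplemented so it compiles
standalone. It assumes retire does a list_move_tail of vma->vm_link onto
vm->inactive_list, which is my reading of the retire path:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void __list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_del_init(struct list_head *e)
{
	__list_del(e);
	INIT_LIST_HEAD(e);
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
	__list_del(e);
	list_add_tail(e, h);
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head active_list, inactive_list, vm_link;

	INIT_LIST_HEAD(&active_list);
	INIT_LIST_HEAD(&inactive_list);
	INIT_LIST_HEAD(&vm_link);

	/* the vma is still active when the context is closed */
	list_add_tail(&vm_link, &active_list);

	/* i915_vma_close(): unlink from the vm lists */
	list_del_init(&vm_link);

	/* later, on retire: the closed vma moves (back) onto the
	 * inactive list even though close just unlinked it */
	list_move_tail(&vm_link, &inactive_list);

	printf("closed vma on inactive_list: %s\n",
	       list_empty(&inactive_list) ? "no" : "yes");
	return 0;
}

Compiled and run, this reports the closed VMA back on the inactive
list, which is exactly the transition that looks odd to me.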

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx



