[PATCH 03/18] drm/i915: context basic create & destroy

This commit doesn't really match up 1:1 with any of the RFC patches. The
primary difference from the equivalent RFC logic is the new use of a
reference counter for the context life cycle, which allows us to free the
context backing object and the context kernel memory separately.

For example, if a context is destroyed by ioctl or by file close, the
context object itself may still be referenced by the GPU. The object will
be properly destroyed by the existing active/inactive list code, but only
at some point in the future. So we can either refcount the context and
free its kernel state immediately, or add a hook for when we walk the
free list. This patch takes the refcounting approach.
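
As a rough sketch (not itself part of this patch; the ioctl plumbing comes
separately), teardown under this scheme looks approximately like the
per-file cleanup path below:

	ctx = i915_gem_context_get(file_priv, id);  /* +1: lookup reference */
	if (ctx == NULL)
		return -ENOENT;
	kref_put(&ctx->nref, destroy_hw_context);   /* drop the lookup ref */
	kref_put(&ctx->nref, destroy_hw_context);   /* drop the creation ref;
						     * the final put invokes
						     * do_destroy() */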

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h         |   14 +++
 drivers/gpu/drm/i915/i915_gem_context.c |  168 ++++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/intel_ringbuffer.h |    2 +
 3 files changed, 182 insertions(+), 2 deletions(-)
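
A note on i915_gem_context_get(), kept below the --- so it stays out of
the final commit message: it is marked __used because its callers only
arrive in later patches. A hypothetical caller would look something like
this (ctx_id being whatever handle userspace passed in):

	ctx = i915_gem_context_get(file_priv, ctx_id); /* takes a reference */
	if (ctx == NULL)
		return -ENOENT;
	/* ... use ctx->obj, e.g. to emit a context switch ... */
	kref_put(&ctx->nref, destroy_hw_context);

Taking the reference under context_lock is what guarantees the context
can't be destroyed between the idr lookup and its use.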

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 33c232a..e49e2f7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -277,6 +277,18 @@ struct i915_hw_ppgtt {
 	dma_addr_t scratch_page_dma_addr;
 };
 
+
+/* This must match up with the value previously used for execbuf2.rsvd1. */
+#define DEFAULT_CONTEXT_ID 0
+struct i915_hw_context {
+	struct drm_i915_file_private *file_priv;
+	struct kref nref;
+
+	int id;
+	struct intel_ring_buffer *ring;
+	struct drm_i915_gem_object *obj;
+};
+
 enum no_fbc_reason {
 	FBC_NO_OUTPUT, /* no outputs enabled to compress */
 	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
@@ -981,6 +993,8 @@ struct drm_i915_file_private {
 		struct spinlock lock;
 		struct list_head request_list;
 	} mm;
+	struct spinlock context_lock;
+	struct idr context_idr;
 };
 
 #define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index caa0e06..2c9116d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -29,6 +29,15 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
+/* This is a HW constraint. The value below is the largest known requirement
+ * I've seen in a spec to date, and that was a workaround for a non-shipping
+ * part. It should be safe to decrease this, but it's more future-proof as-is.
+ */
+#define CONTEXT_ALIGN (64<<10)
+
+static struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
+
 static int get_context_size(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -53,6 +62,91 @@ static int get_context_size(struct drm_device *dev)
 	return ret;
 }
 
+static void do_destroy(struct i915_hw_context *ctx)
+{
+	struct drm_device *dev = ctx->obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (ctx->file_priv)
+		idr_remove(&ctx->file_priv->context_idr, ctx->id);
+	else
+		BUG_ON(ctx != dev_priv->ring[RCS].default_context);
+
+	drm_gem_object_unreference(&ctx->obj->base);
+	kfree(ctx);
+}
+
+static int
+create_hw_context(struct drm_device *dev,
+		  struct drm_i915_file_private *file_priv,
+		  struct i915_hw_context **ctx_out)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, id;
+
+	*ctx_out = kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
+	if (*ctx_out == NULL)
+		return -ENOMEM;
+
+	(*ctx_out)->obj = i915_gem_alloc_object(dev,
+						dev_priv->hw_context_size);
+	if ((*ctx_out)->obj == NULL) {
+		kfree(*ctx_out);
+		return -ENOMEM;
+	}
+
+	kref_init(&(*ctx_out)->nref);
+
+	/* The ring associated with the context object is handled by the normal
+	 * object tracking code. We give an initial ring value simply to pass an
+	 * assertion in the context switch code.
+	 */
+	(*ctx_out)->ring = &dev_priv->ring[RCS];
+
+	/* Default context will never have a file_priv */
+	if (file_priv == NULL)
+		return 0;
+
+	(*ctx_out)->file_priv = file_priv;
+
+again:
+	if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	spin_lock(&file_priv->context_lock);
+	ret = idr_get_new_above(&file_priv->context_idr, *ctx_out,
+				DEFAULT_CONTEXT_ID + 1, &id);
+	if (ret == 0)
+		(*ctx_out)->id = id;
+	spin_unlock(&file_priv->context_lock);
+
+	if (ret == -EAGAIN)
+		goto again;
+	else if (ret)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	do_destroy(*ctx_out);
+	return ret;
+}
+
+static void destroy_hw_context(struct kref *kref)
+{
+	struct i915_hw_context *ctx = container_of(kref, struct i915_hw_context,
+						   nref);
+
+	/* The backing object for the context may still be in use if the context
+	 * switch away from it hasn't yet occurred. So the freeing of the object
+	 * is handled asynchronously (and asymmetrically) by the normal i915
+	 * buffer life cycle.
+	 */
+	do_destroy(ctx);
+}
+
 /**
  * The default context needs to exist per ring that uses contexts. It stores the
  * context state of the GPU for applications that don't utilize HW contexts, as
@@ -60,7 +154,28 @@ static int get_context_size(struct drm_device *dev)
  */
 static int create_default_context(struct drm_i915_private *dev_priv)
 {
-	return 0;
+	struct i915_hw_context *ctx;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+	ret = create_hw_context(dev_priv->dev, NULL,
+				&dev_priv->ring[RCS].default_context);
+	if (ret)
+		return ret;
+
+	/* We may need to do things with the shrinker which require us to
+	 * immediately switch back to the default context. This can cause a
+	 * problem, as pinning the default context at that point also requires
+	 * GTT space, which may not be available. To avoid this we always keep
+	 * the default context pinned.
+	 */
+	ctx = dev_priv->ring[RCS].default_context;
+	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
+	if (ret)
+		do_destroy(ctx);
+
+	return ret;
 }
 
 void i915_gem_context_load(struct drm_device *dev)
@@ -69,7 +184,8 @@ void i915_gem_context_load(struct drm_device *dev)
 	uint32_t ctx_size;
 
 	/* If called from reset, or thaw... we've been here already */
-	if (dev_priv->hw_contexts_disabled)
+	if (dev_priv->hw_contexts_disabled ||
+	    dev_priv->ring[RCS].default_context)
 		return;
 
 	ctx_size = get_context_size(dev);
@@ -95,20 +211,68 @@ void i915_gem_context_unload(struct drm_device *dev)
 
 	if (dev_priv->hw_contexts_disabled)
 		return;
+
+	i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
+
+	WARN_ON(kref_put(&dev_priv->ring[RCS].default_context->nref,
+			 destroy_hw_context) == 0);
 }
 
 void i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	if (dev_priv->hw_contexts_disabled)
 		return;
+
+	idr_init(&file_priv->context_idr);
+	spin_lock_init(&file_priv->context_lock);
+}
+
+static int context_idr_cleanup(int id, void *p, void *data)
+{
+	struct drm_file *file = (struct drm_file *) data;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct i915_hw_context *ctx;
+
+	BUG_ON(id == DEFAULT_CONTEXT_ID);
+	ctx = i915_gem_context_get(file_priv, id);
+	BUG_ON(ctx == NULL);
+	kref_put(&ctx->nref, destroy_hw_context); /* drop the lookup ref */
+	kref_put(&ctx->nref, destroy_hw_context); /* drop the creation ref */
+
+	return 0;
 }
 
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	if (dev_priv->hw_contexts_disabled)
 		return;
+
+	mutex_lock(&dev->struct_mutex);
+	idr_for_each(&file_priv->context_idr, context_idr_cleanup, file);
+	idr_destroy(&file_priv->context_idr);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static __used struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
+{
+	struct i915_hw_context *ctx = NULL;
+
+	/* kref_get must be synchronized with destroy and with file close;
+	 * otherwise we could find the ctx and have it destroyed before we
+	 * take our reference.
+	 */
+	spin_lock(&file_priv->context_lock);
+	ctx = idr_find(&file_priv->context_idr, id);
+	if (ctx)
+		kref_get(&ctx->nref);
+	spin_unlock(&file_priv->context_lock);
+
+	return ctx;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bc0365b..8c9f898 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -120,6 +120,8 @@ struct  intel_ring_buffer {
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
 
+	struct i915_hw_context *default_context;
+
 	void *private;
 };
 
-- 
1.7.9.4


