From: Ben Widawsky <bwidawsk at gmail.com>

While loading the context submodule, which is called from the main i915
driver, we must create and initialize the default context for the device.
The default context is used for clients which do not set up contexts, and
as a known good state for the hardware. Doing this at initialization allows
later clients to determine whether or not context support exists before
actually trying to create or run them (if there was an error at load,
contexts will be disabled).

Unloading the context is much easier. The ref counts and pin counts are
taken care of by the rest of the driver, so all this code needs to do is
free the kernel memory associated with the default context.

We also implement the previously introduced ioctls.

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_context.c |  495 ++++++++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_drv.h     |    9 +
 include/drm/i915_drm.h              |    4 +-
 3 files changed, 502 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_context.c b/drivers/gpu/drm/i915/i915_context.c
index 9108244..0547411 100644
--- a/drivers/gpu/drm/i915/i915_context.c
+++ b/drivers/gpu/drm/i915/i915_context.c
@@ -26,46 +26,533 @@
 #include "drmP.h"
 #include "i915_drm.h"
+#include "i915_drv.h"
+/* This ID must be 0 if reusing fields from execbuf2. */
+#define DEFAULT_CONTEXT_ID 0
+
+/* HW requirement */
+#define CONTEXT_ALIGN 2048
+
+static int context_idr_cleanup(int id, void *p, void *data);
+
+static int context_generate_id(struct drm_i915_gem_context *ctx)
+{
+	struct drm_i915_file_private *file_priv = ctx->file->driver_priv;
+	int ret, id;
+
+	if (WARN_ON(!mutex_is_locked(&ctx->dev->struct_mutex)))
+		return -ENOENT;
+
+again:
+	if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0)
+		return -ENOMEM;
+
+	spin_lock(&file_priv->context_idr_lock);
+	ret = idr_get_new_above(&file_priv->context_idr, ctx,
+				DEFAULT_CONTEXT_ID + 1, &id);
+	if (ret == -EAGAIN) {
+		spin_unlock(&file_priv->context_idr_lock);
+		goto again;
+	}
+	spin_unlock(&file_priv->context_idr_lock);
+
+	return id;
+}
+
+static void context_destroy_id(struct drm_i915_gem_context *ctx)
+{
+	struct drm_i915_file_private *file_priv = ctx->file->driver_priv;
+
+	spin_lock(&file_priv->context_idr_lock);
+	idr_remove(&file_priv->context_idr, ctx->id);
+	spin_unlock(&file_priv->context_idr_lock);
+}
+
+/*
+ * Initialize a context for the given ring.
+ */
+static int context_init(struct drm_i915_gem_context *ctx,
+			 struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_context *last = NULL;
+	int ret;
+
+	if (ring->context_switch == NULL)
+		return -ENOENT;
+
+	if (ctx->is_default) {
+		/*
+		 * NB: default context is always first. The first context needs
+		 * to do an extra save because the first save (according to the
+		 * spec) doesn't actually do anything. So the outcome is
+		 * 1. Save without restore (no context saved)
+		 * 2. Save without restore (context is saved)
+		 * 3. Save with restore (loads the ctx from step 2)
+		 *
+		 * We have to directly use the ring functions because we need
+		 * special semantics.
+		 */
+		last = ring->context_switch(ring, ctx, I915_CONTEXT_SAVE_ONLY);
+		if (last != ctx) {
+			DRM_ERROR("Context switch state invalid");
+			ret = -EIO;
+			goto dctx_err_out;
+		}
+		last = ring->context_switch(ring, ctx,
+					    I915_CONTEXT_SAVE_ONLY |
+					    I915_CONTEXT_FORCED_SWITCH);
+		if (last != ctx) {
+			DRM_ERROR("First context switch fail");
+			ret = -EIO;
+			goto dctx_err_out;
+		}
+		last = ring->context_switch(ring, ctx,
+					    I915_CONTEXT_NORMAL_SWITCH |
+					    I915_CONTEXT_FORCED_SWITCH);
+		if (last != ctx) {
+			DRM_ERROR("Final context switch fail");
+			ret = -EIO;
+			goto dctx_err_out;
+		}
+	} else {
+		/* Initialize the context now so when we later switch to it we
+		 * have a "properly" setup context.
+		 */
+		u32 tmp = 0;
+		ret = i915_switch_context(ring, ctx, &tmp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+
+dctx_err_out:
+	i915_release_context(ctx);
+	return ret;
+}
+
+/*
+ * Logical context is created and initialized. The context has a backing
+ * buffer object which is referenced at the end of this function.
+ *
+ * @ctx_out: output context which was created
+ *
+ * In theory a context can exist for multiple rings (currently RCS and
+ * VCS), but afaik they can't be shared.
+ */
+static int logical_context_alloc(struct drm_device *dev, struct drm_file *file,
+				 struct drm_i915_gem_context **ctx_out)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_context *ctx;
+	int ret;
+
+	if (WARN_ON(!mutex_is_locked(&dev->struct_mutex)))
+		return -EINVAL;
+
+	ctx = kzalloc(sizeof(struct drm_i915_gem_context), GFP_KERNEL);
+	if (ctx == NULL)
+		return -ENOMEM;
+
+	ctx->dev = dev;
+	ctx->file = file;
+
+	if (dev_priv->default_context == *ctx_out) {
+		ctx->is_default = true;
+		ctx->id = DEFAULT_CONTEXT_ID;
+	} else
+		ctx->id = context_generate_id(ctx);
+
+	ctx->obj = i915_gem_alloc_object(dev, dev_priv->context_size);
+	if (!ctx->obj) {
+		ret = -ENOMEM;
+		goto id_out;
+	}
+
+	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
+	if (ret) {
+		DRM_ERROR("Failed to pin context: %d\n", ret);
+		goto err_unref;
+	}
+
+	ret = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
+	if (ret) {
+		DRM_ERROR("failed to set domain on context: %d", ret);
+		goto err_unpin;
+	}
+
+	drm_gem_object_reference(&ctx->obj->base);
+
+	if (ctx->is_default)
+		BUG_ON(i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false));
+
+	/* NB: future may want to initialize for multiple rings */
+	ret = context_init(ctx, &dev_priv->ring[RCS]);
+	if (!ret)
+		ctx->ring_enable |= (1 << RCS);
+
+	if (ctx->ring_enable)
+		DRM_DEBUG_DRIVER("Context %d allocated, rings %x\n", ctx->id,
+				 ctx->ring_enable);
+	else
+		ctx = NULL;
+
+	*ctx_out = ctx;
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(ctx->obj);
+err_unref:
+	drm_gem_object_unreference(&ctx->obj->base);
+id_out:
+	if (!ctx->is_default)
+		context_destroy_id(ctx);
+	kfree(ctx);
+	return ret;
+}
+
+/* Switch to default context for the specified ring */
+static int switch_to_default(struct intel_ring_buffer *ring,
+			     struct drm_file *file)
+{
+	struct drm_i915_gem_context *dflt;
+	u32 tmp = 0;
+	int ret;
+
+	dflt = i915_get_context(file, DEFAULT_CONTEXT_ID);
+
+	DRM_DEBUG_DRIVER("Switching to default context\n");
+
+	ret = i915_switch_context(ring, dflt, &tmp);
+	if (ret)
+		DRM_ERROR("Couldn't switch back to default context\n");
+
+	return ret;
+}
+
+static void logical_context_fini(struct drm_i915_gem_context *ctx)
+{
+	struct drm_device *dev = ctx->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	if (WARN_ON(!mutex_is_locked(&dev->struct_mutex)))
+		return;
+
+	/* Since we want to have the default context around for the
+	 * duration of the driver, let DRM clean up our default context
+	 * object when needed */
+	if (ctx == dev_priv->default_context)
+		return;
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+		if (!ring->context_switch)
+			continue;
+
+		if (!(ctx->ring_enable & (1 << i)))
+			continue;
+
+		if (ring->last_context != ctx)
+			continue;
+
+		/*
+		 * XXX We can prevent restoring contexts, but not saving them,
+		 * so if we're going to take away the backing context object
+		 * of the last context, we have to switch now.
+		 */
+		if (switch_to_default(ring, ctx->file)) {
+			DRM_ERROR("Failed to destroy. A BO may be leaked\n");
+			continue;
+		}
+	}
+}
+
+/*
+ * Free a context and all its associated memory. By the end of the function
+ * the BO may not actually be freed, since we need to do a context switch
+ * away from it first.
+ */
+static int logical_context_free(struct drm_file *file, uint32_t id)
+{
+	struct drm_device *dev;
+	struct drm_i915_private *dev_priv;
+	struct drm_i915_gem_context *ctx;
+
+	if (WARN_ON(id == DEFAULT_CONTEXT_ID))
+		return 0;
+
+	/* ref and pin the object */
+	ctx = i915_get_context(file, id);
+	if (!ctx) {
+		DRM_ERROR("Couldn't find context %d", id);
+		return -EINVAL;
+	}
+
+	dev = ctx->dev;
+	dev_priv = dev->dev_private;
+	i915_release_context(ctx);
+	logical_context_fini(ctx);
+
+	BUG_ON(ctx->obj->pin_count > 1);
+	drm_gem_object_unreference(&ctx->obj->base);
+
+	context_destroy_id(ctx);
+
+	ctx->file = NULL;
+	ctx->dev = NULL;
+	kfree(ctx);
+
+	return 0;
+}
+
+/**
+ * i915_context_create_ioctl()
+ */
 int i915_context_create_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_context_create *args = data;
-	args->ctx_id = 0;
+	struct drm_i915_gem_context *out_ctx = NULL;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+	if (dev_priv->contexts_disabled) {
+		mutex_unlock(&dev->struct_mutex);
+		return -ENOSPC;
+	}
+	ret = logical_context_alloc(dev, file, &out_ctx);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret == 0) {
+		args->ctx_id = out_ctx->id;
+		if (WARN_ON(args->ctx_id == DEFAULT_CONTEXT_ID))
+			return -ENXIO;
+	}
+	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
 
-	return -EIO;
+	return ret;
 }
 
+/**
+ * i915_context_destroy_ioctl()
+ */
 int i915_context_destroy_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_context_destroy *args = data;
+	int ret;
+
+	if (args->ctx_id == DEFAULT_CONTEXT_ID)
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (dev_priv->contexts_disabled) {
+		mutex_unlock(&dev->struct_mutex);
+		return -ENOSPC;
+	}
+	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
 
-	return -EINVAL;
+	ret = logical_context_free(file, args->ctx_id);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }
 
+static uint32_t
+get_context_size(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 ret = 0;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		ret = GEN6_CXT_TOTAL_SIZE * 64;
+		break;
+	case 7:
+		ret = GEN7_CTX_TOTAL_SIZE * 64;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * i915_context_load() - Creates a default context.
+ * @dev: Device for which to create the default context.
+ */
 void i915_context_load(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t size;
+
+	int ret;
+	if (!HAS_HW_CONTEXTS(dev)) {
+		dev_priv->contexts_disabled = true;
+		return;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	size = get_context_size(dev);
+	if (size > 0x4000) /* 1MB */ {
+		DRM_ERROR("Context bo size seems invalid.");
+		size = 20;
+	}
+	size = roundup(size, 4096);
+	dev_priv->context_size = size;
+	DRM_DEBUG_DRIVER("Logical context size = %d\n", size);
+	ret = logical_context_alloc(dev, NULL, &dev_priv->default_context);
+	if (ret)
+		dev_priv->contexts_disabled = true;
+	else
+		DRM_DEBUG_DRIVER("HW context support initialized\n");
+	mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_context_unload(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (dev_priv->contexts_disabled)
+		return;
+	kfree(dev_priv->default_context);
 }
 
 void i915_context_open(struct drm_device *dev, struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	if (dev_priv->contexts_disabled)
+		return;
+	idr_init(&file_priv->context_idr);
+	spin_lock_init(&file_priv->context_idr_lock);
 }
 
 void i915_context_close(struct drm_device *dev, struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	if (dev_priv->contexts_disabled)
+		return;
+
+	idr_for_each(&file_priv->context_idr, context_idr_cleanup, file);
+	idr_destroy(&file_priv->context_idr);
+}
+
+static int context_idr_cleanup(int id, void *p, void *data)
+{
+	struct drm_file *file = (struct drm_file *) data;
+	int ret;
+	mutex_lock(&file->minor->dev->struct_mutex);
+	ret = logical_context_free(file, id);
+	mutex_unlock(&file->minor->dev->struct_mutex);
+	return ret;
 }
 
+/**
+ * Look up a context by id, pin it and acquire a reference.
+ */
 struct drm_i915_gem_context *i915_get_context(struct drm_file *file,
 					      uint32_t id)
 {
-	return NULL;
+	struct drm_i915_private *dev_priv = file->minor->dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_context *ctx = NULL;
+	int ret;
+
+	spin_lock(&file_priv->context_idr_lock);
+	if (id == DEFAULT_CONTEXT_ID)
+		ctx = dev_priv->default_context;
+	else
+		ctx = idr_find(&file_priv->context_idr, id);
+	if (ctx) {
+		/* Since we touch the pinned list, we must hold struct_mutex
+		 * when calling this function.
+		 */
+		BUG_ON(!mutex_is_locked(&ctx->dev->struct_mutex));
+		drm_gem_object_reference(&ctx->obj->base);
+		ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
+		if (ret) {
+			drm_gem_object_unreference(&ctx->obj->base);
+			ctx = NULL;
+		}
+	}
+	spin_unlock(&file_priv->context_idr_lock);
+
+	return ctx;
+}
+
+void i915_release_context(struct drm_i915_gem_context *ctx)
+{
+	BUG_ON(!mutex_is_locked(&ctx->dev->struct_mutex));
+	BUG_ON(ctx->is_default && ctx->obj->pin_count == 1);
+
+	i915_gem_object_unpin(ctx->obj);
+	drm_gem_object_unreference(&ctx->obj->base);
+}
+
+/*
+ * Do a context switch on ring to new context. If *seqno is 0, create
+ * a new request when the context can be retired.
+ */
+int i915_switch_context(struct intel_ring_buffer *ring,
+			struct drm_i915_gem_context *to,
+			u32 *seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_context *from;
+	u32 type;
+	int ret = 0;
+
+	if (dev_priv->contexts_disabled || !ring->context_switch)
+		return 0;
+
+	BUG_ON(to == NULL);
+	BUG_ON(to->obj->pin_count == 0);
+
+	/*
+	 * The first time a context is loaded, it is unsafe to restore the
+	 * uninitialized context.
+	 */
+	if (!to->is_initialized)
+		type = I915_CONTEXT_SAVE_ONLY;
+	else
+		type = I915_CONTEXT_NORMAL_SWITCH;
+
+	from = ring->context_switch(ring, to, type);
+	BUG_ON(from == NULL);
+
+	to->is_initialized = true;
+
+	if (!from->is_default && from != to) {
+		if (*seqno == 0) {
+			struct drm_i915_gem_request *request;
+			request = kzalloc(sizeof(*request), GFP_KERNEL);
+			if (WARN_ON(request == NULL)) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			ret = i915_add_request(ring, NULL, request);
+			if (ret) {
+				kfree(request);
+				goto out;
+			}
+			*seqno = request->seqno;
+		}
+
+		i915_gem_object_move_to_active(from->obj, ring, *seqno);
+	}
+
+out:
+	i915_release_context(from);
+	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4175929..f84d8f3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -759,7 +759,12 @@ typedef struct drm_i915_private {
 	struct backlight_device *backlight;
 
 	struct drm_property *broadcast_rgb_property;
+	struct drm_property *force_audio_property;
+
+	uint32_t context_size;
+	struct drm_i915_gem_context *default_context;
+	bool contexts_disabled;
 } drm_i915_private_t;
 
 enum i915_cache_level {
@@ -1200,6 +1205,10 @@ extern void i915_context_open(struct drm_device *dev, struct drm_file *file);
 extern void i915_context_close(struct drm_device *dev, struct drm_file *file);
 extern struct drm_i915_gem_context *i915_get_context(struct drm_file *file,
 						      uint32_t id);
+extern void i915_release_context(struct drm_i915_gem_context *ctx);
+extern int i915_switch_context(struct intel_ring_buffer *ring,
+			       struct drm_i915_gem_context *to,
+			       u32 *seqno);
 
 /**
  * Returns true if seq1 is later than seq2.
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8fa509e..b8aa665 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -891,10 +891,10 @@ struct drm_intel_sprite_colorkey {
 };
 
 struct drm_i915_gem_context_create {
-	/* output: id of new context*/
+	/* output: id of new context */
 	__u32 ctx_id;
 
-	__u16 pad;
+	__u32 rsvd;
 };
 
 struct drm_i915_gem_context_destroy {
-- 
1.7.9
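
For reference, a minimal userspace sketch of the new interface. This assumes
the ioctl numbers introduced earlier in the series follow the usual
DRM_IOCTL_I915_GEM_CONTEXT_CREATE/DESTROY naming and uses libdrm's drmIoctl()
wrapper; it is illustrative only, with error handling trimmed.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Create a HW context; returns its id, or 0 on failure. The kernel never
 * hands out 0, since that is DEFAULT_CONTEXT_ID. (Assumes the ioctl number
 * from the earlier patch in this series.) */
static uint32_t create_hw_context(int fd)
{
	struct drm_i915_gem_context_create create;

	memset(&create, 0, sizeof(create));
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
		return 0;

	return create.ctx_id;
}

/* Destroy a previously created HW context. */
static void destroy_hw_context(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_destroy destroy;

	memset(&destroy, 0, sizeof(destroy));
	destroy.ctx_id = ctx_id;
	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}

The returned ctx_id would presumably be fed back to the kernel through a
reused execbuffer2 field, which is why DEFAULT_CONTEXT_ID must stay 0 on the
kernel side.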