Since the guc allocates and pins an object into the GGTT for its usage,
it is more natural to use that pinned VMA as our resource cookie.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  10 +--
 drivers/gpu/drm/i915/i915_guc_submission.c | 131 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_guc.h           |   9 +-
 drivers/gpu/drm/i915/intel_guc_loader.c    |   7 +-
 4 files changed, 73 insertions(+), 84 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9154919fdd56..485fc23893d6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2601,15 +2601,15 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
-	u32 *log;
+	struct drm_i915_gem_object *obj;
 	int i = 0, pg;

-	if (!log_obj)
+	if (dev_priv->guc.log == NULL)
 		return 0;

-	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
-		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
+	obj = dev_priv->guc.log->obj;
+	for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
+		u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));

 		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
 			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 63ef34c78494..1c92c4c6b0e1 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -357,8 +357,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 static void guc_init_ctx_desc(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
-	struct drm_i915_gem_object *client_obj = client->client_obj;
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_gem_object *client_obj = client->client->obj;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx = client->owner;
 	struct guc_context_desc desc;
@@ -412,7 +412,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	 * The doorbell, process descriptor, and workqueue are all parts
 	 * of the client object, which the GuC will reference via the GGTT
 	 */
-	gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
+	gfx_addr = client->client->node.start;
 	desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
 				client->doorbell_offset;
 	desc.db_trigger_cpu = (uintptr_t)client->client_base +
@@ -429,7 +429,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	desc.desc_private = (uintptr_t)client;

 	/* Pool context is pinned already */
-	sg = guc->ctx_pool_obj->pages;
+	sg = guc->ctx_pool->obj->pages;
 	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
 			     sizeof(desc) * client->ctx_index);
 }
@@ -442,7 +442,7 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,

 	memset(&desc, 0, sizeof(desc));

-	sg = guc->ctx_pool_obj->pages;
+	sg = guc->ctx_pool->obj->pages;
 	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
 			     sizeof(desc) * client->ctx_index);
 }
@@ -524,7 +524,7 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
 	/* WQ starts from the page after doorbell / process_desc */
 	wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
 	wq_off &= PAGE_SIZE - 1;
-	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
+	base = kmap_atomic(i915_gem_object_get_page(gc->client->obj, wq_page));
 	wqi = (struct guc_wq_item *)((char *)base + wq_off);

 	/* Now fill in the 4-word work queue item */
@@ -588,8 +588,8 @@ void i915_guc_submit(struct drm_i915_gem_request *rq)
  */

 /**
- * gem_allocate_guc_obj() - Allocate gem object for GuC usage
- * @dev: drm device
+ * guc_allocate_vma() - Allocate gem object for GuC usage
+ * @guc: the guc
  * @size: size of object
  *
  * This is a wrapper to create a gem obj. In order to use it inside GuC, the
@@ -598,46 +598,40 @@ void i915_guc_submit(struct drm_i915_gem_request *rq)
  *
  * Return: A drm_i915_gem_object if successful, otherwise NULL.
  */
-static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
-							 u32 size)
+static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct drm_i915_gem_object *obj;
+	int ret;

-	obj = i915_gem_object_create(dev, size);
+	obj = i915_gem_object_create(dev_priv->dev, size);
 	if (IS_ERR(obj))
-		return NULL;
+		return ERR_CAST(obj);

-	if (i915_gem_object_get_pages(obj)) {
+	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
+				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	if (ret) {
 		i915_gem_object_put(obj);
-		return NULL;
-	}
-
-	if (i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
-			PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
-		i915_gem_object_put(obj);
-		return NULL;
+		return ERR_PTR(ret);
 	}

 	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
 	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

-	return obj;
+	return i915_gem_obj_to_ggtt(obj);
 }

 /**
- * gem_release_guc_obj() - Release gem object allocated for GuC usage
- * @obj: gem obj to be released
+ * guc_release_vma() - Release gem object allocated for GuC usage
+ * @vma: gem obj to be released
  */
-static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
+static void guc_release_vma(struct i915_vma *vma)
 {
-	if (!obj)
+	if (vma == NULL)
 		return;

-	if (i915_gem_obj_is_pinned(obj))
-		i915_gem_object_ggtt_unpin(obj);
-
-	i915_gem_object_put(obj);
+	i915_vma_unpin(vma);
+	i915_gem_object_put(vma->obj);
 }

 static void guc_client_free(struct drm_device *dev,
@@ -671,7 +665,7 @@ static void guc_client_free(struct drm_device *dev,
 		kunmap(kmap_to_page(client->client_base));
 	}

-	gem_release_guc_obj(client->client_obj);
+	guc_release_vma(client->client);

 	if (client->ctx_index != GUC_INVALID_CTX_ID) {
 		guc_fini_ctx_desc(guc, client);
@@ -700,7 +694,7 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 	struct i915_guc_client *client;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;

 	client = kzalloc(sizeof(*client), GFP_KERNEL);
 	if (!client)
@@ -719,13 +713,13 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 	}

 	/* The first page is doorbell/proc_desc. Two followed pages are wq. */
-	obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
-	if (!obj)
+	vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
+	if (IS_ERR(vma))
 		goto err;

 	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
-	client->client_obj = obj;
-	client->client_base = kmap(i915_gem_object_get_page(obj, 0));
+	client->client = vma;
+	client->client_base = kmap(i915_gem_object_get_page(vma->obj, 0));
 	client->wq_offset = GUC_DB_SIZE;
 	client->wq_size = GUC_WQ_SIZE;
@@ -769,8 +763,7 @@ err:

 static void guc_create_log(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	unsigned long offset;
 	uint32_t size, flags;
@@ -786,16 +779,16 @@ static void guc_create_log(struct intel_guc *guc)
 		GUC_LOG_ISR_PAGES + 1 +
 		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;

-	obj = guc->log_obj;
-	if (!obj) {
-		obj = gem_allocate_guc_obj(dev_priv->dev, size);
-		if (!obj) {
+	vma = guc->log;
+	if (vma == NULL) {
+		vma = guc_allocate_vma(guc, size);
+		if (IS_ERR(vma)) {
 			/* logging will be off */
 			i915.guc_log_level = -1;
 			return;
 		}

-		guc->log_obj = obj;
+		guc->log = vma;
 	}

 	/* each allocated unit is a page */
@@ -804,7 +797,7 @@ static void guc_create_log(struct intel_guc *guc)
 		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
 		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);

-	offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
+	offset = vma->node.start >> PAGE_SHIFT; /* in pages */
 	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
@@ -833,7 +826,7 @@ static void init_guc_policies(struct guc_policies *policies)
 static void guc_create_ads(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	struct guc_ads *ads;
 	struct guc_policies *policies;
 	struct guc_mmio_reg_state *reg_state;
@@ -846,16 +839,16 @@ static void guc_create_ads(struct intel_guc *guc)
 		sizeof(struct guc_mmio_reg_state) +
 		GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;

-	obj = guc->ads_obj;
-	if (!obj) {
-		obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
-		if (!obj)
+	vma = guc->ads;
+	if (vma == NULL) {
+		vma = guc_allocate_vma(guc, PAGE_ALIGN(size));
+		if (IS_ERR(vma))
 			return;

-		guc->ads_obj = obj;
+		guc->ads = vma;
 	}

-	page = i915_gem_object_get_page(obj, 0);
+	page = i915_gem_object_get_page(vma->obj, 0);
 	ads = kmap(page);

 	/*
@@ -875,8 +868,7 @@ static void guc_create_ads(struct intel_guc *guc)
 	policies = (void *)ads + sizeof(struct guc_ads);
 	init_guc_policies(policies);

-	ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
-			sizeof(struct guc_ads);
+	ads->scheduler_policies = vma->node.start + sizeof(struct guc_ads);

 	/* MMIO reg state */
 	reg_state = (void *)policies + sizeof(struct guc_policies);
@@ -904,22 +896,22 @@ static void guc_create_ads(struct intel_guc *guc)
  */
 int i915_guc_submission_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const size_t ctxsize = sizeof(struct guc_context_desc);
-	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
-	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
-	struct intel_guc *guc = &dev_priv->guc;
+	struct intel_guc *guc = &to_i915(dev)->guc;
+	struct i915_vma *vma;
+	u32 size;

 	if (!i915.enable_guc_submission)
 		return 0; /* not enabled */

-	if (guc->ctx_pool_obj)
+	if (guc->ctx_pool)
 		return 0; /* already allocated */

-	guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
-	if (!guc->ctx_pool_obj)
-		return -ENOMEM;
+	size = PAGE_ALIGN(GUC_MAX_GPU_CONTEXTS*sizeof(struct guc_context_desc));
+	vma = guc_allocate_vma(guc, size);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+	guc->ctx_pool = vma;

 	ida_init(&guc->ctx_ids);
 	guc_create_log(guc);
@@ -962,19 +954,18 @@ void i915_guc_submission_disable(struct drm_device *dev)

 void i915_guc_submission_fini(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_guc *guc = &dev_priv->guc;
+	struct intel_guc *guc = &to_i915(dev)->guc;

-	gem_release_guc_obj(dev_priv->guc.ads_obj);
-	guc->ads_obj = NULL;
+	guc_release_vma(guc->ads);
+	guc->ads = NULL;

-	gem_release_guc_obj(dev_priv->guc.log_obj);
-	guc->log_obj = NULL;
+	guc_release_vma(guc->log);
+	guc->log = NULL;

-	if (guc->ctx_pool_obj)
+	if (guc->ctx_pool)
 		ida_destroy(&guc->ctx_ids);

-	gem_release_guc_obj(guc->ctx_pool_obj);
-	guc->ctx_pool_obj = NULL;
+	guc_release_vma(guc->ctx_pool);
+	guc->ctx_pool = NULL;
 }

 /**
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 7f9063385258..3b65500d4509 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -62,7 +62,7 @@ struct drm_i915_gem_request;
  *   retcode: errno from last guc_submit()
  */
 struct i915_guc_client {
-	struct drm_i915_gem_object *client_obj;
+	struct i915_vma *client;
 	void *client_base;		/* first page (only) of above	*/
 	struct i915_gem_context *owner;
 	struct intel_guc *guc;
@@ -124,11 +124,10 @@ struct intel_guc_fw {
 struct intel_guc {
 	struct intel_guc_fw guc_fw;
 	uint32_t log_flags;
-	struct drm_i915_gem_object *log_obj;
+	struct i915_vma *log;

-	struct drm_i915_gem_object *ads_obj;
-
-	struct drm_i915_gem_object *ctx_pool_obj;
+	struct i915_vma *ads;
+	struct i915_vma *ctx_pool;
 	struct ida ctx_ids;

 	struct i915_guc_client *execbuf_client;
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index be93b458968a..1ecf88fd0b10 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -179,16 +179,15 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
 			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
 	}

-	if (guc->ads_obj) {
-		u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
-				>> PAGE_SHIFT;
+	if (guc->ads) {
+		u32 ads = (u32)guc->ads->node.start >> PAGE_SHIFT;
 		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
 		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
 	}

 	/* If GuC submission is enabled, set up additional parameters here */
 	if (i915.enable_guc_submission) {
-		u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
+		u32 pgs = dev_priv->guc.ctx_pool->node.start;
 		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;

 		pgs >>= PAGE_SHIFT;
-- 
2.8.1
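
For illustration, a minimal sketch (not part of the diff above) of the
allocate/address/release pattern with the pinned VMA as the cookie. The
helper name example_guc_buffer is made up; every call it uses appears in
the patch, and since guc_allocate_vma() is static such a helper would
have to live in i915_guc_submission.c:

	/* Sketch only: exercise the VMA-as-cookie lifecycle once. */
	static int example_guc_buffer(struct intel_guc *guc)
	{
		struct i915_vma *vma;

		/* guc_allocate_vma() creates the object and pins it into the GGTT */
		vma = guc_allocate_vma(guc, PAGE_SIZE);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/*
		 * The GuC-visible address comes straight from the pinned VMA;
		 * CPU access still goes through the backing object, vma->obj.
		 */
		DRM_DEBUG_DRIVER("GuC buffer at GGTT offset 0x%llx\n",
				 (u64)vma->node.start);

		/* Release is simply unpin plus dropping the object reference. */
		i915_vma_unpin(vma);
		i915_gem_object_put(vma->obj);
		return 0;
	}

Holding the pinned VMA for the lifetime of the cookie means the GGTT
address stays valid for as long as the buffer is in use, rather than
being looked up from the object on each access.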