v2: Decoupling ADS together with logs
v2: Group initialization of GuC objects

Cc: Michal Wajdeczko <michal.wajdeczko@xxxxxxxxx>
Cc: Oscar Mateo <oscar.mateo@xxxxxxxxx>
Cc: Anusha Srivatsa <anusha.srivatsa@xxxxxxxxx>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@xxxxxxxxx>
Signed-off-by: Sujaritha Sundaresan <sujaritha.sundaresan@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_guc_submission.c | 108 ++--------------------
 drivers/gpu/drm/i915/intel_uc.c            | 140 ++++++++++++++++++++++++++---
 drivers/gpu/drm/i915/intel_uc.h            |   6 +-
 3 files changed, 134 insertions(+), 120 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index a15146e..3a4bdb4 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -70,14 +70,6 @@
  * represents in-order queue. The kernel driver packs ring tail pointer and an
  * ELSP context descriptor dword into Work Item.
  * See guc_wq_item_append()
- *
- * ADS:
- * The Additional Data Struct (ADS) has pointers for different buffers used by
- * the GuC. One single gem object contains the ADS struct itself (guc_ads), the
- * scheduling policies (guc_policies), a structure describing a collection of
- * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
- * its internal state for sleep.
- *
  */
 
 static inline bool is_high_priority(struct i915_guc_client* client)
@@ -996,7 +988,7 @@ static void guc_client_free(struct i915_guc_client *client)
 	kfree(client);
 }
 
-static void guc_policies_init(struct guc_policies *policies)
+void i915_guc_policies_init(struct guc_policies *policies)
 {
 	struct guc_policy *policy;
 	u32 p, i;
@@ -1018,83 +1010,14 @@ static void guc_policies_init(struct guc_policies *policies)
 	policies->is_valid = 1;
 }
 
-static int guc_ads_create(struct intel_guc *guc)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct i915_vma *vma;
-	struct page *page;
-	/* The ads obj includes the struct itself and buffers passed to GuC */
-	struct {
-		struct guc_ads ads;
-		struct guc_policies policies;
-		struct guc_mmio_reg_state reg_state;
-		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
-	} __packed *blob;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	u32 base;
-
-	GEM_BUG_ON(guc->ads_vma);
-
-	vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	guc->ads_vma = vma;
-
-	page = i915_vma_first_page(vma);
-	blob = kmap(page);
-
-	/* GuC scheduling policies */
-	guc_policies_init(&blob->policies);
-
-	/* MMIO reg state */
-	for_each_engine(engine, dev_priv, id) {
-		blob->reg_state.white_list[engine->guc_id].mmio_start =
-			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
-
-		/* Nothing to be saved or restored for now. */
-		blob->reg_state.white_list[engine->guc_id].count = 0;
-	}
-
-	/*
-	 * The GuC requires a "Golden Context" when it reinitialises
-	 * engines after a reset. Here we use the Render ring default
-	 * context, which must already exist and be pinned in the GGTT,
-	 * so its address won't change after we've told the GuC where
-	 * to find it.
-	 */
-	blob->ads.golden_context_lrca =
-		dev_priv->engine[RCS]->status_page.ggtt_offset;
-
-	for_each_engine(engine, dev_priv, id)
-		blob->ads.eng_state_size[engine->guc_id] = engine->context_size;
-
-	base = guc_ggtt_offset(vma);
-	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
-	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
-	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
-
-	kunmap(page);
-
-	return 0;
-}
-
-static void guc_ads_destroy(struct intel_guc *guc)
-{
-	i915_vma_unpin_and_release(&guc->ads_vma);
-}
-
 /*
  * Set up the memory resources to be shared with the GuC (via the GGTT)
  * at firmware loading time.
  */
-int i915_guc_submission_init(struct drm_i915_private *dev_priv)
+int i915_guc_submission_shared_objects_init(struct intel_guc *guc)
 {
-	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_vma *vma;
 	void *vaddr;
-	int ret;
 
 	if (guc->stage_desc_pool)
 		return 0;
@@ -1109,42 +1032,21 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 
 	vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto err_vma;
+		i915_vma_unpin_and_release(&guc->stage_desc_pool);
+		return PTR_ERR(vaddr);
 	}
 
 	guc->stage_desc_pool_vaddr = vaddr;
 
-	ret = intel_guc_log_create(guc);
-	if (ret < 0)
-		goto err_vaddr;
-
-
-	ret = guc_ads_create(guc);
-	if (ret < 0)
-		goto err_log;
-
 	ida_init(&guc->stage_ids);
 
 	return 0;
 
-err_log:
-	intel_guc_log_destroy(guc);
-
-err_vaddr:
-	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
-err_vma:
-	i915_vma_unpin_and_release(&guc->stage_desc_pool);
-	return ret;
 }
 
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
+void i915_guc_submission_shared_objects_fini(struct intel_guc *guc)
 {
-	struct intel_guc *guc = &dev_priv->guc;
-
 	ida_destroy(&guc->stage_ids);
-	guc_ads_destroy(guc);
-	intel_guc_log_destroy(guc);
 	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
 	i915_vma_unpin_and_release(&guc->stage_desc_pool);
 }
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 52687e3..a334926 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -326,6 +326,124 @@ static int guc_enable_communication(struct intel_guc *guc)
 	return 0;
 }
 
+/*
+ * The Additional Data Struct (ADS) has pointers for different buffers used by
+ * the GuC. One single gem object contains the ADS struct itself (guc_ads), the
+ * scheduling policies (guc_policies), a structure describing a collection of
+ * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
+ * its internal state for sleep.
+ */
+static int guc_ads_create(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct i915_vma *vma;
+	struct page *page;
+	/* The ads obj includes the struct itself and buffers passed to GuC */
+	struct {
+		struct guc_ads ads;
+		struct guc_policies policies;
+		struct guc_mmio_reg_state reg_state;
+		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
+	} __packed *blob;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	u32 base;
+
+	GEM_BUG_ON(guc->ads_vma);
+
+	vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	guc->ads_vma = vma;
+
+	page = i915_vma_first_page(vma);
+	blob = kmap(page);
+
+	/* GuC scheduling policies */
+	i915_guc_policies_init(&blob->policies);
+
+	/* MMIO reg state */
+	for_each_engine(engine, dev_priv, id) {
+		blob->reg_state.white_list[engine->guc_id].mmio_start =
+			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+		/* Nothing to be saved or restored for now. */
+		blob->reg_state.white_list[engine->guc_id].count = 0;
+	}
+
+	/*
+	 * The GuC requires a "Golden Context" when it reinitialises
+	 * engines after a reset. Here we use the Render ring default
+	 * context, which must already exist and be pinned in the GGTT,
+	 * so its address won't change after we've told the GuC where
+	 * to find it.
+	 */
+	blob->ads.golden_context_lrca =
+		dev_priv->engine[RCS]->status_page.ggtt_offset;
+
+	for_each_engine(engine, dev_priv, id)
+		blob->ads.eng_state_size[engine->guc_id] = engine->context_size;
+
+	base = guc_ggtt_offset(vma);
+	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
+	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
+	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
+
+	kunmap(page);
+
+	return 0;
+}
+
+static void guc_ads_destroy(struct intel_guc *guc)
+{
+	i915_vma_unpin_and_release(&guc->ads_vma);
+}
+
+static int guc_shared_objects_init(struct intel_guc *guc)
+{
+	int ret;
+
+	if (guc->ads_vma)
+		return 0;
+
+	ret = intel_guc_log_create(guc);
+	if (ret < 0)
+		return ret;
+
+	ret = guc_ads_create(guc);
+	if (ret < 0)
+		goto err_logs;
+
+	if (i915.enable_guc_submission) {
+		/*
+		 * This is stuff we need to have available at fw load time
+		 * if we are planning to enable submission later
+		 */
+		ret = i915_guc_submission_shared_objects_init(guc);
+		if (ret)
+			goto err_ads;
+	}
+
+	return 0;
+
+err_ads:
+	guc_ads_destroy(guc);
+err_logs:
+	intel_guc_log_destroy(guc);
+
+	return ret;
+}
+
+static void guc_shared_objects_fini(struct intel_guc *guc)
+{
+	if (i915.enable_guc_submission)
+		i915_guc_submission_shared_objects_fini(guc);
+
+	guc_ads_destroy(guc);
+	intel_guc_log_destroy(guc);
+}
+
 static void guc_disable_communication(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -350,15 +468,10 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 	/* We need to notify the guc whenever we change the GGTT */
 	i915_ggtt_enable_guc(dev_priv);
 
-	if (i915.enable_guc_submission) {
-		/*
-		 * This is stuff we need to have available at fw load time
-		 * if we are planning to enable submission later
-		 */
-		ret = i915_guc_submission_init(dev_priv);
-		if (ret)
-			goto err_guc;
-	}
+	ret = guc_shared_objects_init(guc);
+	if (ret)
+		goto err_guc;
+
 
 	/* init WOPCM */
 	I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
@@ -379,7 +492,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 		 */
 		ret = __intel_uc_reset_hw(dev_priv);
 		if (ret)
-			goto err_submission;
+			goto err_shared;
 
 		intel_huc_init_hw(&dev_priv->huc);
 		ret = intel_guc_init_hw(&dev_priv->guc);
@@ -424,9 +537,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 		gen9_disable_guc_interrupts(dev_priv);
 err_log_capture:
 	guc_capture_load_err_log(guc);
-err_submission:
-	if (i915.enable_guc_submission)
-		i915_guc_submission_fini(dev_priv);
+err_shared:
+	guc_shared_objects_fini(guc);
 err_guc:
 	i915_ggtt_disable_guc(dev_priv);
 
@@ -459,9 +571,9 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
 
 	if (i915.enable_guc_submission) {
 		gen9_disable_guc_interrupts(dev_priv);
-		i915_guc_submission_fini(dev_priv);
 	}
 
+	guc_shared_objects_fini(&dev_priv->guc);
 	i915_ggtt_disable_guc(dev_priv);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 731ec16..f935556 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -249,14 +249,14 @@ static inline void intel_guc_notify(struct intel_guc *guc)
 
 /* i915_guc_submission.c */
-int i915_guc_submission_init(struct drm_i915_private *dev_priv);
+int i915_guc_submission_shared_objects_init(struct intel_guc *guc);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
 int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
 void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+void i915_guc_submission_shared_objects_fini(struct intel_guc *guc);
 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-
+void i915_guc_policies_init(struct guc_policies *policies);
 
 /* intel_guc_log.c */
 int intel_guc_log_create(struct intel_guc *guc);
 void intel_guc_log_destroy(struct intel_guc *guc);
-- 
1.9.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx