From: Michal Wajdeczko <michal.wajdeczko@xxxxxxxxx>

Manual pointer manipulation is error prone. Let the compiler calculate
the right offsets for us in case we need to change the ads layout.

v2: don't call it object (Chris)
v3: restyle offset assignments (Chris)
v4: stylistic reductions

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@xxxxxxxxx>
Cc: Oscar Mateo <oscar.mateo@xxxxxxxxx>
Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@xxxxxxxxx>
Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_guc_submission.c | 51 ++++++++++++++----------------
 1 file changed, 23 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index b4d24cd7639a..3651a1b0d13c 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -806,26 +806,27 @@ static void guc_policies_init(struct guc_policies *policies)
         policies->is_valid = 1;
 }
 
+#define p_offset(ptr, member) offsetof(typeof(*ptr), member)
+
 static void guc_addon_create(struct intel_guc *guc)
 {
         struct drm_i915_private *dev_priv = guc_to_i915(guc);
         struct i915_vma *vma;
-        struct guc_ads *ads;
-        struct guc_policies *policies;
-        struct guc_mmio_reg_state *reg_state;
-        struct intel_engine_cs *engine;
-        enum intel_engine_id id;
         struct page *page;
-        u32 size;
 
-        /* The ads obj includes the struct itself and buffers passed to GuC */
-        size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
-                sizeof(struct guc_mmio_reg_state) +
-                GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+        struct {
+                struct guc_ads ads;
+                struct guc_policies policies;
+                struct guc_mmio_reg_state reg_state;
+                u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
+        } __packed *blob;
+        u32 offset;
+        struct intel_engine_cs *engine;
+        enum intel_engine_id id;
 
         vma = guc->ads_vma;
         if (!vma) {
-                vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(size));
+                vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
                 if (IS_ERR(vma))
                         return;
 
@@ -833,7 +834,8 @@ static void guc_addon_create(struct intel_guc *guc)
         }
 
         page = i915_vma_first_page(vma);
-        ads = kmap(page);
+        blob = kmap(page);
+        offset = i915_ggtt_offset(vma);
 
         /*
          * The GuC requires a "Golden Context" when it reinitialises
@@ -843,34 +845,27 @@
          * to find it.
          */
         engine = dev_priv->engine[RCS];
-        ads->golden_context_lrca = engine->status_page.ggtt_offset;
+        blob->ads.golden_context_lrca = engine->status_page.ggtt_offset;
 
         for_each_engine(engine, dev_priv, id)
-                ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
+                blob->ads.eng_state_size[engine->guc_id] =
+                        intel_lr_context_size(engine);
 
         /* GuC scheduling policies */
-        policies = (void *)ads + sizeof(struct guc_ads);
-        guc_policies_init(policies);
-
-        ads->scheduler_policies =
-                guc_ggtt_offset(vma) + sizeof(struct guc_ads);
+        guc_policies_init(&blob->policies);
 
         /* MMIO reg state */
-        reg_state = (void *)policies + sizeof(struct guc_policies);
-
         for_each_engine(engine, dev_priv, id) {
-                reg_state->mmio_white_list[engine->guc_id].mmio_start =
+                blob->reg_state.mmio_white_list[engine->guc_id].mmio_start =
                         engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
 
                 /* Nothing to be saved or restored for now. */
-                reg_state->mmio_white_list[engine->guc_id].count = 0;
+                blob->reg_state.mmio_white_list[engine->guc_id].count = 0;
         }
 
-        ads->reg_state_addr = ads->scheduler_policies +
-                sizeof(struct guc_policies);
-
-        ads->reg_state_buffer = ads->reg_state_addr +
-                sizeof(struct guc_mmio_reg_state);
+        blob->ads.scheduler_policies = offset + p_offset(blob, policies);
+        blob->ads.reg_state_addr = offset + p_offset(blob, reg_state);
+        blob->ads.reg_state_buffer = offset + p_offset(blob, reg_state_buffer);
 
         kunmap(page);
 }
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
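
As background on the technique the patch adopts, here is a minimal, self-contained
userspace C sketch of compiler-computed offsets via offsetof() over one packed blob.
The struct names, member fields and the base value below are illustrative only and
are not taken from the driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the GuC sub-blocks (not the real layouts). */
struct ads       { uint32_t scheduler_policies, reg_state_addr, reg_state_buffer; };
struct policies  { uint32_t dpc_promote_time; };
struct reg_state { uint32_t mmio_start, count; };

/* One packed blob: the compiler lays the sub-blocks out back to back. */
struct blob {
        struct ads ads;
        struct policies policies;
        struct reg_state reg_state;
        uint8_t reg_state_buffer[4096];
} __attribute__((packed));

int main(void)
{
        uint32_t base = 0x1000;         /* stand-in for the GGTT offset of the mapping */
        struct blob blob = {0};

        /*
         * Instead of summing sizeof() values by hand, derive each
         * device-visible address as base + offsetof(); reordering or
         * growing a member updates every offset automatically.
         */
        blob.ads.scheduler_policies = base + offsetof(struct blob, policies);
        blob.ads.reg_state_addr     = base + offsetof(struct blob, reg_state);
        blob.ads.reg_state_buffer   = base + offsetof(struct blob, reg_state_buffer);

        printf("policies @ 0x%x, reg_state @ 0x%x, buffer @ 0x%x\n",
               blob.ads.scheduler_policies,
               blob.ads.reg_state_addr,
               blob.ads.reg_state_buffer);
        return 0;
}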