Rename guc_init_send_regs() to intel_guc_init_send_regs() and call it
from intel_uc_init_hw() instead of guc_enable_communication(). Move the
following functions from intel_uc.c to intel_guc.c:
1. guc_send_reg()
2. intel_guc_init_send_regs()
3. intel_guc_send_mmio()

Cc: Michal Wajdeczko <michal.wajdeczko@xxxxxxxxx>
Cc: Michał Winiarski <michal.winiarski@xxxxxxxxx>
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@xxxxxxxxx>
---
 drivers/gpu/drm/i915/intel_guc.c | 84 ++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_guc.h |  2 +
 drivers/gpu/drm/i915/intel_uc.c  | 87 +---------------------------------------
 drivers/gpu/drm/i915/intel_uc.h  |  1 -
 4 files changed, 88 insertions(+), 86 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 0c62cc2..02b8251 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -23,6 +23,7 @@
  */

 #include "i915_drv.h"
+#include "intel_guc.h"

 int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
 {
@@ -45,3 +46,86 @@ void intel_guc_init_early(struct intel_guc *guc)
 	guc->send = intel_guc_send_nop;
 	guc->notify = gen8_guc_raise_irq;
 }
+
+static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
+{
+	GEM_BUG_ON(!guc->send_regs.base);
+	GEM_BUG_ON(!guc->send_regs.count);
+	GEM_BUG_ON(i >= guc->send_regs.count);
+
+	return _MMIO(guc->send_regs.base + 4 * i);
+}
+
+void intel_guc_init_send_regs(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	enum forcewake_domains fw_domains = 0;
+	unsigned int i;
+
+	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
+	guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
+
+	for (i = 0; i < guc->send_regs.count; i++) {
+		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+					guc_send_reg(guc, i),
+					FW_REG_READ | FW_REG_WRITE);
+	}
+	guc->send_regs.fw_domains = fw_domains;
+}
+
+/*
+ * This function implements the MMIO based host to GuC interface.
+ */
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	u32 status;
+	int i;
+	int ret;
+
+	GEM_BUG_ON(!len);
+	GEM_BUG_ON(len > guc->send_regs.count);
+
+	/* If CT is available, we expect to use MMIO only during init/fini */
+	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
+		   *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
+		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
+
+	mutex_lock(&guc->send_mutex);
+	intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
+
+	for (i = 0; i < len; i++)
+		I915_WRITE(guc_send_reg(guc, i), action[i]);
+
+	POSTING_READ(guc_send_reg(guc, i - 1));
+
+	intel_guc_notify(guc);
+
+	/*
+	 * No GuC command should ever take longer than 10ms.
+	 * Fast commands should still complete in 10us.
+	 */
+	ret = __intel_wait_for_register_fw(dev_priv,
+					   guc_send_reg(guc, 0),
+					   INTEL_GUC_RECV_MASK,
+					   INTEL_GUC_RECV_MASK,
+					   10, 10, &status);
+	if (status != INTEL_GUC_STATUS_SUCCESS) {
+		/*
+		 * Either the GuC explicitly returned an error (which
+		 * we convert to -EIO here) or no response at all was
+		 * received within the timeout limit (-ETIMEDOUT)
+		 */
+		if (ret != -ETIMEDOUT)
+			ret = -EIO;
+
+		DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
+			 " ret=%d status=0x%08X response=0x%08X\n",
+			 action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
+	}
+
+	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
+	mutex_unlock(&guc->send_mutex);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 76b7113..7860b8f 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -39,5 +39,7 @@ static inline void intel_guc_notify(struct intel_guc *guc)
 /* intel_guc.c */
 int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
 void intel_guc_init_early(struct intel_guc *guc);
+void intel_guc_init_send_regs(struct intel_guc *guc);
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);

 #endif
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 9ff5c97..700ec70 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -250,32 +250,6 @@ void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
 	__intel_uc_fw_fini(&dev_priv->huc.fw);
 }

-static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
-{
-	GEM_BUG_ON(!guc->send_regs.base);
-	GEM_BUG_ON(!guc->send_regs.count);
-	GEM_BUG_ON(i >= guc->send_regs.count);
-
-	return _MMIO(guc->send_regs.base + 4 * i);
-}
-
-static void guc_init_send_regs(struct intel_guc *guc)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	enum forcewake_domains fw_domains = 0;
-	unsigned int i;
-
-	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
-	guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
-
-	for (i = 0; i < guc->send_regs.count; i++) {
-		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
-					guc_send_reg(guc, i),
-					FW_REG_READ | FW_REG_WRITE);
-	}
-	guc->send_regs.fw_domains = fw_domains;
-}
-
 static void guc_capture_load_err_log(struct intel_guc *guc)
 {
 	if (!guc->log.vma || i915.guc_log_level < 0)
@@ -297,8 +271,6 @@ static int guc_enable_communication(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);

-	guc_init_send_regs(guc);
-
 	if (HAS_GUC_CT(dev_priv))
 		return intel_guc_enable_ct(guc);

@@ -374,6 +346,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto err_log_capture;

+	intel_guc_init_send_regs(guc);
+
 	ret = guc_enable_communication(guc);
 	if (ret)
 		goto err_log_capture;
@@ -447,63 +421,6 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
 	i915_ggtt_disable_guc(dev_priv);
 }

-/*
- * This function implements the MMIO based host to GuC interface.
- */
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	u32 status;
-	int i;
-	int ret;
-
-	GEM_BUG_ON(!len);
-	GEM_BUG_ON(len > guc->send_regs.count);
-
-	/* If CT is available, we expect to use MMIO only during init/fini */
-	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
-		   *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
-		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
-
-	mutex_lock(&guc->send_mutex);
-	intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
-
-	for (i = 0; i < len; i++)
-		I915_WRITE(guc_send_reg(guc, i), action[i]);
-
-	POSTING_READ(guc_send_reg(guc, i - 1));
-
-	intel_guc_notify(guc);
-
-	/*
-	 * No GuC command should ever take longer than 10ms.
-	 * Fast commands should still complete in 10us.
-	 */
-	ret = __intel_wait_for_register_fw(dev_priv,
-					   guc_send_reg(guc, 0),
-					   INTEL_GUC_RECV_MASK,
-					   INTEL_GUC_RECV_MASK,
-					   10, 10, &status);
-	if (status != INTEL_GUC_STATUS_SUCCESS) {
-		/*
-		 * Either the GuC explicitly returned an error (which
-		 * we convert to -EIO here) or no response at all was
-		 * received within the timeout limit (-ETIMEDOUT)
-		 */
-		if (ret != -ETIMEDOUT)
-			ret = -EIO;
-
-		DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
-			 " ret=%d status=0x%08X response=0x%08X\n",
-			 action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
-	}
-
-	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
-	mutex_unlock(&guc->send_mutex);
-
-	return ret;
-}
-
 int intel_guc_sample_forcewake(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 057ef7e..6da5f1c 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -209,7 +209,6 @@ struct intel_huc {
 int intel_uc_init_hw(struct drm_i915_private *dev_priv);
 void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);

 /* intel_guc_loader.c */
 int intel_guc_select_fw(struct intel_guc *guc);
--
1.9.1
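
For readers unfamiliar with the send path, here is a minimal sketch (not part of
the patch) of how a caller would use the relocated MMIO interface once
intel_guc_init_send_regs() has populated guc->send_regs from intel_uc_init_hw().
The helper name and the action opcode below are hypothetical, and most callers
go through the guc->send() hook set up in intel_guc_init_early() rather than
calling intel_guc_send_mmio() directly:

/*
 * Illustrative sketch only: example_send_action() and the 0x123 opcode are
 * made up for this example and do not exist in the driver.
 */
static int example_send_action(struct intel_guc *guc)
{
	/* One action header word plus one payload word, both hypothetical. */
	u32 action[] = { 0x123, 0 };

	/*
	 * guc->send_regs (base, count, fw_domains) must already be valid;
	 * with this patch that setup happens in intel_uc_init_hw() before
	 * GuC communication is enabled.
	 */
	return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
}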