sandybridge_pcode is another sideband interface, so move it to its new home in intel_sideband.c.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h       |  10 --
 drivers/gpu/drm/i915/intel_hdcp.c     |   3 +-
 drivers/gpu/drm/i915/intel_pm.c       | 194 ----------------------------------
 drivers/gpu/drm/i915/intel_sideband.c | 194 ++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_sideband.h |  10 ++
 5 files changed, 206 insertions(+), 205 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fa90e7214296..ad6c8abaab1a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3422,16 +3422,6 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_display_error_state *error);
 
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
-				    u32 val, int fast_timeout_us,
-				    int slow_timeout_ms);
-#define sandybridge_pcode_write(dev_priv, mbox, val) \
-	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
-
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
-		      u32 reply_mask, u32 reply, int timeout_base_ms);
-
 /* intel_dpio_phy.c */
 void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch);
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 81259a4fbdfd..18d06dde3fe5 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -11,8 +11,9 @@
 #include <linux/i2c.h>
 #include <linux/random.h>
 
-#include "intel_drv.h"
 #include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_sideband.h"
 
 #define KEY_LOAD_TRIES	5
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6259c95ce293..447811c5be35 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9159,200 +9159,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 	}
 }
 
-static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv,
-					    u32 mbox)
-{
-	switch (mbox & GEN6_PCODE_ERROR_MASK) {
-	case GEN6_PCODE_SUCCESS:
-		return 0;
-	case GEN6_PCODE_UNIMPLEMENTED_CMD:
-		return -ENODEV;
-	case GEN6_PCODE_ILLEGAL_CMD:
-		return -ENXIO;
-	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
-	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
-		return -EOVERFLOW;
-	case GEN6_PCODE_TIMEOUT:
-		return -ETIMEDOUT;
-	default:
-		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
-		return 0;
-	}
-}
-
-static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv,
-					    u32 mbox)
-{
-	switch (mbox & GEN6_PCODE_ERROR_MASK) {
-	case GEN6_PCODE_SUCCESS:
-		return 0;
-	case GEN6_PCODE_ILLEGAL_CMD:
-		return -ENXIO;
-	case GEN7_PCODE_TIMEOUT:
-		return -ETIMEDOUT;
-	case GEN7_PCODE_ILLEGAL_DATA:
-		return -EINVAL;
-	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
-		return -EOVERFLOW;
-	default:
-		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
-		return 0;
-	}
-}
-
-static int __sandybridge_pcode_rw(struct drm_i915_private *dev_priv,
-				  u32 mbox, u32 *val,
-				  int fast_timeout_us,
-				  int slow_timeout_ms,
-				  bool is_read)
-{
-	lockdep_assert_held(&dev_priv->sb_lock);
-
-	/*
-	 * GEN6_PCODE_* are outside of the forcewake domain, we can
-	 * use te fw I915_READ variants to reduce the amount of work
-	 * required when reading/writing.
-	 */
-
-	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
-		return -EAGAIN;
-
-	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
-	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
-	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
-	if (__intel_wait_for_register_fw(dev_priv,
-					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
-					 fast_timeout_us,
-					 slow_timeout_ms,
-					 &mbox))
-		return -ETIMEDOUT;
-
-	if (is_read)
-		*val = I915_READ_FW(GEN6_PCODE_DATA);
-
-	if (INTEL_GEN(dev_priv) > 6)
-		return gen7_check_mailbox_status(dev_priv, mbox);
-	else
-		return gen6_check_mailbox_status(dev_priv, mbox);
-}
-
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
-{
-	int err;
-
-	mutex_lock(&dev_priv->sb_lock);
-	err = __sandybridge_pcode_rw(dev_priv, mbox, val,
-				     500, 0,
-				     true);
-	mutex_unlock(&dev_priv->sb_lock);
-
-	if (err) {
-		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
-				 mbox, __builtin_return_address(0), err);
-	}
-
-	return err;
-}
-
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
-				    u32 mbox, u32 val,
-				    int fast_timeout_us,
-				    int slow_timeout_ms)
-{
-	int err;
-
-	mutex_lock(&dev_priv->sb_lock);
-	err = __sandybridge_pcode_rw(dev_priv, mbox, &val,
-				     fast_timeout_us, slow_timeout_ms,
-				     false);
-	mutex_unlock(&dev_priv->sb_lock);
-
-	if (err) {
-		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
-				 val, mbox, __builtin_return_address(0), err);
-	}
-
-	return err;
-}
-
-static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
-				  u32 request, u32 reply_mask, u32 reply,
-				  u32 *status)
-{
-	*status = __sandybridge_pcode_rw(dev_priv, mbox, &request,
-					 500, 0,
-					 true);
-
-	return *status || ((request & reply_mask) == reply);
-}
-
-/**
- * skl_pcode_request - send PCODE request until acknowledgment
- * @dev_priv: device private
- * @mbox: PCODE mailbox ID the request is targeted for
- * @request: request ID
- * @reply_mask: mask used to check for request acknowledgment
- * @reply: value used to check for request acknowledgment
- * @timeout_base_ms: timeout for polling with preemption enabled
- *
- * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
- * The request is acknowledged once the PCODE reply dword equals @reply after
- * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 50 ms with
- * preemption disabled.
- *
- * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
- * other error as reported by PCODE.
- */
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
-		      u32 reply_mask, u32 reply, int timeout_base_ms)
-{
-	u32 status;
-	int ret;
-
-	mutex_lock(&dev_priv->sb_lock);
-
-#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
-				   &status)
-
-	/*
-	 * Prime the PCODE by doing a request first. Normally it guarantees
-	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
-	 * _wait_for() doesn't guarantee when its passed condition is evaluated
-	 * first, so send the first request explicitly.
-	 */
-	if (COND) {
-		ret = 0;
-		goto out;
-	}
-	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
-	if (!ret)
-		goto out;
-
-	/*
-	 * The above can time out if the number of requests was low (2 in the
-	 * worst case) _and_ PCODE was busy for some reason even after a
-	 * (queued) request and @timeout_base_ms delay. As a workaround retry
-	 * the poll with preemption disabled to maximize the number of
-	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
-	 * account for interrupts that could reduce the number of these
-	 * requests, and for any quirks of the PCODE firmware that delays
-	 * the request completion.
-	 */
-	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
-	WARN_ON_ONCE(timeout_base_ms > 3);
-	preempt_disable();
-	ret = wait_for_atomic(COND, 50);
-	preempt_enable();
-
-out:
-	mutex_unlock(&dev_priv->sb_lock);
-	return ret ? ret : status;
-#undef COND
-}
-
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index e5faebb511ae..b84cbff29a73 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -384,3 +384,197 @@ void vlv_flisdsi_put(struct drm_i915_private *dev_priv)
 {
 	vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_FLISDSI));
 }
+
+static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv,
+					    u32 mbox)
+{
+	switch (mbox & GEN6_PCODE_ERROR_MASK) {
+	case GEN6_PCODE_SUCCESS:
+		return 0;
+	case GEN6_PCODE_UNIMPLEMENTED_CMD:
+		return -ENODEV;
+	case GEN6_PCODE_ILLEGAL_CMD:
+		return -ENXIO;
+	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+		return -EOVERFLOW;
+	case GEN6_PCODE_TIMEOUT:
+		return -ETIMEDOUT;
+	default:
+		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+		return 0;
+	}
+}
+
+static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv,
+					    u32 mbox)
+{
+	switch (mbox & GEN6_PCODE_ERROR_MASK) {
+	case GEN6_PCODE_SUCCESS:
+		return 0;
+	case GEN6_PCODE_ILLEGAL_CMD:
+		return -ENXIO;
+	case GEN7_PCODE_TIMEOUT:
+		return -ETIMEDOUT;
+	case GEN7_PCODE_ILLEGAL_DATA:
+		return -EINVAL;
+	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+		return -EOVERFLOW;
+	default:
+		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+		return 0;
+	}
+}
+
+static int __sandybridge_pcode_rw(struct drm_i915_private *dev_priv,
+				  u32 mbox, u32 *val,
+				  int fast_timeout_us,
+				  int slow_timeout_ms,
+				  bool is_read)
+{
+	lockdep_assert_held(&dev_priv->sb_lock);
+
+	/*
+	 * GEN6_PCODE_* are outside of the forcewake domain, we can
+	 * use te fw I915_READ variants to reduce the amount of work
+	 * required when reading/writing.
+	 */
+
+	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
+		return -EAGAIN;
+
+	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
+	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
+	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+	if (__intel_wait_for_register_fw(dev_priv,
+					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+					 fast_timeout_us,
+					 slow_timeout_ms,
+					 &mbox))
+		return -ETIMEDOUT;
+
+	if (is_read)
+		*val = I915_READ_FW(GEN6_PCODE_DATA);
+
+	if (INTEL_GEN(dev_priv) > 6)
+		return gen7_check_mailbox_status(dev_priv, mbox);
+	else
+		return gen6_check_mailbox_status(dev_priv, mbox);
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
+{
+	int err;
+
+	mutex_lock(&dev_priv->sb_lock);
+	err = __sandybridge_pcode_rw(dev_priv, mbox, val,
+				     500, 0,
+				     true);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	if (err) {
+		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+				 mbox, __builtin_return_address(0), err);
+	}
+
+	return err;
+}
+
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+				    u32 mbox, u32 val,
+				    int fast_timeout_us,
+				    int slow_timeout_ms)
+{
+	int err;
+
+	mutex_lock(&dev_priv->sb_lock);
+	err = __sandybridge_pcode_rw(dev_priv, mbox, &val,
+				     fast_timeout_us, slow_timeout_ms,
+				     false);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	if (err) {
+		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+				 val, mbox, __builtin_return_address(0), err);
+	}
+
+	return err;
+}
+
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+				  u32 request, u32 reply_mask, u32 reply,
+				  u32 *status)
+{
+	*status = __sandybridge_pcode_rw(dev_priv, mbox, &request,
+					 500, 0,
+					 true);
+
+	return *status || ((request & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and if this times out for another 50 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+		      u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+	u32 status;
+	int ret;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+				   &status)
+
+	/*
+	 * Prime the PCODE by doing a request first. Normally it guarantees
+	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
+	 * _wait_for() doesn't guarantee when its passed condition is evaluated
+	 * first, so send the first request explicitly.
+	 */
+	if (COND) {
+		ret = 0;
+		goto out;
+	}
+	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
+	if (!ret)
+		goto out;
+
+	/*
+	 * The above can time out if the number of requests was low (2 in the
+	 * worst case) _and_ PCODE was busy for some reason even after a
+	 * (queued) request and @timeout_base_ms delay. As a workaround retry
+	 * the poll with preemption disabled to maximize the number of
+	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
+	 * account for interrupts that could reduce the number of these
+	 * requests, and for any quirks of the PCODE firmware that delays
+	 * the request completion.
+	 */
+	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+	WARN_ON_ONCE(timeout_base_ms > 3);
+	preempt_disable();
+	ret = wait_for_atomic(COND, 50);
+	preempt_enable();
+
+out:
+	mutex_unlock(&dev_priv->sb_lock);
+	return ret ? ret : status;
+#undef COND
+}
diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/intel_sideband.h
index 46e917dd3973..684d6cd5df30 100644
--- a/drivers/gpu/drm/i915/intel_sideband.h
+++ b/drivers/gpu/drm/i915/intel_sideband.h
@@ -68,4 +68,14 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
 
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
+				    u32 val, int fast_timeout_us,
+				    int slow_timeout_ms);
+#define sandybridge_pcode_write(dev_priv, mbox, val) \
+	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
+
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+		      u32 reply_mask, u32 reply, int timeout_base_ms);
+
 #endif /* _INTEL_SIDEBAND_H */
-- 
2.16.2
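
For context only (not part of the patch): after this move a caller gets the pcode helpers from intel_sideband.h rather than i915_drv.h. A minimal sketch of such a caller, where example_pcode_roundtrip and the mbox argument are hypothetical stand-ins for a real user and a real GEN6_PCODE_* mailbox ID from i915_reg.h:

#include "intel_sideband.h"

/* Illustrative sketch; 'mbox' stands in for a real GEN6_PCODE_* ID. */
static int example_pcode_roundtrip(struct drm_i915_private *dev_priv, u32 mbox)
{
	u32 val = 0;
	int err;

	/* Both helpers serialise internally on dev_priv->sb_lock. */
	err = sandybridge_pcode_read(dev_priv, mbox, &val);
	if (err)
		return err;

	/* The convenience macro uses the 500us fast poll and no slow timeout. */
	return sandybridge_pcode_write(dev_priv, mbox, val);
}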