Some display MMIO transactions for offsets in the range that requires
the DMC wakelock happen in atomic context (this has been confirmed
during tests on PTL). That means that we need to use a non-sleeping
variant of the MMIO waiting function.

Implement __intel_de_wait_for_register_atomic_nowl() and use it when
waiting for acknowledgment of acquire/release.

v2:
  - No __intel_de_wait_for_register_atomic_nowl() wrapper to convert
    i915 to display. (Jani)
  - Add a quick explanation of why DMC_WAKELOCK_CTL_TIMEOUT_US is
    defined in microseconds. (Luca)

Cc: Jani Nikula <jani.nikula@xxxxxxxxx>
Cc: Luca Coelho <luciano.coelho@xxxxxxxxx>
Signed-off-by: Gustavo Sousa <gustavo.sousa@xxxxxxxxx>
---
 drivers/gpu/drm/i915/display/intel_de.h     | 10 ++++++++++
 drivers/gpu/drm/i915/display/intel_dmc_wl.c | 24 ++++++++++++++++--------
 2 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index bb51f974e9e2..4561de5d5e10 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -117,6 +117,16 @@ __intel_de_wait_for_register_nowl(struct intel_display *display,
 				       value, timeout);
 }
 
+static inline int
+__intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
+					 i915_reg_t reg,
+					 u32 mask, u32 value,
+					 unsigned int fast_timeout_us)
+{
+	return __intel_wait_for_register(__to_uncore(display), reg, mask,
+					 value, fast_timeout_us, 0, NULL);
+}
+
 static inline int
 intel_de_wait(struct intel_display *display, i915_reg_t reg,
 	      u32 mask, u32 value, unsigned int timeout)
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index 05892a237d3a..9255505437d5 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -39,7 +39,11 @@
  * potential future use.
  */
 
-#define DMC_WAKELOCK_CTL_TIMEOUT 5
+/*
+ * Define DMC_WAKELOCK_CTL_TIMEOUT_US in microseconds because we use
+ * the atomic variant of the MMIO waiting routine.
+ */
+#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
 #define DMC_WAKELOCK_HOLD_TIME 50
 
 struct intel_dmc_wl_range {
@@ -78,9 +82,9 @@ static void intel_dmc_wl_work(struct work_struct *work)
 	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ,
 			    0);
 
-	if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
-					      DMC_WAKELOCK_CTL_ACK, 0,
-					      DMC_WAKELOCK_CTL_TIMEOUT)) {
+	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+						     DMC_WAKELOCK_CTL_ACK, 0,
+						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
 		WARN_RATELIMIT(1, "DMC wakelock release timed out");
 		goto out_unlock;
 	}
@@ -217,10 +221,14 @@ void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
 		__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
 				    DMC_WAKELOCK_CTL_REQ);
 
-		if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
-						      DMC_WAKELOCK_CTL_ACK,
-						      DMC_WAKELOCK_CTL_ACK,
-						      DMC_WAKELOCK_CTL_TIMEOUT)) {
+		/*
+		 * We need to use the atomic variant of the waiting routine
+		 * because the DMC wakelock is also taken in atomic context.
+		 */
+		if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+							     DMC_WAKELOCK_CTL_ACK,
+							     DMC_WAKELOCK_CTL_ACK,
+							     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
 			WARN_RATELIMIT(1, "DMC wakelock ack timed out");
 			goto out_unlock;
 		}
-- 
2.47.0
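
A note on why passing 0 for slow_timeout_ms makes this wait safe in
atomic context: __intel_wait_for_register() busy-waits for up to
fast_timeout_us and only then falls back to a sleeping wait bounded by
slow_timeout_ms, so a zero slow timeout restricts it to the busy-wait
phase. The sketch below is only an illustration of that behavior, not
the actual intel_uncore implementation; the helper name
poll_register_atomic() and the fixed 10 usec polling step are made up
for this example.

/*
 * Illustrative sketch only: approximates the busy-wait phase that
 * __intel_wait_for_register() is limited to when slow_timeout_ms == 0.
 * The helper name and the fixed 10 usec polling step are assumptions
 * for illustration, not the real intel_uncore code.
 */
static int poll_register_atomic(struct intel_uncore *uncore, i915_reg_t reg,
				u32 mask, u32 value,
				unsigned int fast_timeout_us)
{
	unsigned int elapsed_us = 0;

	for (;;) {
		if ((intel_uncore_read_fw(uncore, reg) & mask) == value)
			return 0;
		if (elapsed_us >= fast_timeout_us)
			return -ETIMEDOUT;
		udelay(10);	/* busy-wait, never schedules: atomic-safe */
		elapsed_us += 10;
	}
}

Since nothing in such a loop can schedule, it may run under a spinlock
or in interrupt context, which is what the DMC wakelock paths need; the
trade-off is that the CPU spins for up to DMC_WAKELOCK_CTL_TIMEOUT_US
(5 ms) in the worst case.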