This is principally a little bit of syntactic sugar to hide the
atomic_read()s throughout the code to retrieve the current reset_counter.
It also provides the utility functions to check the reset state on an
already read reset_counter, so that (in later patches) we can read it once
and do multiple tests rather than risk the value changing between tests.

v2: Be strict about converting existing i915_reset_in_progress() callers
over to the more verbose i915_reset_in_progress_or_wedged().

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Daniel Vetter <daniel.vetter@xxxxxxxx>
Reviewed-by: Daniel Vetter <daniel.vetter@xxxxxxxx>
---
 drivers/gpu/drm/i915/i915_debugfs.c     |  4 ++--
 drivers/gpu/drm/i915/i915_drv.h         | 32 ++++++++++++++++++++++++++++----
 drivers/gpu/drm/i915/i915_gem.c         | 16 ++++++++--------
 drivers/gpu/drm/i915/i915_irq.c         |  2 +-
 drivers/gpu/drm/i915/intel_display.c    | 18 +++++++++++-------
 drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c |  4 ++--
 7 files changed, 53 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e3377abc0d4d..932af05b8eec 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4696,7 +4696,7 @@ i915_wedged_get(void *data, u64 *val)
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
+	*val = i915_reset_counter(&dev_priv->gpu_error);
 
 	return 0;
 }
@@ -4715,7 +4715,7 @@ i915_wedged_set(void *data, u64 val)
 	 * while it is writing to 'i915_wedged'
 	 */
 
-	if (i915_reset_in_progress(&dev_priv->gpu_error))
+	if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error))
 		return -EAGAIN;
 
 	intel_runtime_pm_get(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1a6168affadd..b274237726de 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2983,20 +2983,44 @@ void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
 
+static inline u32 i915_reset_counter(struct i915_gpu_error *error)
+{
+	return atomic_read(&error->reset_counter);
+}
+
+static inline bool __i915_reset_in_progress(u32 reset)
+{
+	return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
+{
+	return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool __i915_terminally_wedged(u32 reset)
+{
+	return unlikely(reset & I915_WEDGED);
+}
+
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
-	return unlikely(atomic_read(&error->reset_counter)
-			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+	return __i915_reset_in_progress(i915_reset_counter(error));
+}
+
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+{
+	return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
 }
 
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-	return atomic_read(&error->reset_counter) & I915_WEDGED;
+	return __i915_terminally_wedged(i915_reset_counter(error));
 }
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
 {
-	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+	return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
 }
 
 static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 99fd6aa4dd62..78bf980a69bf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -83,7 +83,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
 	int ret;
 
-#define EXIT_COND (!i915_reset_in_progress(error) || \
+#define EXIT_COND (!i915_reset_in_progress_or_wedged(error) || \
 		   i915_terminally_wedged(error))
 	if (EXIT_COND)
 		return 0;
@@ -1111,7 +1111,7 @@ int
 i915_gem_check_wedge(struct i915_gpu_error *error,
 		     bool interruptible)
 {
-	if (i915_reset_in_progress(error)) {
+	if (i915_reset_in_progress_or_wedged(error)) {
 		/* Non-interruptible callers can't handle -EAGAIN, hence return
 		 * -EIO unconditionally for these. */
 		if (!interruptible)
@@ -1295,7 +1295,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 
 	/* We need to check whether any gpu reset happened in between
 	 * the caller grabbing the seqno and now ... */
-	if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+	if (reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
 		/* ... but upgrade the -EAGAIN to an -EIO if the gpu
 		 * is truely gone. */
 		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
@@ -1473,7 +1473,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
 		return ret;
 
 	ret = __i915_wait_request(req,
-				  atomic_read(&dev_priv->gpu_error.reset_counter),
+				  i915_reset_counter(&dev_priv->gpu_error),
 				  interruptible, NULL, NULL);
 	if (ret)
 		return ret;
@@ -1562,7 +1562,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 
 	if (readonly) {
 		struct drm_i915_gem_request *req;
@@ -3115,7 +3115,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	}
 	drm_gem_object_unreference(&obj->base);
 
-	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		if (obj->last_read_req[i] == NULL)
@@ -3160,7 +3160,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
 		ret = __i915_wait_request(from_req,
-					  atomic_read(&i915->gpu_error.reset_counter),
+					  i915_reset_counter(&i915->gpu_error),
 					  i915->mm.interruptible,
 					  NULL,
 					  &i915->rps.semaphores);
@@ -4128,7 +4128,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
 		target = request;
 	}
-	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	if (target)
 		i915_gem_request_reference(target);
 	spin_unlock(&file_priv->mm.lock);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f04d799153ca..9a6b0ac54d01 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2484,7 +2484,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 	 * the reset in-progress bit is only ever set by code outside of this
 	 * work we don't need to worry about any other races.
 	 */
-	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+	if (i915_reset_in_progress_or_wedged(error) && !i915_terminally_wedged(error)) {
 		DRM_DEBUG_DRIVER("resetting chip\n");
 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
 				   reset_event);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 959868c40018..0933bdbaa935 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3290,10 +3290,12 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned reset_counter;
 	bool pending;
 
-	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
-	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+	if (intel_crtc->reset_counter != reset_counter ||
+	    __i915_reset_in_progress_or_wedged(reset_counter))
 		return false;
 
 	spin_lock_irq(&dev->event_lock);
@@ -11006,9 +11008,11 @@ static bool page_flip_finished(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned reset_counter;
 
-	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
-	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+	if (crtc->reset_counter != reset_counter ||
+	    __i915_reset_in_progress_or_wedged(reset_counter))
 		return true;
 
 	/*
@@ -11665,7 +11669,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup;
 
 	atomic_inc(&intel_crtc->unpin_work_count);
-	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 
 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
 		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
@@ -13499,10 +13503,10 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 		return ret;
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
+	if (!ret && !async && !i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
 		u32 reset_counter;
 
-		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+		reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 		mutex_unlock(&dev->struct_mutex);
 
 		for_each_plane_in_state(state, plane, plane_state, i) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7f17ba852b8a..254ce14d790b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1011,7 +1011,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
 		return;
 
 	ret = intel_ring_idle(ring);
-	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+	if (ret && !i915_reset_in_progress_or_wedged(&to_i915(ring->dev)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 339701d7a9a5..8c6b15ab652b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2274,7 +2274,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 
 	/* Make sure we do not trigger any retires */
 	return __i915_wait_request(req,
-				   atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
+				   i915_reset_counter(&to_i915(ring->dev)->gpu_error),
 				   to_i915(ring->dev)->mm.interruptible,
 				   NULL, NULL);
 }
@@ -3068,7 +3068,7 @@ intel_stop_ring_buffer(struct intel_engine_cs *ring)
 		return;
 
 	ret = intel_ring_idle(ring);
-	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+	if (ret && !i915_reset_in_progress_or_wedged(&to_i915(ring->dev)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
 
-- 
2.7.0.rc3
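
For illustration only (not part of the patch): a minimal sketch of the read-once
pattern the new helpers are meant to enable in later patches. The wrapper function
below is hypothetical and assumes only the inline helpers added to i915_drv.h above.

/* Hypothetical caller: sample the reset state once and run every test
 * against that single snapshot, so the answers stay consistent even if
 * a reset begins concurrently.
 */
static int example_check_reset(struct i915_gpu_error *error)
{
	u32 reset = i915_reset_counter(error);

	/* Terminally wedged: the GPU is not coming back, report -EIO. */
	if (__i915_terminally_wedged(reset))
		return -EIO;

	/* A reset is still in flight: ask the caller to back off and retry. */
	if (__i915_reset_in_progress(reset))
		return -EAGAIN;

	return 0;
}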