On Tue, Dec 01, 2015 at 11:05:34AM +0000, Chris Wilson wrote: > As the request is only valid during the same global reset epoch, we can > record the current reset_counter when constructing the request and reuse > it when waiting upon that request in future. This removes a very hairy > atomic check serialised by the struct_mutex at the time of waiting and > allows us to transfer those waits to a central dispatcher for all > waiters and all requests. Please add "This also allows us to clean up the check_wedge handling a bit, since we don't have to be super-careful any more with running into a zombie gpu. There are now only two places we check for a wedged gpu: request_alloc (to catch command submission) and in the inner loop of wait_request (no point bailing out if we don't have to wait anyway)." With that added Reviewed-by: Daniel Vetter <daniel.vetter@xxxxxxxx> > > Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> > --- > drivers/gpu/drm/i915/i915_drv.h | 2 +- > drivers/gpu/drm/i915/i915_gem.c | 40 +++++++++++---------------------- > drivers/gpu/drm/i915/intel_display.c | 7 +----- > drivers/gpu/drm/i915/intel_lrc.c | 7 ------ > drivers/gpu/drm/i915/intel_ringbuffer.c | 6 ----- > 5 files changed, 15 insertions(+), 47 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h > index 2a4cfa06c28d..ee7677343056 100644 > --- a/drivers/gpu/drm/i915/i915_drv.h > +++ b/drivers/gpu/drm/i915/i915_drv.h > @@ -2181,6 +2181,7 @@ struct drm_i915_gem_request { > /** On Which ring this request was generated */ > struct drm_i915_private *i915; > struct intel_engine_cs *ring; > + unsigned reset_counter; > > /** GEM sequence number associated with the previous request, > * when the HWS breadcrumb is equal to this the GPU is processing > @@ -3049,7 +3050,6 @@ void __i915_add_request(struct drm_i915_gem_request *req, > #define i915_add_request_no_flush(req) \ > __i915_add_request(req, NULL, false) > int __i915_wait_request(struct 
drm_i915_gem_request *req, > - unsigned reset_counter, > bool interruptible, > s64 *timeout, > struct intel_rps_client *rps); > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c > index bff245de8ade..68d4bab93fd8 100644 > --- a/drivers/gpu/drm/i915/i915_gem.c > +++ b/drivers/gpu/drm/i915/i915_gem.c > @@ -1223,7 +1223,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) > /** > * __i915_wait_request - wait until execution of request has finished > * @req: duh! > - * @reset_counter: reset sequence associated with the given request > * @interruptible: do an interruptible wait (normally yes) > * @timeout: in - how long to wait (NULL forever); out - how much time remaining > * > @@ -1238,7 +1237,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) > * errno with remaining time filled in timeout argument. > */ > int __i915_wait_request(struct drm_i915_gem_request *req, > - unsigned reset_counter, > bool interruptible, > s64 *timeout, > struct intel_rps_client *rps) > @@ -1297,7 +1295,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, > > /* We need to check whether any gpu reset happened in between > * the caller grabbing the seqno and now ... */ > - if (reset_counter != i915_reset_counter(&dev_priv->gpu_error)) { > + if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) { > /* ... but upgrade the -EAGAIN to an -EIO if the gpu > * is truly gone. 
*/ > ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); > @@ -1470,13 +1468,7 @@ i915_wait_request(struct drm_i915_gem_request *req) > > BUG_ON(!mutex_is_locked(&dev->struct_mutex)); > > - ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); > - if (ret) > - return ret; > - > - ret = __i915_wait_request(req, > - i915_reset_counter(&dev_priv->gpu_error), > - interruptible, NULL, NULL); > + ret = __i915_wait_request(req, interruptible, NULL, NULL); > if (ret) > return ret; > > @@ -1551,7 +1543,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, > struct drm_device *dev = obj->base.dev; > struct drm_i915_private *dev_priv = dev->dev_private; > struct drm_i915_gem_request *requests[I915_NUM_RINGS]; > - unsigned reset_counter; > int ret, i, n = 0; > > BUG_ON(!mutex_is_locked(&dev->struct_mutex)); > @@ -1560,12 +1551,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, > if (!obj->active) > return 0; > > - ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); > - if (ret) > - return ret; > - > - reset_counter = i915_reset_counter(&dev_priv->gpu_error); > - > if (readonly) { > struct drm_i915_gem_request *req; > > @@ -1587,9 +1572,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, > } > > mutex_unlock(&dev->struct_mutex); > + ret = 0; > for (i = 0; ret == 0 && i < n; i++) > - ret = __i915_wait_request(requests[i], reset_counter, true, > - NULL, rps); > + ret = __i915_wait_request(requests[i], true, NULL, rps); > mutex_lock(&dev->struct_mutex); > > for (i = 0; i < n; i++) { > @@ -2693,6 +2678,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring, > struct drm_i915_gem_request **req_out) > { > struct drm_i915_private *dev_priv = to_i915(ring->dev); > + unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); > struct drm_i915_gem_request *req; > int ret; > > @@ -2701,6 +2687,11 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring, > > 
*req_out = NULL; > > + ret = i915_gem_check_wedge(&dev_priv->gpu_error, > + dev_priv->mm.interruptible); > + if (ret) > + return ret; > + > req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL); > if (req == NULL) > return -ENOMEM; > @@ -2712,6 +2703,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring, > kref_init(&req->ref); > req->i915 = dev_priv; > req->ring = ring; > + req->reset_counter = reset_counter; > req->ctx = ctx; > i915_gem_context_reference(req->ctx); > > @@ -3072,11 +3064,9 @@ retire: > int > i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) > { > - struct drm_i915_private *dev_priv = dev->dev_private; > struct drm_i915_gem_wait *args = data; > struct drm_i915_gem_object *obj; > struct drm_i915_gem_request *req[I915_NUM_RINGS]; > - unsigned reset_counter; > int i, n = 0; > int ret; > > @@ -3110,7 +3100,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) > } > > drm_gem_object_unreference(&obj->base); > - reset_counter = i915_reset_counter(&dev_priv->gpu_error); > > for (i = 0; i < I915_NUM_RINGS; i++) { > if (obj->last_read_req[i] == NULL) > @@ -3123,7 +3112,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) > > for (i = 0; i < n; i++) { > if (ret == 0) > - ret = __i915_wait_request(req[i], reset_counter, true, > + ret = __i915_wait_request(req[i], true, > args->timeout_ns > 0 ? 
&args->timeout_ns : NULL, > file->driver_priv); > i915_gem_request_unreference__unlocked(req[i]); > @@ -3155,7 +3144,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, > if (!i915_semaphore_is_enabled(obj->base.dev)) { > struct drm_i915_private *i915 = to_i915(obj->base.dev); > ret = __i915_wait_request(from_req, > - i915_reset_counter(&i915->gpu_error), > i915->mm.interruptible, > NULL, > &i915->rps.semaphores); > @@ -4084,7 +4072,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) > struct drm_i915_file_private *file_priv = file->driver_priv; > unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; > struct drm_i915_gem_request *request, *target = NULL; > - unsigned reset_counter; > int ret; > > ret = i915_gem_wait_for_error(&dev_priv->gpu_error); > @@ -4109,7 +4096,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) > > target = request; > } > - reset_counter = i915_reset_counter(&dev_priv->gpu_error); > if (target) > i915_gem_request_reference(target); > spin_unlock(&file_priv->mm.lock); > @@ -4117,7 +4103,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) > if (target == NULL) > return 0; > > - ret = __i915_wait_request(target, reset_counter, true, NULL, NULL); > + ret = __i915_wait_request(target, true, NULL, NULL); > if (ret == 0) > queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); > > diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c > index 511ead08ccd8..4447e73b54db 100644 > --- a/drivers/gpu/drm/i915/intel_display.c > +++ b/drivers/gpu/drm/i915/intel_display.c > @@ -11313,7 +11313,6 @@ static void intel_mmio_flip_work_func(struct work_struct *work) > > if (mmio_flip->req) { > WARN_ON(__i915_wait_request(mmio_flip->req, > - mmio_flip->crtc->reset_counter, > false, NULL, > &mmio_flip->i915->rps.mmioflips)); > i915_gem_request_unreference__unlocked(mmio_flip->req); > @@ -13305,9 +13304,6 @@ static int 
intel_atomic_prepare_commit(struct drm_device *dev, > > ret = drm_atomic_helper_prepare_planes(dev, state); > if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) { > - u32 reset_counter; > - > - reset_counter = i915_reset_counter(&dev_priv->gpu_error); > mutex_unlock(&dev->struct_mutex); > > for_each_plane_in_state(state, plane, plane_state, i) { > @@ -13318,8 +13314,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, > continue; > > ret = __i915_wait_request(intel_plane_state->wait_req, > - reset_counter, true, > - NULL, NULL); > + true, NULL, NULL); > > /* Swallow -EIO errors to allow updates during hw lockup. */ > if (ret == -EIO) > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c > index 4ebafab53f30..fe71c768dbc4 100644 > --- a/drivers/gpu/drm/i915/intel_lrc.c > +++ b/drivers/gpu/drm/i915/intel_lrc.c > @@ -819,16 +819,9 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes) > */ > int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) > { > - struct drm_i915_private *dev_priv; > int ret; > > WARN_ON(req == NULL); > - dev_priv = req->ring->dev->dev_private; > - > - ret = i915_gem_check_wedge(&dev_priv->gpu_error, > - dev_priv->mm.interruptible); > - if (ret) > - return ret; > > ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t)); > if (ret) > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c > index 8970267d27bb..913526c0264f 100644 > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c > @@ -2286,7 +2286,6 @@ int intel_ring_idle(struct intel_engine_cs *ring) > > /* Make sure we do not trigger any retires */ > return __i915_wait_request(req, > - i915_reset_counter(&to_i915(ring->dev)->gpu_error), > to_i915(ring->dev)->mm.interruptible, > NULL, NULL); > } > @@ -2417,11 +2416,6 @@ int intel_ring_begin(struct drm_i915_gem_request *req, > ring = req->ring; > 
dev_priv = ring->dev->dev_private; > > - ret = i915_gem_check_wedge(&dev_priv->gpu_error, > - dev_priv->mm.interruptible); > - if (ret) > - return ret; > - > ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); > if (ret) > return ret; > -- > 2.6.2 > -- Daniel Vetter Software Engineer, Intel Corporation http://blog.ffwll.ch _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx http://lists.freedesktop.org/mailman/listinfo/intel-gfx