After the removal of DRI1, all access to the rings is through requests
and so we can always be sure that there is a request to wait upon to
free up available space. The fallback code only existed so that we
could quiesce the GPU following unmediated access by DRI1.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_trace.h       | 27 ----------------
 drivers/gpu/drm/i915/intel_lrc.c        | 57 +++------------------------------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 56 ++------------------------------
 3 files changed, 6 insertions(+), 134 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index b3070a4501ab..97483e21c9b4 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -597,33 +597,6 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
 	    TP_ARGS(req)
 );
 
-DECLARE_EVENT_CLASS(i915_ring,
-	    TP_PROTO(struct intel_engine_cs *ring),
-	    TP_ARGS(ring),
-
-	    TP_STRUCT__entry(
-			     __field(u32, dev)
-			     __field(u32, ring)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
-			   ),
-
-	    TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
-);
-
-DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-	    TP_PROTO(struct intel_engine_cs *ring),
-	    TP_ARGS(ring)
-);
-
-DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-	    TP_PROTO(struct intel_engine_cs *ring),
-	    TP_ARGS(ring)
-);
-
 TRACE_EVENT(i915_flip_request,
 	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 61c103b9ba22..3cd40699522e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -846,8 +846,9 @@ static int logical_ring_alloc_request(struct intel_engine_cs *ring,
 	return 0;
 }
 
-static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
-				     int bytes)
+static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
+				       struct intel_context *ctx,
+				       int bytes)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
 	struct drm_i915_gem_request *request;
@@ -873,7 +874,7 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
 		break;
 	}
 
-	if (&request->list == &ring->request_list)
+	if (WARN_ON(&request->list == &ring->request_list))
 		return -ENOSPC;
 
 	ret = i915_wait_request(request);
@@ -884,56 +885,6 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
-				       struct intel_context *ctx,
-				       int bytes)
-{
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long end;
-	int ret;
-
-	ret = logical_ring_wait_request(ringbuf, bytes);
-	if (ret != -ENOSPC)
-		return ret;
-
-	/* Force the context submission in case we have been skipping it */
-	intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
-
-	/* With GEM the hangcheck timer should kick us out of the loop,
-	 * leaving it early runs the risk of corrupting GEM state (due
-	 * to running on almost untested codepaths). But on resume
-	 * timers don't work yet, so prevent a complete hang in that
-	 * case by choosing an insanely large timeout.
-	 */
-	end = jiffies + 60 * HZ;
-
-	ret = 0;
-	do {
-		if (intel_ring_space(ringbuf) >= bytes)
-			break;
-
-		msleep(1);
-
-		if (dev_priv->mm.interruptible && signal_pending(current)) {
-			ret = -ERESTARTSYS;
-			break;
-		}
-
-		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-					   dev_priv->mm.interruptible);
-		if (ret)
-			break;
-
-		if (time_after(jiffies, end)) {
-			ret = -EBUSY;
-			break;
-		}
-	} while (1);
-
-	return ret;
-}
-
 static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
 				    struct intel_context *ctx)
 {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a1184e700d1d..7e3281de417c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2057,7 +2057,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 	ring->buffer = NULL;
 }
 
-static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
+static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
 	struct drm_i915_gem_request *request;
@@ -2074,7 +2074,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 		break;
 	}
 
-	if (&request->list == &ring->request_list)
+	if (WARN_ON(&request->list == &ring->request_list))
 		return -ENOSPC;
 
 	ret = i915_wait_request(request);
@@ -2085,58 +2085,6 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	return 0;
 }
 
-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-{
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-	unsigned long end;
-	int ret;
-
-	ret = intel_ring_wait_request(ring, n);
-	if (ret != -ENOSPC)
-		return ret;
-
-	/* force the tail write in case we have been skipping them */
-	__intel_ring_advance(ring);
-
-	/* With GEM the hangcheck timer should kick us out of the loop,
-	 * leaving it early runs the risk of corrupting GEM state (due
-	 * to running on almost untested codepaths). But on resume
-	 * timers don't work yet, so prevent a complete hang in that
-	 * case by choosing an insanely large timeout. */
-	end = jiffies + 60 * HZ;
-
-	ret = 0;
-	trace_i915_ring_wait_begin(ring);
-	do {
-		if (intel_ring_space(ringbuf) >= n)
-			break;
-		ringbuf->head = I915_READ_HEAD(ring);
-		if (intel_ring_space(ringbuf) >= n)
-			break;
-
-		msleep(1);
-
-		if (dev_priv->mm.interruptible && signal_pending(current)) {
-			ret = -ERESTARTSYS;
-			break;
-		}
-
-		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-					   dev_priv->mm.interruptible);
-		if (ret)
-			break;
-
-		if (time_after(jiffies, end)) {
-			ret = -EBUSY;
-			break;
-		}
-	} while (1);
-	trace_i915_ring_wait_end(ring);
-	return ret;
-}
-
 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 {
 	uint32_t __iomem *virt;
-- 
2.1.4
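
For readers following along, the request-based wait that survives this patch
(ring_wait_for_space() after the rename) has roughly the shape below. This is
a paraphrased sketch, not the literal kernel code: the body of the list walk
falls outside the diff context above, so the helper enough_space_after() is
hypothetical, standing in for the real computation of how much ring space
retiring up to a given request would free, and the trailing retire call
mirrors what the driver of this era does after a successful wait.

/* Hypothetical helper (the real check is outside the diff context
 * above): would retiring up to and including this request free at
 * least n bytes of ring space?
 */
static bool enough_space_after(struct intel_ringbuffer *ringbuf,
			       struct drm_i915_gem_request *request,
			       int n);

static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_request *request;
	int ret;

	if (intel_ring_space(ringbuf) >= n)
		return 0;

	/* All ring access now goes through requests, so walk the
	 * outstanding requests oldest-first and pick the first one
	 * whose completion yields enough space.
	 */
	list_for_each_entry(request, &ring->request_list, list) {
		if (enough_space_after(ringbuf, request, n))
			break;
	}

	/* Exhausting the list can no longer happen legitimately:
	 * without DRI1 there is no unmediated access that could fill
	 * the ring behind our back, hence WARN_ON() instead of the
	 * old 60 second busy-wait fallback.
	 */
	if (WARN_ON(&request->list == &ring->request_list))
		return -ENOSPC;

	ret = i915_wait_request(request);
	if (ret)
		return ret;

	/* Retiring the completed requests releases the ring space. */
	i915_gem_retire_requests_ring(ring);
	return 0;
}

In other words, the WARN_ON() turns what used to be a legitimate fallback
path into an assertion: with DRI1 gone, the ring can only fill up via
requests, so a full ring with an empty request_list indicates corrupted
driver state rather than a condition worth busy-waiting on.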