Insert a wait parameter in the code so we can optionally time out on a
seqno wait if need be. The code should be functionally the same as before
because all the callers will continue to retry if an arbitrary timeout
elapses.

We'd like to have nanosecond granularity, but the only way to do this is
with hrtimer, and that doesn't fit well with the needs of this code.

v2: Fix rebase error (Chris)
    Return proper time even in wedged + signal case (Chris + Ben)
    Use timespec constructs (Ben)
    Didn't take Daniel's advice regarding the Frankenstein-ness of the
    function. I did try his advice, but in the end I liked the way the
    original code looked better.

v3: Make wakeups far less frequent for infinite waits (Chris)

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_gem.c | 66 ++++++++++++++++++++++++++++++++-------
 1 file changed, 54 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 293f573..70634cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1819,34 +1819,78 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the sequence number we are waiting on
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the allotted time. Otherwise returns
+ * an errno, with the remaining time filled in via the timeout argument.
+ */
 static int
 __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
-	     bool interruptible)
+	     bool interruptible, struct timespec *timeout)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	int ret = 0;
+	struct timespec temp, dummy_time = {1, 0};
+	unsigned long before, timeout_jiffies;
+	long end;
+	bool wait_forever = false;
 
 	if (i915_seqno_passed(ring->get_seqno(ring), seqno))
 		return 0;
 
 	trace_i915_gem_request_wait_begin(ring, seqno);
+
+	if (timeout == NULL) {
+		timeout = &dummy_time;
+		wait_forever = true;
+	}
+	timeout_jiffies = timespec_to_jiffies(timeout);
+
 	if (WARN_ON(!ring->irq_get(ring)))
 		return -ENODEV;
 
+	/* Record current jiffies in case interrupted by signal, or wedged */
+	before = jiffies;
+
 #define EXIT_COND \
 	(i915_seqno_passed(ring->get_seqno(ring), seqno) || \
 	atomic_read(&dev_priv->mm.wedged))
+	do {
+		if (interruptible)
+			end = wait_event_interruptible_timeout(ring->irq_queue,
+							       EXIT_COND,
+							       timeout_jiffies);
+		else
+			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
+						 timeout_jiffies);
 
-	if (interruptible)
-		ret = wait_event_interruptible(ring->irq_queue,
-					       EXIT_COND);
-	else
-		wait_event(ring->irq_queue, EXIT_COND);
+		if (atomic_read(&dev_priv->mm.wedged))
+			end = -EAGAIN;
+	} while (end == 0 && wait_forever);
+
+	jiffies_to_timespec(jiffies - before, &temp);
 
 	ring->irq_put(ring);
 	trace_i915_gem_request_wait_end(ring, seqno);
 #undef EXIT_COND
 
-	return ret;
+	jiffies_to_timespec(end, timeout);
+
+	switch (end) {
+	case -EAGAIN: /* Wedged */
+	case -ERESTARTSYS: /* Signal */
+		WARN_ON(timespec_compare(&temp, timeout) >= 0);
+		*timeout = timespec_sub(*timeout, temp);
+		return (int)end;
+	case 0: /* Timeout */
+		return -ETIME;
+	default: /* Completed */
+		WARN_ON(end < 0); /* We're not aware of other errors */
+		return 0;
+	}
 }
 
 /**
@@ -1891,9 +1935,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
 		seqno = request->seqno;
 	}
 
-	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
-	if (atomic_read(&dev_priv->mm.wedged))
-		ret = -EAGAIN;
+	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
 
 	return ret;
 }
@@ -2981,7 +3023,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (seqno == 0)
 		return 0;
 
-	ret = __wait_seqno(ring, seqno, true);
+	ret = __wait_seqno(ring, seqno, true, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
-- 
1.7.10
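
Editor's note (not part of the patch, and placed after the signature so git am
ignores it): the sketch below shows one way a finite-timeout caller, such as a
future wait ioctl, could consume the new timeout parameter. The function name
i915_wait_seqno_timeout() is hypothetical; it only assumes the standard kernel
helpers ns_to_timespec() and timespec_to_ns() from <linux/time.h>.

	/*
	 * Hypothetical caller: wait on a seqno for at most *timeout_ns
	 * nanoseconds, and write back how much of the budget remains.
	 */
	static int i915_wait_seqno_timeout(struct intel_ring_buffer *ring,
					   u32 seqno, s64 *timeout_ns)
	{
		struct timespec ts = ns_to_timespec(*timeout_ns);
		int ret;

		/* __wait_seqno() updates ts with the time remaining */
		ret = __wait_seqno(ring, seqno, true, &ts);

		/* report the leftover budget back to the caller */
		*timeout_ns = timespec_to_ns(&ts);
		return ret;
	}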