On Thu, Nov 12, 2015 at 01:40:33PM -0700, Jens Axboe wrote:
> On 11/12/2015 01:36 PM, Jens Axboe wrote:
> >Hi,
> >
> >So a few months ago I got an XPS13 laptop, the one with the high res
> >screen. GUI performance was never really that great; I attributed it
> >to coming from a more powerful laptop, and the i915 driving a lot of
> >pixels. But yesterday I browsed from my wife's macbook, and was blown
> >away. Wow, scrolling in chrome SUCKS on the xps13. Not just scrolling,
> >basically anything in chrome. Molasses. So I got sick of it, fired up
> >a quick perf record, did a bunch of stuff in chrome. No super smoking
> >guns, but one thing did stick out - the path leading to
> >__i915_spin_request(). Normally that sort of profile is just a smoking
> >gun pointing at the messenger, though. So today, I figured I'd try
> >just killing that spin. If it fails, we'll punt to normal completions,
> >so it's an easy change. And wow, MASSIVE difference. I can now scroll
> >in chrome and not rage! It's like the laptop is 10x faster now.
> >
> >Ran git blame, and found:
> >
> >commit 2def4ad99befa25775dd2f714fdd4d92faec6e34
> >Author: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> >Date:   Tue Apr 7 16:20:41 2015 +0100
> >
> >    drm/i915: Optimistically spin for the request completion
> >
> >and read the commit message. Doesn't sound that impressive. Especially
> >not for something that screws up interactive performance by a LOT.
> >
> >What's the deal? Revert?

The tests it improved the most were the latency-sensitive ones, and
since my Broadwell xps13 behaves itself, I'd like to understand how the
spinning culminates in an interactivity loss.

1. Maybe it is the uninterruptible nature of the polling, making X's
SIGIO jerky:

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19e8f5442cf8..8099c2a9f88e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1146,7 +1146,7 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
 	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
-static int __i915_spin_request(struct drm_i915_gem_request *req)
+static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 {
 	unsigned long timeout;
 
@@ -1161,6 +1161,9 @@ static int __i915_spin_request(struct drm_i915_gem_request *req)
 		if (time_after_eq(jiffies, timeout))
 			break;
 
+		if (signal_pending_state(state, current))
+			break;
+
 		cpu_relax_lowlatency();
 	}
 	if (i915_gem_request_completed(req, false))
@@ -1197,6 +1200,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
 	s64 before, now;
@@ -1221,7 +1225,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	before = ktime_get_raw_ns();
 
 	/* Optimistic spin for the next jiffie before touching IRQs */
-	ret = __i915_spin_request(req);
+	ret = __i915_spin_request(req, state);
 	if (ret == 0)
 		goto out;
 
@@ -1233,8 +1237,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	for (;;) {
 		struct timer_list timer;
 
-		prepare_to_wait(&ring->irq_queue, &wait,
-				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(&ring->irq_queue, &wait, state);
 
 		/* We need to check whether any gpu reset happened in between
 		 * the caller grabbing the seqno and now ...
 		 */
@@ -1252,7 +1255,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			break;
 		}
 
-		if (interruptible && signal_pending(current)) {
+		if (signal_pending_state(state, current)) {
 			ret = -ERESTARTSYS;
 			break;
 		}

2. Or maybe it is increased mutex contention:

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a275b0478200..1e52a7444e0c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2973,9 +2973,11 @@ void __i915_add_request(struct drm_i915_gem_request *req,
 	__i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
-			bool interruptible,
+			unsigned flags,
 			s64 *timeout,
 			struct intel_rps_client *rps);
+#define WAIT_INTERRUPTIBLE 0x1
+#define WAIT_UNLOCKED 0x2
 int __must_check i915_wait_request(struct drm_i915_gem_request *req);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8099c2a9f88e..ce17d42f1c62 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1191,7 +1191,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
  */
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
-			bool interruptible,
+			unsigned flags,
 			s64 *timeout,
 			struct intel_rps_client *rps)
 {
@@ -1200,7 +1200,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
-	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+	int state = flags & WAIT_INTERRUPTIBLE ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
 	s64 before, now;
@@ -1225,9 +1225,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	before = ktime_get_raw_ns();
 
 	/* Optimistic spin for the next jiffie before touching IRQs */
-	ret = __i915_spin_request(req, state);
-	if (ret == 0)
-		goto out;
+	if (flags & WAIT_UNLOCKED) {
+		ret = __i915_spin_request(req, state);
+		if (ret == 0)
+			goto out;
+	}
 
 	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
 		ret = -ENODEV;
@@ -1244,7 +1246,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
 			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
 			 * is truely gone. */
-			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+			ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+						   flags & WAIT_INTERRUPTIBLE);
 			if (ret == 0)
 				ret = -EAGAIN;
 			break;
@@ -1532,7 +1535,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	mutex_unlock(&dev->struct_mutex);
 	for (i = 0; ret == 0 && i < n; i++)
-		ret = __i915_wait_request(requests[i], reset_counter, true,
+		ret = __i915_wait_request(requests[i], reset_counter, 0x3,
 					  NULL, rps);
 	mutex_lock(&dev->struct_mutex);
@@ -3067,7 +3070,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
-			ret = __i915_wait_request(req[i], reset_counter, true,
+			ret = __i915_wait_request(req[i], reset_counter, 0x3,
 						  args->timeout_ns > 0 ?
 						  &args->timeout_ns : NULL,
 						  file->driver_priv);
 		i915_gem_request_unreference__unlocked(req[i]);
@@ -4043,7 +4046,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (target == NULL)
 		return 0;
 
-	ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+	ret = __i915_wait_request(target, reset_counter, 0x3, NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

Or maybe it is an indirect effect, such as power balancing between the
CPU and GPU, or just thermal throttling, or it may be the task being
penalised for consuming its timeslice (for which any completion polling
seems susceptible).

> BTW, this:
>
> "Limit the spinning to a single jiffie (~1us) at most"
>
> is totally wrong. I have HZ=100 on my laptop. That's 10ms. 10ms!
> Even if I had HZ=1000, that'd still be 1ms of spinning. That's
> seriously screwed up, guys.

That's over and above the termination condition for blk_poll().
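For reference, an untested sketch of what bounding the spin by a fixed
time budget (rather than a jiffie) might look like, on top of patch 1
above; the 10us figure is an arbitrary placeholder and the early-out
checks of the real function are omitted:

static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
{
	u64 deadline;

	/* Arbitrary 10us budget instead of one jiffie, so the worst case
	 * no longer scales with HZ; the right number needs measuring
	 * against the latency-sensitive benchmarks.
	 */
	deadline = local_clock() + 10 * NSEC_PER_USEC;

	while (!need_resched()) {
		if (i915_gem_request_completed(req, true))
			return 0;

		/* local_clock() is only locally monotonic, so strictly we
		 * should also give up if the task migrates to another CPU.
		 */
		if (local_clock() >= deadline)
			break;

		if (signal_pending_state(state, current))
			break;

		cpu_relax_lowlatency();
	}

	if (i915_gem_request_completed(req, false))
		return 0;

	return -EAGAIN;
}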
-Chris

--
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/dri-devel