If we encounter a situation where the CPU blocks waiting for results
from the GPU, give the GPU a kick to boost its frequency.

This should work to reduce user interface stalls and to quickly
promote mesa to high frequencies - but the cost is that our requested
frequency stalls high (as we do not idle for long enough before rc6 to
start reducing frequencies, nor are we aggressive at down clocking an
underused GPU). However, this should be mitigated by rc6 itself
powering off the GPU when idle, and by the fact that energy use
depends upon the workload of the GPU in addition to its frequency
(e.g. the math or sampler functions only consume power when used).
Still, this is likely to adversely affect light workloads.

In particular, this nearly eliminates the highly noticeable wake-up
lag in animations from idle, for example expose or workspace
transitions. (However, given the situation where we fail to downclock,
our requested frequency is almost always the maximum, except for
Baytrail where we manually downclock upon idling. This often masks the
latency of upclocking after being idle, so animations are typically
smooth - at the cost of increased power consumption.)

Stéphane raised the concern that this will punish good applications
and reward bad applications - but due to the nature of how mesa
performs its client throttling, I believe all mesa applications will
be roughly equally affected. To address this concern, and to prevent
applications such as compositors from regularly boosting the RPS
state, we only allow each client to receive one boost in each period
of activity.

Unfortunately, this technique is ineffective with Ironlake - which
also has dynamic render power states and suffers just as dramatically.
For Ironlake, the thermal/power headroom is shared with the CPU
through Intelligent Power Sharing and the intel-ips module. This
leaves us with no GPU boost frequencies available when coming out of
idle, and due to hardware limitations we cannot change the arbitration
between the CPU and GPU quickly enough to be effective.

v2: Limit each client to receiving a single boost for each active
period.
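(Aside: the gating rule is simple enough to show in isolation. Below
is a standalone C11 sketch of the idea - not driver code, and all the
names in it are made up for illustration. One atomic flag guards each
boost source; the first waiter of an activity period claims it with an
exchange, and it is only re-armed once the client has stayed idle for
the grace period - mirroring what can_wait_boost() and gen6_rps_idle()
do in the patch below.)

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One gate per boost source (per client, plus one global fallback). */
struct boost_gate {
	atomic_bool claimed; /* set once a boost is granted this period */
};

/* The first waiter in an activity period wins the exchange and may
 * boost; every later waiter sees 'claimed' already true and is
 * refused. */
static bool gate_can_boost(struct boost_gate *g)
{
	return !atomic_exchange(&g->claimed, true);
}

/* Re-arm the gate; in the patch this runs from deferred idle work,
 * ~100ms after the client's last request has retired. */
static void gate_reset(struct boost_gate *g)
{
	atomic_store(&g->claimed, false);
}

int main(void)
{
	struct boost_gate g = { .claimed = false };

	printf("first wait boosts:  %d\n", gate_can_boost(&g)); /* 1 */
	printf("second wait boosts: %d\n", gate_can_boost(&g)); /* 0 */
	gate_reset(&g); /* client went idle: new activity period */
	printf("after idle boosts:  %d\n", gate_can_boost(&g)); /* 1 */
	return 0;
}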
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Kenneth Graunke <kenneth@xxxxxxxxxxxxx>
Cc: Stéphane Marchesin <stephane.marchesin@xxxxxxxxx>
Cc: Owen Taylor <otaylor@xxxxxxxxxx>
Cc: "Meng, Mengmeng" <mengmeng.meng@xxxxxxxxx>
Cc: "Zhuang, Lena" <lena.zhuang@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_dma.c      |  16 +---
 drivers/gpu/drm/i915/i915_drv.h      |  19 ++++-
 drivers/gpu/drm/i915/i915_gem.c      | 153 +++++++++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_irq.c      |  11 ---
 drivers/gpu/drm/i915/intel_display.c |   3 +
 drivers/gpu/drm/i915/intel_drv.h     |   2 +
 drivers/gpu/drm/i915/intel_pm.c      |  34 +++-----
 7 files changed, 147 insertions(+), 91 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 99159c9..883990f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1811,19 +1811,11 @@ int i915_driver_unload(struct drm_device *dev)
 
 int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *file_priv;
-
-	DRM_DEBUG_DRIVER("\n");
-	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
-	if (!file_priv)
-		return -ENOMEM;
-
-	file->driver_priv = file_priv;
-
-	spin_lock_init(&file_priv->mm.lock);
-	INIT_LIST_HEAD(&file_priv->mm.request_list);
+	int ret;
 
-	idr_init(&file_priv->context_idr);
+	ret = i915_gem_open(dev, file);
+	if (ret)
+		return ret;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f626271..e8537d3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -829,9 +829,6 @@ struct intel_gen6_power_mgmt {
 	struct work_struct work;
 	u32 pm_iir;
 
-	/* On vlv we need to manually drop to Vmin with a delayed work. */
-	struct delayed_work vlv_work;
-
 	/* The below variables an all the rps hw state are protected by
 	 * dev->struct mutext. */
 	u8 cur_delay;
@@ -840,6 +837,8 @@ struct intel_gen6_power_mgmt {
 	u8 rpe_delay;
 	u8 hw_max;
 
+	atomic_t wait_boost;
+
 	struct delayed_work delayed_resume_work;
 
 	/*
@@ -947,6 +946,15 @@ struct i915_gem_mm {
 	struct delayed_work retire_work;
 
 	/**
+	 * When we detect an idle GPU, we want to turn on
+	 * powersaving features. So once we see that there
+	 * are no more requests outstanding and no more
+	 * arrive within a small period of time, we fire
+	 * off the idle_work.
+	 */
+	struct delayed_work idle_work;
+
+	/**
 	 * Are we in a non-interruptible section of code like
 	 * modesetting?
 	 */
@@ -1573,10 +1581,12 @@ struct drm_i915_file_private {
 	struct {
 		spinlock_t lock;
 		struct list_head request_list;
+		struct delayed_work idle_work;
 	} mm;
 	struct idr context_idr;
 
 	struct i915_ctx_hang_stats hang_stats;
+	atomic_t rps_wait_boost;
 };
 
 #define INTEL_INFO(dev) (to_i915(dev)->info)
@@ -1925,7 +1935,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 	}
 }
 
-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
@@ -1978,6 +1988,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 bool
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a6a1ad0..033429ed 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1078,6 +1078,19 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
 	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
+static bool can_wait_boost(struct drm_i915_private *dev_priv,
+			   struct drm_i915_file_private *file_priv)
+{
+	bool already_boosted;
+
+	if (file_priv)
+		already_boosted = atomic_xchg(&file_priv->rps_wait_boost, true);
+	else
+		already_boosted = atomic_xchg(&dev_priv->rps.wait_boost, true);
+
+	return !already_boosted;
+}
+
 /**
  * __wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
@@ -1098,7 +1111,9 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
  */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 			unsigned reset_counter,
-			bool interruptible, struct timespec *timeout)
+			bool interruptible,
+			struct timespec *timeout,
+			struct drm_i915_file_private *file_priv)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct timespec before, now;
@@ -1113,6 +1128,15 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 	timeout_jiffies = timeout ?
			timespec_to_jiffies_timeout(timeout) : 1;
 
+	if (dev_priv->info->gen >= 6 && can_wait_boost(dev_priv, file_priv)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+		if (dev_priv->info->is_valleyview)
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+		else
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	}
+
 	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
 	    WARN_ON(!ring->irq_get(ring)))
 		return -ENODEV;
@@ -1212,7 +1236,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 
 	return __wait_seqno(ring, seqno,
 			    atomic_read(&dev_priv->gpu_error.reset_counter),
-			    interruptible, NULL);
+			    interruptible, NULL, NULL);
 }
 
 static int
@@ -1262,6 +1286,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    struct drm_file *file,
 					    bool readonly)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -1288,7 +1313,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
 	mutex_lock(&dev->struct_mutex);
 	if (ret)
 		return ret;
@@ -1337,7 +1362,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	 * We will repeat the flush holding the lock in the normal manner
 	 * to catch cases where we are gazumped.
 	 */
-	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
 	if (ret)
 		goto unref;
 
@@ -2204,6 +2229,8 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	if (file) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
 
+		cancel_delayed_work_sync(&file_priv->mm.idle_work);
+
 		spin_lock(&file_priv->mm.lock);
 		request->file_priv = file_priv;
 		list_add_tail(&request->client_list,
@@ -2218,6 +2245,7 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	i915_queue_hangcheck(ring->dev);
 
 	if (was_empty) {
+		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 		queue_delayed_work(dev_priv->wq,
 				   &dev_priv->mm.retire_work,
 				   round_jiffies_up_relative(HZ));
@@ -2234,16 +2262,21 @@ static inline void
 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
 	struct drm_i915_file_private *file_priv = request->file_priv;
+	bool idle;
 
 	if (!file_priv)
 		return;
 
 	spin_lock(&file_priv->mm.lock);
-	if (request->file_priv) {
-		list_del(&request->client_list);
-		request->file_priv = NULL;
-	}
+	list_del(&request->client_list);
+	idle = list_empty(&file_priv->mm.request_list);
+	request->file_priv = NULL;
 	spin_unlock(&file_priv->mm.lock);
+
+	if (idle)
+		mod_delayed_work(to_i915(request->ring->dev)->wq,
+				 &file_priv->mm.idle_work,
+				 msecs_to_jiffies(100));
 }
 
 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
@@ -2488,57 +2521,54 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 	WARN_ON(i915_verify_lists(ring->dev));
 }
 
-void
+bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	bool idle = true;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_ring(ring, dev_priv, i) {
 		i915_gem_retire_requests_ring(ring);
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (idle)
+		mod_delayed_work(dev_priv->wq,
+				 &dev_priv->mm.idle_work,
+				 msecs_to_jiffies(100));
+
+	return idle;
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
-	drm_i915_private_t *dev_priv;
-	struct drm_device *dev;
-	struct intel_ring_buffer *ring;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.retire_work.work);
+	struct drm_device *dev = dev_priv->dev;
 	bool idle;
-	int i;
-
-	dev_priv = container_of(work, drm_i915_private_t,
-				mm.retire_work.work);
-	dev = dev_priv->dev;
 
 	/* Come back later if the device is busy... */
-	if (!mutex_trylock(&dev->struct_mutex)) {
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
-				   round_jiffies_up_relative(HZ));
-		return;
-	}
-
-	i915_gem_retire_requests(dev);
-
-	/* Send a periodic flush down the ring so we don't hold onto GEM
-	 * objects indefinitely.
-	 */
-	idle = true;
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->gpu_caches_dirty)
-			i915_add_request(ring, NULL);
-
-		idle &= list_empty(&ring->request_list);
+	idle = false;
+	if (mutex_trylock(&dev->struct_mutex)) {
+		idle = i915_gem_retire_requests(dev);
+		mutex_unlock(&dev->struct_mutex);
 	}
-
-	if (!dev_priv->ums.mm_suspended && !idle)
+	if (!idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
 				   round_jiffies_up_relative(HZ));
-	if (idle)
-		intel_mark_idle(dev);
+}
 
-	mutex_unlock(&dev->struct_mutex);
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.idle_work.work);
+	struct drm_device *dev = dev_priv->dev;
+
+	intel_mark_idle(dev);
 }
 
 /**
@@ -2636,7 +2666,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
 	if (timeout)
 		args->timeout_ns = timespec_to_ns(timeout);
 	return ret;
@@ -3846,7 +3876,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (seqno == 0)
 		return 0;
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -4320,6 +4350,7 @@ i915_gem_idle(struct drm_device *dev)
 
 	/* Cancel the retire work handler, which should be idle now. */
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 
 	return 0;
 }
@@ -4647,6 +4678,8 @@ i915_gem_load(struct drm_device *dev)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
+	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+			  i915_gem_idle_work_handler);
 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4870,6 +4903,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
+	cancel_delayed_work_sync(&file_priv->mm.idle_work);
+
 	/* Clean up our request list when the client is going away, so that
 	 * later retire_requests won't dereference our soon-to-be-gone
 	 * file_priv.
@@ -4887,6 +4922,40 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 	spin_unlock(&file_priv->mm.lock);
 }
 
+static void
+i915_gem_file_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_file_private *file_priv =
+		container_of(work, typeof(*file_priv), mm.idle_work.work);
+
+	spin_lock(&file_priv->mm.lock);
+	if (list_empty(&file_priv->mm.request_list))
+		atomic_set(&file_priv->rps_wait_boost, false);
+	spin_unlock(&file_priv->mm.lock);
+}
+
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+
+	spin_lock_init(&file_priv->mm.lock);
+	INIT_LIST_HEAD(&file_priv->mm.request_list);
+	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
+			  i915_gem_file_idle_work_handler);
+
+	idr_init(&file_priv->context_idr);
+
+	return 0;
+}
+
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 {
 	if (!mutex_is_locked(mutex))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e93c134a..875125f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -853,17 +853,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
 			gen6_set_rps(dev_priv->dev, new_delay);
 	}
 
-	if (IS_VALLEYVIEW(dev_priv->dev)) {
-		/*
-		 * On VLV, when we enter RC6 we may not be at the minimum
-		 * voltage level, so arm a timer to check. It should only
-		 * fire when there's activity or once after we've entered
-		 * RC6, and then won't be re-armed until the next RPS interrupt.
-		 */
-		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
-				 msecs_to_jiffies(100));
-	}
-
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fd8a5e8..2d81ac7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7497,6 +7497,9 @@ void intel_mark_idle(struct drm_device *dev)
 
 		intel_decrease_pllclock(crtc);
 	}
+
+	if (dev_priv->info->gen >= 6)
+		gen6_rps_idle(dev->dev_private);
 }
 
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1760808..189d010 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -793,4 +793,6 @@ extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
 extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
 extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
 
+extern void gen6_rps_idle(struct drm_i915_private *dev_priv);
+
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ecd022e..89b1d76 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3320,6 +3320,18 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	trace_intel_gpu_freq_change(val * 50);
 }
 
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+	atomic_set(&dev_priv->rps.wait_boost, false);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->info->is_valleyview)
+		valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+	else
+		gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 /*
  * Wait until the previous freq change has completed,
  * or the timeout elapsed, and then update our notion
@@ -3701,24 +3713,6 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
 	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
 }
 
-static void vlv_rps_timer_work(struct work_struct *work)
-{
-	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    rps.vlv_work.work);
-
-	/*
-	 * Timer fired, we must be idle. Drop to min voltage state.
-	 * Note: we use RPe here since it should match the
-	 * Vmin we were shooting for. That should give us better
-	 * perf when we come back out of RC6 than if we used the
-	 * min freq available.
-	 */
-	mutex_lock(&dev_priv->rps.hw_lock);
-	if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
-		valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
 static void valleyview_setup_pctx(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3856,8 +3850,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
 			 dev_priv->rps.rpe_delay),
 		 dev_priv->rps.rpe_delay);
 
-	INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
-
 	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
 	gen6_enable_rps_interrupts(dev);
@@ -4545,8 +4537,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
 	} else if (INTEL_INFO(dev)->gen >= 6) {
 		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 		cancel_work_sync(&dev_priv->rps.work);
-		if (IS_VALLEYVIEW(dev))
-			cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (IS_VALLEYVIEW(dev))
 			valleyview_disable_rps(dev);
-- 
1.8.4.rc3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx