From: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx>

Provide a ring notify mechanism where you can ask for a callback when a
specific seqno has been passed. Can be used for FBC and mmio flips at
least.

Signed-off-by: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx>
---
Note: a hedged usage sketch for the new intel_ring_notify_* hooks follows
after the diff; it is illustration only and not part of the patch.

 drivers/gpu/drm/i915/i915_gem.c         | 10 ++++
 drivers/gpu/drm/i915/i915_irq.c         |  4 ++
 drivers/gpu/drm/i915/intel_ringbuffer.c | 89 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +++++++
 4 files changed, 120 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d857f58..a5e62cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2521,6 +2521,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 					struct intel_engine_cs *ring)
 {
+	struct intel_ring_notify *notify, *next;
+
 	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj;
 
@@ -2552,6 +2554,14 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	kfree(ring->preallocated_lazy_request);
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
+
+	spin_lock_irq(&ring->lock);
+	list_for_each_entry_safe(notify, next, &ring->notify_list, list) {
+		intel_ring_notify_complete(notify);
+		/* FIXME should we notify at reset? */
+		notify->notify(notify);
+	}
+	spin_unlock_irq(&ring->lock);
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1c1ec22..218f011 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1221,6 +1221,10 @@ static void notify_ring(struct drm_device *dev,
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		intel_notify_mmio_flip(ring);
 
+	spin_lock(&ring->lock);
+	intel_ring_notify_check(ring);
+	spin_unlock(&ring->lock);
+
 	wake_up_all(&ring->irq_queue);
 	i915_queue_hangcheck(dev);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b96edaf..31321ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1447,6 +1447,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	init_waitqueue_head(&ring->irq_queue);
 
+	INIT_LIST_HEAD(&ring->notify_list);
+	spin_lock_init(&ring->lock);
+
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
@@ -2407,3 +2410,89 @@ intel_stop_ring_buffer(struct intel_engine_cs *ring)
 
 	stop_ring(ring);
 }
+
+void intel_ring_notify_complete(struct intel_ring_notify *notify)
+{
+	struct intel_engine_cs *ring = notify->ring;
+
+	ring->irq_put(ring);
+	list_del(&notify->list);
+	notify->ring = NULL;
+}
+
+void intel_ring_notify_check(struct intel_engine_cs *ring)
+{
+	struct intel_ring_notify *notify, *next;
+	u32 seqno;
+
+	assert_spin_locked(&ring->lock);
+
+	if (list_empty(&ring->notify_list))
+		return;
+
+	seqno = ring->get_seqno(ring, false);
+
+	list_for_each_entry_safe(notify, next, &ring->notify_list, list) {
+		if (i915_seqno_passed(seqno, notify->seqno)) {
+			intel_ring_notify_complete(notify);
+			notify->notify(notify);
+		}
+	}
+}
+
+int intel_ring_notify_add(struct intel_engine_cs *ring,
+			  struct intel_ring_notify *notify)
+{
+	unsigned long irqflags;
+	int ret;
+
+	lockdep_assert_held(&ring->dev->struct_mutex);
+
+	if (WARN_ON(notify->ring != NULL || notify->seqno == 0))
+		return -EINVAL;
+
+	if (i915_seqno_passed(ring->get_seqno(ring, true), notify->seqno))
+		goto notify_immediately;
+
+	ret = i915_gem_check_olr(ring, notify->seqno);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(!ring->irq_get(ring)))
+		goto notify_immediately;
+
+	spin_lock_irqsave(&ring->lock, irqflags);
+	notify->ring = ring;
+	list_add_tail(&notify->list, &ring->notify_list);
+	/* check again in case we just missed it */
+	intel_ring_notify_check(ring);
+	spin_unlock_irqrestore(&ring->lock, irqflags);
+
+	return 0;
+
+notify_immediately:
+	spin_lock_irqsave(&ring->lock, irqflags);
+	notify->notify(notify);
+	spin_unlock_irqrestore(&ring->lock, irqflags);
+
+	return 0;
+}
+
+bool intel_ring_notify_pending(struct intel_ring_notify *notify)
+{
+	return notify->ring != NULL;
+}
+
+void intel_ring_notify_cancel(struct intel_ring_notify *notify)
+{
+	struct intel_engine_cs *ring = ACCESS_ONCE(notify->ring);
+	unsigned long irqflags;
+
+	if (!ring)
+		return;
+
+	spin_lock_irqsave(&ring->lock, irqflags);
+	if (notify->ring)
+		intel_ring_notify_complete(notify);
+	spin_unlock_irqrestore(&ring->lock, irqflags);
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e72017b..273abf3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -79,6 +79,13 @@ struct intel_ringbuffer {
 	u32 last_retired_head;
 };
 
+struct intel_ring_notify {
+	void (*notify)(struct intel_ring_notify *notify);
+	struct intel_engine_cs *ring;
+	struct list_head list;
+	u32 seqno;
+};
+
 struct intel_engine_cs {
 	const char *name;
 	enum intel_ring_id {
@@ -217,6 +224,9 @@ struct intel_engine_cs {
 	 * to encode the command length in the header).
 	 */
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
+
+	struct list_head notify_list;
+	spinlock_t lock;
 };
 
 static inline bool
@@ -335,6 +345,13 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
 		ring->trace_irq_seqno = seqno;
 }
 
+void intel_ring_notify_complete(struct intel_ring_notify *notify);
+void intel_ring_notify_check(struct intel_engine_cs *ring);
+int __must_check intel_ring_notify_add(struct intel_engine_cs *ring,
+				       struct intel_ring_notify *notify);
+bool intel_ring_notify_pending(struct intel_ring_notify *notify);
+void intel_ring_notify_cancel(struct intel_ring_notify *notify);
+
 /* DRI warts */
 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
-- 
1.8.5.5
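
For illustration, here is a hedged sketch of how a caller (the commit
message names FBC and mmio flips as likely users) might hook the new
mechanism. Everything named foo_* below is made up for this example;
only struct intel_ring_notify and the intel_ring_notify_* calls come
from the patch. The callback is assumed to run with ring->lock held
(from the ring interrupt, or synchronously from intel_ring_notify_add()
itself), so it defers the real work to a worker. The snippet presumes it
sits in an i915 source file with the usual kernel headers available.

struct foo_flip {
	struct intel_ring_notify notify;	/* embedded so container_of() works */
	struct work_struct work;		/* real work deferred out of irq context */
};

static void foo_flip_work(struct work_struct *work)
{
	struct foo_flip *flip = container_of(work, struct foo_flip, work);

	/* The seqno has passed; do the actual flip/FBC update here. */
	(void)flip;
}

static void foo_flip_notify(struct intel_ring_notify *notify)
{
	struct foo_flip *flip = container_of(notify, struct foo_flip, notify);

	/*
	 * Invoked either from notify_ring() in interrupt context or
	 * synchronously from intel_ring_notify_add(), in both cases with
	 * ring->lock held, so just kick the worker.
	 */
	schedule_work(&flip->work);
}

/* Caller holds dev->struct_mutex, which intel_ring_notify_add() asserts. */
static int foo_flip_queue(struct intel_engine_cs *ring,
			  struct foo_flip *flip, u32 seqno)
{
	INIT_WORK(&flip->work, foo_flip_work);

	flip->notify.notify = foo_flip_notify;
	flip->notify.seqno = seqno;	/* must be non-zero */

	return intel_ring_notify_add(ring, &flip->notify);
}

Tear-down on such a hypothetical caller would use
intel_ring_notify_cancel(&flip->notify), and
intel_ring_notify_pending(&flip->notify) reports whether the callback is
still outstanding.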