From: Paulo Zanoni <paulo.r.zanoni@xxxxxxxxx>

We want to port FBC to the frontbuffer tracking infrastructure, but for
that we need to know what caused the object invalidation/flush so we
can react accordingly: CPU mmaps need manual handling, GTT mmaps and
flips don't need handling, and ring rendering needs nukes.

Signed-off-by: Paulo Zanoni <paulo.r.zanoni@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h            |  7 +++++++
 drivers/gpu/drm/i915/i915_gem.c            | 10 +++++-----
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/intel_drv.h           | 11 +++++++----
 drivers/gpu/drm/i915/intel_frontbuffer.c   | 15 ++++++++++-----
 5 files changed, 30 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9d694f1..ea3cc81 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -685,6 +685,13 @@ struct intel_context {
 	struct list_head link;
 };
 
+enum fb_op_origin {
+	ORIGIN_GTT,
+	ORIGIN_CPU,
+	ORIGIN_RENDER,
+	ORIGIN_FLIP,
+};
+
 struct i915_fbc {
 	unsigned long size;
 	unsigned threshold;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index de241eb..7ef12e8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2321,7 +2321,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 			list_move_tail(&vma->mm_list, &vm->inactive_list);
 	}
 
-	intel_fb_obj_flush(obj, true);
+	intel_fb_obj_flush(obj, true, ORIGIN_RENDER);
 
 	list_del_init(&obj->ring_list);
 
@@ -3665,7 +3665,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
@@ -3688,7 +3688,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
@@ -3745,7 +3745,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	}
 
 	if (write)
-		intel_fb_obj_invalidate(obj, NULL);
+		intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
@@ -4072,7 +4072,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	}
 
 	if (write)
-		intel_fb_obj_invalidate(obj, NULL);
+		intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0c25f62..af290e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -965,7 +965,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 			obj->dirty = 1;
 			i915_gem_request_assign(&obj->last_write_req, req);
 
-			intel_fb_obj_invalidate(obj, ring);
+			intel_fb_obj_invalidate(obj, ring, ORIGIN_RENDER);
 
 			/* update for the implicit flush after a batch */
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 588b618..633fb9a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -841,13 +841,15 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 
 /* intel_frontbuffer.c */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring);
+			     struct intel_engine_cs *ring,
+			     enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
 				     unsigned frontbuffer_bits);
 void intel_frontbuffer_flush(struct drm_device *dev,
-			     unsigned frontbuffer_bits);
+			     unsigned frontbuffer_bits,
+			     enum fb_op_origin origin);
 /**
  * intel_frontbuffer_flip - synchronous frontbuffer flip
  * @dev: DRM device
@@ -863,10 +865,11 @@ static inline
 void intel_frontbuffer_flip(struct drm_device *dev,
 			    unsigned frontbuffer_bits)
 {
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
+	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
 
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+			enum fb_op_origin origin);
 
 
 /* intel_audio.c */
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 79f6d72..7bdac69 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -127,6 +127,7 @@ static void intel_mark_fb_busy(struct drm_device *dev,
  * intel_fb_obj_invalidate - invalidate frontbuffer object
  * @obj: GEM object to invalidate
  * @ring: set for asynchronous rendering
+ * @origin: which operation caused the invalidation
  *
  * This function gets called every time rendering on the given object starts and
  * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
@@ -135,7 +136,8 @@ static void intel_mark_fb_busy(struct drm_device *dev,
  * scheduled.
  */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring)
+			     struct intel_engine_cs *ring,
+			     enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -163,6 +165,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  * intel_frontbuffer_flush - flush frontbuffer
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the invalidation
  *
  * This function gets called every time rendering on the given planes has
  * completed and frontbuffer caching can be started again. Flushes will get
@@ -171,7 +174,8 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  * Can be called without any locks held.
  */
 void intel_frontbuffer_flush(struct drm_device *dev,
-			     unsigned frontbuffer_bits)
+			     unsigned frontbuffer_bits,
+			     enum fb_op_origin origin)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -199,13 +203,14 @@ void intel_frontbuffer_flush(struct drm_device *dev,
  * intel_fb_obj_flush - flush frontbuffer object
  * @obj: GEM object to flush
  * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the invalidation
  *
  * This function gets called every time rendering on the given object has
  * completed and frontbuffer caching can be started again. If @retire is true
 * then any delayed flushes will be unblocked.
  */
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire)
+			bool retire, enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -227,7 +232,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 		mutex_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
+	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
@@ -275,5 +280,5 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
 	mutex_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
+	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
-- 
2.1.3
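
For readers new to the frontbuffer tracking code, the sketch below shows the
kind of dispatch a future FBC consumer could perform on the new origin
values. It is a standalone, userspace-style illustration only: the enum
mirrors the one added to i915_drv.h above, but fbc_handle_flush() and its
output are hypothetical and not part of this patch.

#include <stdio.h>

/* Mirrors the enum added to i915_drv.h by the patch above. */
enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_RENDER,
	ORIGIN_FLIP,
};

/*
 * Hypothetical consumer: branch on the origin the way the commit message
 * describes (GTT mmaps and flips need no handling, CPU mmaps need manual
 * handling, ring rendering needs a nuke).
 */
static void fbc_handle_flush(enum fb_op_origin origin)
{
	switch (origin) {
	case ORIGIN_GTT:
	case ORIGIN_FLIP:
		printf("origin %d: nothing to do\n", origin);
		break;
	case ORIGIN_CPU:
		printf("origin %d: manual handling\n", origin);
		break;
	case ORIGIN_RENDER:
		printf("origin %d: nuke\n", origin);
		break;
	}
}

int main(void)
{
	fbc_handle_flush(ORIGIN_CPU);
	fbc_handle_flush(ORIGIN_RENDER);
	fbc_handle_flush(ORIGIN_FLIP);
	return 0;
}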