We only need a very lightweight mechanism here as the locking is only
used for co-ordinating a bitfield.

v2: Move the cheap unlikely tests into the caller
v3: Move the kerneldoc into the header (now separated out into
    intel_frontbuffer.h for better kerneldoc and readability)

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
Cc: Daniel Vetter <daniel.vetter@xxxxxxxx>
Reviewed-by: Daniel Vetter <daniel.vetter@xxxxxxxx>
---
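For illustration, a minimal standalone sketch of the pattern this patch
adopts: the cheap test of obj->frontbuffer_bits moves into an inline
wrapper in the caller, while the spinlock-protected bitfield update stays
out of line. The fb_tracking/fb_object types and function names below are
invented for the sketch and are not the actual i915 code:

  #include <linux/spinlock.h>

  struct fb_tracking {
  	spinlock_t lock;	/* only co-ordinates the bitfields below;
  				 * initialised elsewhere with spin_lock_init() */
  	unsigned busy_bits;
  	unsigned flip_bits;
  };

  struct fb_object {
  	unsigned frontbuffer_bits;	/* zero for almost every object */
  	struct fb_tracking *tracking;
  };

  /* Out-of-line slow path: the spinlock serialises the bitfield update. */
  void __fb_obj_invalidate(struct fb_object *obj)
  {
  	struct fb_tracking *t = obj->tracking;

  	spin_lock(&t->lock);
  	t->busy_bits |= obj->frontbuffer_bits;
  	t->flip_bits &= ~obj->frontbuffer_bits;
  	spin_unlock(&t->lock);
  }

  /* Inline fast path: most objects are not frontbuffers, so the common
   * case costs one load and one predictable branch, with no locking.
   */
  static inline void fb_obj_invalidate(struct fb_object *obj)
  {
  	if (!obj->frontbuffer_bits)
  		return;

  	__fb_obj_invalidate(obj);
  }

A spinlock is sufficient here because the critical sections only set and
clear bits in a word and never sleep, and the uncontended lock/unlock is
much cheaper than a mutex; the inline wrapper then avoids even that cost
in the overwhelmingly common non-frontbuffer case.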
 drivers/gpu/drm/i915/i915_drv.h          |  2 +-
 drivers/gpu/drm/i915/i915_gem.c          |  2 +-
 drivers/gpu/drm/i915/intel_frontbuffer.c | 73 ++++++++++----------------
 drivers/gpu/drm/i915/intel_frontbuffer.h | 50 ++++++++++++++++++++--
 4 files changed, 71 insertions(+), 56 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7ba99057c317..b26f5b1f466b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1669,7 +1669,7 @@ struct intel_pipe_crc {
 };
 
 struct i915_frontbuffer_tracking {
-	struct mutex lock;
+	spinlock_t lock;
 
 	/*
 	 * Tracking bits for delayed frontbuffer flushing du to gpu activity or
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d06cb4427137..459f958aadf1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4452,7 +4452,7 @@ i915_gem_load_init(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	mutex_init(&dev_priv->fb_tracking.lock);
+	spin_lock_init(&dev_priv->fb_tracking.lock);
 }
 
 void i915_gem_load_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index b83a70016b12..f15486aee7f8 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -66,35 +66,19 @@
 #include "intel_frontbuffer.h"
 #include "i915_drv.h"
 
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @origin: which operation caused the invalidation
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin)
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->frontbuffer_bits)
-		return;
-
 	if (origin == ORIGIN_CS) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits
-			|= obj->frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits
-			&= ~obj->frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
+		dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
 	intel_psr_invalidate(dev, obj->frontbuffer_bits);
@@ -121,9 +105,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Delay flushing when rings are still busy.*/
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	if (!frontbuffer_bits)
 		return;
@@ -133,18 +117,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- * @origin: which operation caused the flush
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire, enum fb_op_origin origin)
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -152,21 +127,18 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->frontbuffer_bits)
-		return;
-
 	frontbuffer_bits = obj->frontbuffer_bits;
 
 	if (retire) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
 		/* Filter out new bits since rendering started. */
 		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
 		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
@@ -186,11 +158,11 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	intel_psr_single_frame_update(dev, frontbuffer_bits);
 }
@@ -210,13 +182,14 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
 	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
 	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
@@ -235,10 +208,10 @@ void intel_frontbuffer_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 3d0061454bd5..60a0ec179108 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -28,15 +28,57 @@
 struct drm_device;
 struct drm_i915_private;
 struct drm_i915_gem_object;
 
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
 				     unsigned frontbuffer_bits);
 void intel_frontbuffer_flip(struct drm_device *dev,
 			    unsigned frontbuffer_bits);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
-			enum fb_op_origin origin);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin);
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin);
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+					   enum fb_op_origin origin)
+{
+	if (!obj->frontbuffer_bits)
+		return;
+
+	__intel_fb_obj_invalidate(obj, origin);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */ +static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj, + bool retire, + enum fb_op_origin origin) +{ + if (!obj->frontbuffer_bits) + return; + + __intel_fb_obj_flush(obj, retire, origin); +} #endif /* __INTEL_FRONTBUFFER_H__ */ -- 2.8.1 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx