[PATCH 37/81] drm/i915: Add atomic page flip support

From: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx>

Add support for the DRM_MODE_ATOMIC_NONBLOCK flag.

The drm_flip helper provides the necessary logic to track the
progress of the flips. drm_flip is driven by a few extra calls
from the interrupt handling and crtc_disable code paths.

Since the hardware doesn't provide inter-plane synchronization, some
extra software magic is required to avoid flips for multiple planes
ending up on the wrong sides of the vblank leading edge.

Signed-off-by: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx>
---
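The "software magic" mentioned above comes down to two pieces in this
patch: a wrap-safe frame counter comparison and a short vblank-evasion
wait before the double-buffered registers are armed.  Below is a
minimal standalone sketch of the first, mirroring the 24-bit pre-G4X
variant vbl_count_after_eq() added further down; the example values
and the "24" suffix are illustrative, not taken from the patch:

/*
 * Standalone sketch, not part of the patch: why the flip completion
 * check compares frame counters modulo the counter width instead of
 * using a plain >=.  The pre-G4X counter is only 24 bits wide, so
 * "a happened at or after b" is decided by the sign bit of the
 * modular difference (bit 23 here, bit 31 for the G4X+ variant).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool vbl_count_after_eq24(uint32_t a, uint32_t b)
{
	return !((a - b) & 0x800000);
}

int main(void)
{
	/* b sampled just before the 24-bit counter wraps, a just after */
	uint32_t b = 0xfffffe, a = 0x000001;

	/* a plain compare gets the wrap wrong; the modular test does not */
	printf("naive a >= b: %d, wrap-safe: %d\n",
	       a >= b, vbl_count_after_eq24(a, b));
	return 0;
}
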
 drivers/gpu/drm/i915/i915_dma.c      |    3 +
 drivers/gpu/drm/i915/i915_drv.h      |    4 +
 drivers/gpu/drm/i915/i915_irq.c      |   16 +-
 drivers/gpu/drm/i915/intel_atomic.c  |  622 +++++++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/intel_display.c |    2 +
 drivers/gpu/drm/i915/intel_drv.h     |    7 +
 6 files changed, 637 insertions(+), 17 deletions(-)
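
The evasion window itself comes from converting a fixed number of
microseconds into scanlines.  Here is a standalone sketch of the
arithmetic behind usecs_to_scanlines() and the window chosen in
intel_pipe_vblank_evade(); the 1080p timings are made-up example
numbers, and mode clocks are in kHz as in struct drm_display_mode:

/*
 * Standalone sketch, not part of the patch: the scanline arithmetic
 * behind usecs_to_scanlines() and the ~50us evasion window used by
 * intel_pipe_vblank_evade().
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int usecs_to_scanlines(unsigned int clock_khz,
				       unsigned int htotal,
				       unsigned int usecs)
{
	/* one scanline lasts htotal / (clock_khz * 1000) seconds */
	return DIV_ROUND_UP(usecs * clock_khz, 1000 * htotal);
}

int main(void)
{
	unsigned int clock = 148500, htotal = 2200, vdisplay = 1080;
	unsigned int w = usecs_to_scanlines(clock, htotal, 50);

	/* flips are held off while the beam is inside this window */
	printf("evade scanlines %u..%u (%u lines before vblank)\n",
	       vdisplay - w, vdisplay - 1, w);
	return 0;
}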

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 392c9a1..8501bf3 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1764,6 +1764,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 
 	idr_init(&file_priv->context_idr);
 
+	INIT_LIST_HEAD(&file_priv->pending_flips);
+
 	return 0;
 }
 
@@ -1804,6 +1806,7 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
 	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
+	intel_atomic_free_events(dev, file_priv);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 66b3b64..0bafe7f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -38,6 +38,7 @@
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
+#include <drm/drm_flip.h>
 #include <drm/intel-gtt.h>
 #include <linux/backlight.h>
 #include <linux/intel-iommu.h>
@@ -957,6 +958,8 @@ typedef struct drm_i915_private {
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
+
+	struct drm_flip_driver flip_driver;
 } drm_i915_private_t;
 
 /* Iterate over initialised rings */
@@ -1179,6 +1182,7 @@ struct drm_i915_file_private {
 		struct list_head request_list;
 	} mm;
 	struct idr context_idr;
+	struct list_head pending_flips;
 };
 
 #define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d02e022..430f201 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -581,8 +581,10 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 		for_each_pipe(pipe) {
-			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) {
 				drm_handle_vblank(dev, pipe);
+				intel_atomic_handle_vblank(dev, pipe);
+			}
 
 			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
 				intel_prepare_page_flip(dev, pipe);
@@ -727,8 +729,10 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 			intel_opregion_gse_intr(dev);
 
 		for (i = 0; i < 3; i++) {
-			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) {
 				drm_handle_vblank(dev, i);
+				intel_atomic_handle_vblank(dev, i);
+			}
 			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
 				intel_prepare_page_flip(dev, i);
 				intel_finish_page_flip_plane(dev, i);
@@ -807,11 +811,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	if (de_iir & DE_GSE)
 		intel_opregion_gse_intr(dev);
 
-	if (de_iir & DE_PIPEA_VBLANK)
+	if (de_iir & DE_PIPEA_VBLANK) {
 		drm_handle_vblank(dev, 0);
+		intel_atomic_handle_vblank(dev, 0);
+	}
 
-	if (de_iir & DE_PIPEB_VBLANK)
+	if (de_iir & DE_PIPEB_VBLANK) {
 		drm_handle_vblank(dev, 1);
+		intel_atomic_handle_vblank(dev, 1);
+	}
 
 	if (de_iir & DE_PLANEA_FLIP_DONE) {
 		intel_prepare_page_flip(dev, 0);
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 3adb140..238a843 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -26,6 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
+#include <drm/drm_flip.h>
 
 #include "intel_drv.h"
 
@@ -47,6 +48,20 @@ static struct drm_property *prop_cursor_y;
 static struct drm_property *prop_cursor_w;
 static struct drm_property *prop_cursor_h;
 
+struct intel_flip {
+	struct drm_flip base;
+	u32 vbl_count;
+	bool vblank_ref;
+	bool has_cursor;
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct drm_i915_gem_object *old_bo;
+	struct drm_i915_gem_object *old_cursor_bo;
+	struct drm_pending_atomic_event *event;
+	uint32_t old_fb_id;
+	struct list_head pending_head;
+};
+
 struct intel_plane_state {
 	struct drm_plane *plane;
 	struct intel_plane_coords coords;
@@ -54,6 +69,7 @@ struct intel_plane_state {
 	bool pinned;
 	bool changed;
 	struct drm_pending_atomic_event *event;
+	struct intel_flip *flip;
 
 	struct {
 		struct drm_crtc *crtc;
@@ -76,6 +92,7 @@ struct intel_crtc_state {
 	unsigned long encoders_bitmask;
 	bool changed;
 	struct drm_pending_atomic_event *event;
+	struct intel_flip *flip;
 
 	struct {
 		bool enabled;
@@ -966,6 +983,22 @@ static void free_event(struct drm_pending_atomic_event *e)
 	kfree(e);
 }
 
+void intel_atomic_free_events(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct intel_flip *intel_flip, *next;
+
+	spin_lock_irq(&dev->event_lock);
+
+	list_for_each_entry_safe(intel_flip, next, &file_priv->pending_flips, pending_head) {
+		free_event(intel_flip->event);
+		intel_flip->event = NULL;
+		list_del_init(&intel_flip->pending_head);
+	}
+
+	spin_unlock_irq(&dev->event_lock);
+}
+
 static void queue_event(struct drm_device *dev, struct drm_crtc *crtc,
 			struct drm_pending_atomic_event *e)
 {
@@ -1533,6 +1566,60 @@ static void update_crtc(struct drm_device *dev,
 	}
 }
 
+static void atomic_pipe_commit(struct drm_device *dev,
+			       struct intel_atomic_state *state,
+			       int pipe);
+
+static int apply_nonblocking(struct drm_device *dev, struct intel_atomic_state *s)
+{
+	struct intel_crtc *intel_crtc;
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+		struct intel_crtc *intel_crtc = to_intel_crtc(st->crtc);
+		struct drm_i915_gem_object *obj;
+
+		if (!st->old.fb)
+			continue;
+
+		obj = to_intel_framebuffer(st->old.fb)->obj;
+
+		/* Only one bit per plane in pending_flips */
+		if (atomic_read(&obj->pending_flip) & (1 << intel_crtc->plane))
+			return -EBUSY;
+	}
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &s->plane[i];
+		struct intel_plane *intel_plane = to_intel_plane(st->plane);
+		struct drm_i915_gem_object *obj;
+
+		if (!st->old.fb)
+			continue;
+
+		obj = to_intel_framebuffer(st->old.fb)->obj;
+
+		/* Only one bit per plane in pending_flips */
+		if (atomic_read(&obj->pending_flip) & (1 << (16 + intel_plane->pipe)))
+			return -EBUSY;
+	}
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+		atomic_pipe_commit(dev, s, intel_crtc->pipe);
+
+	/* don't restore the old state in end() */
+	s->dirty = false;
+	s->restore_state = false;
+
+	return 0;
+}
+
 static int alloc_flip_data(struct drm_device *dev, struct intel_atomic_state *s)
 {
 	int i;
@@ -1551,6 +1638,13 @@ static int alloc_flip_data(struct drm_device *dev, struct intel_atomic_state *s)
 
 			st->event = e;
 		}
+
+		if (!st->fb_dirty && !st->mode_dirty && !st->cursor_dirty)
+			continue;
+
+		st->flip = kzalloc(sizeof *st->flip, GFP_KERNEL);
+		if (!st->flip)
+			return -ENOMEM;
 	}
 
 
@@ -1568,6 +1662,13 @@ static int alloc_flip_data(struct drm_device *dev, struct intel_atomic_state *s)
 
 			st->event = e;
 		}
+
+		if (!st->dirty)
+			continue;
+
+		st->flip = kzalloc(sizeof *st->flip, GFP_KERNEL);
+		if (!st->flip)
+			return -ENOMEM;
 	}
 
 	return 0;
@@ -1586,6 +1687,9 @@ static void free_flip_data(struct drm_device *dev, struct intel_atomic_state *s)
 			spin_unlock_irq(&dev->event_lock);
 			st->event = NULL;
 		}
+
+		kfree(st->flip);
+		st->flip = NULL;
 	}
 
 	for (i = 0; i < dev->mode_config.num_plane; i++) {
@@ -1597,6 +1701,9 @@ static void free_flip_data(struct drm_device *dev, struct intel_atomic_state *s)
 			spin_unlock_irq(&dev->event_lock);
 			st->event = NULL;
 		}
+
+		kfree(st->flip);
+		st->flip = NULL;
 	}
 }
 
@@ -1605,9 +1712,6 @@ static int intel_atomic_commit(struct drm_device *dev, void *state)
 	struct intel_atomic_state *s = state;
 	int ret;
 
-	if (s->flags & DRM_MODE_ATOMIC_NONBLOCK)
-		return -ENOSYS;
-
 	if (!s->dirty)
 		return 0;
 
@@ -1623,17 +1727,27 @@ static int intel_atomic_commit(struct drm_device *dev, void *state)
 	if (ret)
 		return ret;
 
-	/* apply in a blocking manner */
-	ret = apply_config(dev, s);
-	if (ret) {
-		unpin_cursors(dev, s);
-		unpin_fbs(dev, s);
-		s->restore_hw = true;
-		return ret;
-	}
+	/* try to apply in a non blocking manner */
+	if (s->flags & DRM_MODE_ATOMIC_NONBLOCK) {
+		ret = apply_nonblocking(dev, s);
+		if (ret) {
+			unpin_cursors(dev, s);
+			unpin_fbs(dev, s);
+			return ret;
+		}
+	} else {
+		/* apply in a blocking manner */
+		ret = apply_config(dev, s);
+		if (ret) {
+			unpin_cursors(dev, s);
+			unpin_fbs(dev, s);
+			s->restore_hw = true;
+			return ret;
+		}
 
-	unpin_old_cursors(dev, s);
-	unpin_old_fbs(dev, s);
+		unpin_old_cursors(dev, s);
+		unpin_old_fbs(dev, s);
+	}
 
 	/*
 	 * Either we took the blocking code path, or perhaps the state of
@@ -1703,6 +1817,9 @@ static struct {
 	{ &prop_cursor_y, "CURSOR_Y", INT_MIN, INT_MAX },
 };
 
+static void intel_flip_init(struct drm_device *dev);
+static void intel_flip_fini(struct drm_device *dev);
+
 int intel_atomic_init(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
@@ -1776,6 +1893,8 @@ int intel_atomic_init(struct drm_device *dev)
 
 	dev->driver->atomic_funcs = &intel_atomic_funcs;
 
+	intel_flip_init(dev);
+
 	return 0;
 
  out:
@@ -1792,6 +1911,8 @@ void intel_atomic_fini(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
 
+	intel_flip_fini(dev);
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		drm_property_destroy_blob(dev, crtc->mode_blob);
 		drm_property_destroy_blob(dev, crtc->connector_ids_blob);
@@ -1812,3 +1933,478 @@ void intel_atomic_fini(struct drm_device *dev)
 	drm_property_destroy(dev, prop_src_y);
 	drm_property_destroy(dev, prop_src_x);
 }
+
+enum {
+	/* somewhat arbitrary value */
+	INTEL_VBL_CNT_TIMEOUT = 5,
+};
+
+static void intel_flip_complete(struct drm_flip *flip)
+{
+	struct intel_flip *intel_flip =
+		container_of(flip, struct intel_flip, base);
+	struct drm_device *dev = intel_flip->crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = intel_flip->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (intel_flip->event) {
+		list_del_init(&intel_flip->pending_head);
+		intel_flip->event->event.old_fb_id = intel_flip->old_fb_id;
+		queue_event(dev, crtc, intel_flip->event);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (intel_flip->vblank_ref)
+		drm_vblank_put(dev, pipe);
+
+	/* Possibly allow rendering to old_bo again */
+	if (intel_flip->old_bo) {
+		if (intel_flip->plane) {
+			struct intel_plane *intel_plane = to_intel_plane(intel_flip->plane);
+			atomic_clear_mask(1 << (16 + intel_plane->pipe), &intel_flip->old_bo->pending_flip.counter);
+		} else
+			atomic_clear_mask(1 << intel_crtc->plane, &intel_flip->old_bo->pending_flip.counter);
+
+		if (atomic_read(&intel_flip->old_bo->pending_flip) == 0)
+			wake_up(&dev_priv->pending_flip_queue);
+	}
+}
+
+static void intel_flip_finish(struct drm_flip *flip)
+{
+	struct intel_flip *intel_flip =
+		container_of(flip, struct intel_flip, base);
+	struct drm_device *dev = intel_flip->crtc->dev;
+
+	if (intel_flip->old_bo) {
+		mutex_lock(&dev->struct_mutex);
+
+		intel_unpin_fb_obj(intel_flip->old_bo);
+
+		drm_gem_object_unreference(&intel_flip->old_bo->base);
+
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	if (intel_flip->old_cursor_bo)
+		intel_crtc_cursor_bo_unref(intel_flip->crtc, intel_flip->old_cursor_bo);
+}
+
+static void intel_flip_cleanup(struct drm_flip *flip)
+{
+	struct intel_flip *intel_flip =
+		container_of(flip, struct intel_flip, base);
+
+	kfree(intel_flip);
+}
+
+static void intel_flip_driver_flush(struct drm_flip_driver *driver)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(driver, struct drm_i915_private, flip_driver);
+
+	/* Flush posted writes */
+	I915_READ(PIPEDSL(PIPE_A));
+}
+
+static bool intel_have_new_frmcount(struct drm_device *dev)
+{
+	return IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5;
+}
+
+static u32 get_vbl_count(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+
+	if (intel_have_new_frmcount(dev)) {
+		return I915_READ(PIPE_FRMCOUNT_GM45(pipe));
+	} else {
+		u32 high, low1, low2, dsl;
+		unsigned int timeout = 0;
+
+		/*
+		 * FIXME check where the frame counter increments, and if
+		 * it happens in the middle of some line, take appropriate
+		 * measures to get a sensible reading.
+		 */
+
+		/* All reads must be satisfied during the same frame */
+		do {
+			low1 = I915_READ(PIPEFRAMEPIXEL(pipe)) >> PIPE_FRAME_LOW_SHIFT;
+			high = I915_READ(PIPEFRAME(pipe)) << 8;
+			dsl = I915_READ(PIPEDSL(pipe));
+			low2 = I915_READ(PIPEFRAMEPIXEL(pipe)) >> PIPE_FRAME_LOW_SHIFT;
+		} while (low1 != low2 && timeout++ < INTEL_VBL_CNT_TIMEOUT);
+
+		if (timeout >= INTEL_VBL_CNT_TIMEOUT)
+			dev_warn(dev->dev,
+				 "Timed out while determining VBL count for pipe %d\n", pipe);
+
+		return ((high | low2) +
+			((dsl >= crtc->hwmode.crtc_vdisplay) &&
+			 (dsl < crtc->hwmode.crtc_vtotal - 1))) & 0xffffff;
+	}
+}
+
+static unsigned int usecs_to_scanlines(struct drm_crtc *crtc,
+				       unsigned int usecs)
+{
+	/* paranoia */
+	if (!crtc->hwmode.crtc_htotal)
+		return 1;
+
+	return DIV_ROUND_UP(usecs * crtc->hwmode.clock,
+			    1000 * crtc->hwmode.crtc_htotal);
+}
+
+static void intel_pipe_vblank_evade(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	/* FIXME needs to be calibrated sensibly */
+	u32 min = crtc->hwmode.crtc_vdisplay - usecs_to_scanlines(crtc, 50);
+	u32 max = crtc->hwmode.crtc_vdisplay - 1;
+	long timeout = msecs_to_jiffies(3);
+	u32 val;
+
+	bool vblank_ref = drm_vblank_get(dev, pipe) == 0;
+
+	intel_crtc->vbl_received = false;
+
+	val = I915_READ(PIPEDSL(pipe));
+
+	while (val >= min && val <= max && timeout > 0) {
+		local_irq_enable();
+
+		timeout = wait_event_timeout(intel_crtc->vbl_wait,
+					     intel_crtc->vbl_received,
+					     timeout);
+
+		local_irq_disable();
+
+		intel_crtc->vbl_received = false;
+
+		val = I915_READ(PIPEDSL(pipe));
+	}
+
+	if (vblank_ref)
+		drm_vblank_put(dev, pipe);
+
+	if (val >= min && val <= max)
+		dev_warn(dev->dev,
+			 "Page flipping close to vblank start (DSL=%u, VBL=%u)\n",
+			 val, crtc->hwmode.crtc_vdisplay);
+}
+
+static bool vbl_count_after_eq_new(u32 a, u32 b)
+{
+	return !((a - b) & 0x80000000);
+}
+
+static bool vbl_count_after_eq(u32 a, u32 b)
+{
+	return !((a - b) & 0x800000);
+}
+
+static bool intel_vbl_check(struct drm_flip *pending_flip, u32 vbl_count)
+{
+	struct intel_flip *old_intel_flip =
+		container_of(pending_flip, struct intel_flip, base);
+	struct drm_device *dev = old_intel_flip->crtc->dev;
+
+	if (intel_have_new_frmcount(dev))
+		return vbl_count_after_eq_new(vbl_count, old_intel_flip->vbl_count);
+	else
+		return vbl_count_after_eq(vbl_count, old_intel_flip->vbl_count);
+}
+
+static void intel_flip_prepare(struct drm_flip *flip)
+{
+	struct intel_flip *intel_flip =
+		container_of(flip, struct intel_flip, base);
+
+	if (intel_flip->plane) {
+		struct drm_plane *plane = intel_flip->plane;
+		struct intel_plane *intel_plane = to_intel_plane(plane);
+
+		intel_plane->prepare(plane);
+	}
+}
+
+static bool intel_flip_flip(struct drm_flip *flip,
+			    struct drm_flip *pending_flip)
+{
+	struct intel_flip *intel_flip = container_of(flip, struct intel_flip, base);
+	struct drm_crtc *crtc = intel_flip->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	int pipe = intel_crtc->pipe;
+	u32 vbl_count;
+
+	intel_flip->vblank_ref = drm_vblank_get(dev, pipe) == 0;
+
+	vbl_count = get_vbl_count(crtc);
+
+	/* arm all the double buffer registers */
+	if (intel_flip->plane) {
+		struct drm_plane *plane = intel_flip->plane;
+		struct intel_plane *intel_plane = to_intel_plane(plane);
+
+		intel_plane->commit(plane);
+	} else {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+
+		dev_priv->display.commit_plane(crtc);
+	}
+
+	if (intel_flip->has_cursor)
+		intel_crtc_cursor_commit(crtc,
+					 intel_crtc->cursor_handle,
+					 intel_crtc->cursor_width,
+					 intel_crtc->cursor_height,
+					 intel_crtc->cursor_bo,
+					 intel_crtc->cursor_addr);
+
+	/* This flip will happen on the next vblank */
+	if (intel_have_new_frmcount(dev))
+		intel_flip->vbl_count = vbl_count + 1;
+	else
+		intel_flip->vbl_count = (vbl_count + 1) & 0xffffff;
+
+	if (pending_flip) {
+		struct intel_flip *old_intel_flip =
+			container_of(pending_flip, struct intel_flip, base);
+		bool flipped = intel_vbl_check(pending_flip, vbl_count);
+
+		if (!flipped) {
+			swap(intel_flip->old_fb_id, old_intel_flip->old_fb_id);
+			swap(intel_flip->old_bo, old_intel_flip->old_bo);
+			swap(intel_flip->old_cursor_bo, old_intel_flip->old_cursor_bo);
+		}
+
+		return flipped;
+	}
+
+	return false;
+}
+
+static bool intel_flip_vblank(struct drm_flip *pending_flip)
+{
+	struct intel_flip *old_intel_flip =
+		container_of(pending_flip, struct intel_flip, base);
+	u32 vbl_count = get_vbl_count(old_intel_flip->crtc);
+
+	return intel_vbl_check(pending_flip, vbl_count);
+}
+
+static const struct drm_flip_helper_funcs intel_flip_funcs = {
+	.prepare = intel_flip_prepare,
+	.flip = intel_flip_flip,
+	.vblank = intel_flip_vblank,
+	.complete = intel_flip_complete,
+	.finish = intel_flip_finish,
+	.cleanup = intel_flip_cleanup,
+};
+
+static const struct drm_flip_driver_funcs intel_flip_driver_funcs = {
+	.flush = intel_flip_driver_flush,
+};
+
+static void intel_flip_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc;
+	struct intel_plane *intel_plane;
+
+	drm_flip_driver_init(&dev_priv->flip_driver, &intel_flip_driver_funcs);
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+		init_waitqueue_head(&intel_crtc->vbl_wait);
+
+		drm_flip_helper_init(&intel_crtc->flip_helper,
+				     &dev_priv->flip_driver, &intel_flip_funcs);
+	}
+
+	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
+		drm_flip_helper_init(&intel_plane->flip_helper,
+				     &dev_priv->flip_driver, &intel_flip_funcs);
+}
+
+static void intel_flip_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc;
+	struct intel_plane *intel_plane;
+
+	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
+		drm_flip_helper_fini(&intel_plane->flip_helper);
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+		drm_flip_helper_fini(&intel_crtc->flip_helper);
+
+	drm_flip_driver_fini(&dev_priv->flip_driver);
+}
+
+static void atomic_pipe_commit(struct drm_device *dev,
+			       struct intel_atomic_state *state,
+			       int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = state->file->driver_priv;
+	LIST_HEAD(flips);
+	int i;
+	bool pipe_enabled = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe))->active;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &state->crtc[i];
+		struct drm_crtc *crtc = st->crtc;
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		struct intel_flip *intel_flip;
+
+		if (!st->fb_dirty && !st->cursor_dirty)
+			continue;
+
+		if (intel_crtc->pipe != pipe)
+			continue;
+
+		intel_flip = st->flip;
+		st->flip = NULL;
+
+		drm_flip_init(&intel_flip->base, &intel_crtc->flip_helper);
+
+		if (st->event) {
+			intel_flip->event = st->event;
+			st->event = NULL;
+			/* need to keep track of it in case process exits */
+			spin_lock_irq(&dev->event_lock);
+			list_add_tail(&intel_flip->pending_head,
+				      &file_priv->pending_flips);
+			spin_unlock_irq(&dev->event_lock);
+		}
+
+		intel_flip->crtc = crtc;
+
+		/* should already be checked so can't fail */
+		/* FIXME refactor the failing parts? */
+		dev_priv->display.calc_plane(crtc, crtc->fb, crtc->x, crtc->y);
+
+		if (st->cursor_dirty) {
+			intel_flip->has_cursor = true;
+			intel_flip->old_cursor_bo = st->old.cursor_bo;
+		}
+
+		if (st->old.fb) {
+			intel_flip->old_fb_id = st->old.fb->base.id;
+			intel_flip->old_bo = to_intel_framebuffer(st->old.fb)->obj;
+
+			mutex_lock(&dev->struct_mutex);
+			drm_gem_object_reference(&intel_flip->old_bo->base);
+			mutex_unlock(&dev->struct_mutex);
+
+			/*
+			 * Block clients from rendering to the new back buffer until
+			 * the flip occurs and the object is no longer visible.
+			 */
+			atomic_set_mask(1 << intel_crtc->plane,
+					&intel_flip->old_bo->pending_flip.counter);
+		}
+
+		list_add_tail(&intel_flip->base.list, &flips);
+	}
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &state->plane[i];
+		struct drm_plane *plane = st->plane;
+		struct intel_plane *intel_plane = to_intel_plane(plane);
+		struct intel_flip *intel_flip;
+
+		if (!st->dirty)
+			continue;
+
+		if (intel_plane->pipe != pipe)
+			continue;
+
+		intel_flip = st->flip;
+		st->flip = NULL;
+
+		drm_flip_init(&intel_flip->base, &intel_plane->flip_helper);
+
+		if (st->event) {
+			intel_flip->event = st->event;
+			st->event = NULL;
+			/* need to keep track of it in case process exits */
+			spin_lock_irq(&dev->event_lock);
+			list_add_tail(&intel_flip->pending_head,
+				      &file_priv->pending_flips);
+			spin_unlock_irq(&dev->event_lock);
+		}
+
+		intel_flip->crtc = intel_get_crtc_for_pipe(dev, pipe);
+		intel_flip->plane = plane;
+
+		intel_plane->calc(plane, plane->fb, &st->coords);
+
+		if (st->old.fb) {
+			intel_flip->old_fb_id = st->old.fb->base.id;
+			intel_flip->old_bo = to_intel_framebuffer(st->old.fb)->obj;
+
+			mutex_lock(&dev->struct_mutex);
+			drm_gem_object_reference(&intel_flip->old_bo->base);
+			mutex_unlock(&dev->struct_mutex);
+
+			/*
+			 * Block clients from rendering to the new back buffer until
+			 * the flip occurs and the object is no longer visible.
+			 */
+			atomic_set_mask(1 << (16 + intel_plane->pipe),
+					&intel_flip->old_bo->pending_flip.counter);
+		}
+
+		list_add_tail(&intel_flip->base.list, &flips);
+	}
+
+	if (list_empty(&flips))
+		return;
+
+	if (!pipe_enabled) {
+		drm_flip_driver_complete_flips(&dev_priv->flip_driver, &flips);
+		return;
+	}
+
+	drm_flip_driver_prepare_flips(&dev_priv->flip_driver, &flips);
+
+	local_irq_disable();
+
+	intel_pipe_vblank_evade(intel_get_crtc_for_pipe(dev, pipe));
+
+	drm_flip_driver_schedule_flips(&dev_priv->flip_driver, &flips);
+
+	local_irq_enable();
+}
+
+void intel_atomic_handle_vblank(struct drm_device *dev, int pipe)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+	struct intel_plane *intel_plane;
+
+	intel_crtc->vbl_received = true;
+	wake_up(&intel_crtc->vbl_wait);
+
+	drm_flip_helper_vblank(&intel_crtc->flip_helper);
+
+	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head) {
+		if (intel_plane->pipe == pipe)
+			drm_flip_helper_vblank(&intel_plane->flip_helper);
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 62e2a56..7394aca 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3554,6 +3554,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 		encoder->disable(encoder);
 
 	intel_crtc_wait_for_pending_flips(crtc);
+	drm_flip_helper_clear(&intel_crtc->flip_helper);
 	drm_vblank_off(dev, pipe);
 	intel_crtc_update_cursor(crtc, false);
 
@@ -3674,6 +3675,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 
 	/* Give the overlay scaler a chance to disable if it's on this pipe */
 	intel_crtc_wait_for_pending_flips(crtc);
+	drm_flip_helper_clear(&intel_crtc->flip_helper);
 	drm_vblank_off(dev, pipe);
 	intel_crtc_dpms_overlay(intel_crtc, false);
 	intel_crtc_update_cursor(crtc, false);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 103d104..cd37428 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,6 +32,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_dp_helper.h>
+#include <drm/drm_flip.h>
 
 #define _wait_for(COND, MS, W) ({ \
 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
@@ -244,6 +245,9 @@ struct intel_crtc {
 	struct intel_pch_pll *pch_pll;
 	uint32_t ddi_pll_sel;
 	struct intel_plane_regs primary_regs;
+	struct drm_flip_helper flip_helper;
+	wait_queue_head_t vbl_wait;
+	bool vbl_received;
 };
 
 struct intel_plane_coords {
@@ -275,6 +279,7 @@ struct intel_plane {
 	void (*prepare)(struct drm_plane *plane);
 	void (*commit)(struct drm_plane *plane);
 	struct intel_plane_regs regs;
+	struct drm_flip_helper flip_helper;
 };
 
 struct intel_watermark_params {
@@ -722,5 +727,7 @@ extern bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
 
 extern int intel_atomic_init(struct drm_device *dev);
 extern void intel_atomic_fini(struct drm_device *dev);
+extern void intel_atomic_free_events(struct drm_device *dev, struct drm_file *file);
+extern void intel_atomic_handle_vblank(struct drm_device *dev, int pipe);
 
 #endif /* __INTEL_DRV_H__ */
-- 
1.7.8.6

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/dri-devel


