[PATCH 03/16] drm: convert crtc to ww_mutex

For atomic, it will be quite convenient to not have to care so much
about locking order.  And 'state' gives us a convenient place to stash a
ww_ctx for any sort of update that needs to grab multiple crtc locks.

Because we will want to eventually make locking even more fine-grained
(giving locks to planes, connectors, etc), split out drm_modeset_lock
so that the atomic state won't later have to keep separate lists of
locked planes/crtcs/etc.

The state keeps track of which locks it has acquired, and for the benefit
of NONBLOCK operations, supports transferring "locked" resources to a
driver worker/thread to complete the asynchronous state change.

Signed-off-by: Rob Clark <robdclark@xxxxxxxxx>
---
 drivers/gpu/drm/drm_atomic.c         | 141 ++++++++++++++++++++++++++++++++++-
 drivers/gpu/drm/drm_crtc.c           | 107 +++++++++++++++++++++++---
 drivers/gpu/drm/i915/intel_display.c |  12 +--
 drivers/gpu/drm/omapdrm/omap_crtc.c  |  10 +--
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  |   8 +-
 include/drm/drmP.h                   |   5 --
 include/drm/drm_atomic.h             |  18 +++++
 include/drm/drm_crtc.h               |  75 +++++++++++++++++--
 include/uapi/drm/drm_mode.h          |   3 +
 9 files changed, 342 insertions(+), 37 deletions(-)
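
A note for readers not familiar with wound/wait mutexes: the reason locking
order stops mattering is the acquire/backoff dance that ww_mutex provides,
which grab_locks() below follows.  The sketch here is only a generic
illustration of that pattern under made-up names (example_ww_class,
example_lock and example_lock_all are not part of the patch); the ww_*
calls themselves are the stock kernel API.

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_ww_class);	/* made-up class name */

struct example_lock {
	struct ww_mutex mutex;
};

/* Acquire all of 'locks' in arbitrary order without deadlocking. */
static int example_lock_all(struct example_lock **locks, int n,
		struct ww_acquire_ctx *ctx)
{
	int i, contended = -1, ret;

	ww_acquire_init(ctx, &example_ww_class);
retry:
	for (i = 0; i < n; i++) {
		if (i == contended) {
			/* already taken via the slow path on the last pass */
			contended = -1;
			continue;
		}
		ret = ww_mutex_lock(&locks[i]->mutex, ctx);
		if (ret == -EDEADLK) {
			int j;

			/* we lost the seqno race: back everything off ... */
			for (j = 0; j < i; j++)
				ww_mutex_unlock(&locks[j]->mutex);
			if (contended != -1)
				ww_mutex_unlock(&locks[contended]->mutex);

			/* ... sleep on the contended lock, then start over */
			ww_mutex_lock_slow(&locks[i]->mutex, ctx);
			contended = i;
			goto retry;
		}
		/* anything else (e.g. -EALREADY) would be a caller bug here */
		WARN_ON(ret);
	}
	ww_acquire_done(ctx);
	return 0;
}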
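
And to make the intended calling sequence concrete, here is a rough sketch
of a synchronous update driving the entry points added in this series.  Only
drm_atomic_begin()/drm_modeset_lock()/drm_atomic_check()/drm_atomic_commit()/
drm_atomic_end() and the DRM_MODE_ATOMIC_* flags come from the patches; the
wrapper function and its error-handling policy are hypothetical, and
-EDEADLK backoff on the initial lock acquisition is left out for brevity.

/* Hypothetical caller, for illustration only. */
static int example_update_crtc(struct drm_device *dev,
		struct drm_crtc *crtc, bool nonblock)
{
	uint32_t flags = nonblock ? DRM_MODE_ATOMIC_NONBLOCK : 0;
	void *state;
	int ret;

	state = drm_atomic_begin(dev, flags);
	/* (error handling for begin() elided -- its failure convention
	 * is not visible in this hunk) */

	/* take the CRTC's lock under the state's acquire context; it is
	 * tracked on the state's 'locked' list and released when the
	 * update concludes */
	ret = drm_modeset_lock(&crtc->mutex, state);
	if (ret)
		goto out;

	/* ... stage the new fb/mode/properties into 'state' here ... */

	ret = drm_atomic_check(dev, state);
	if (ret)
		goto out;

	ret = drm_atomic_commit(dev, state);	/* releases the locks */
out:
	drm_atomic_end(dev, state);	/* drops anything still held */
	return ret;
}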

diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c3e267a..c73526e 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -49,9 +49,14 @@ void *drm_atomic_begin(struct drm_device *dev, uint32_t flags)
 	state = ptr;
 	ptr = &state[1];
 
+	ww_acquire_init(&state->ww_ctx, &crtc_ww_class);
+	INIT_LIST_HEAD(&state->locked);
+
+	mutex_init(&state->mutex);
 	kref_init(&state->refcount);
 	state->dev = dev;
 	state->flags = flags;
+
 	return state;
 }
 EXPORT_SYMBOL(drm_atomic_begin);
@@ -87,10 +92,108 @@ EXPORT_SYMBOL(drm_atomic_set_event);
  */
 int drm_atomic_check(struct drm_device *dev, void *state)
 {
+	((struct drm_atomic_state *)state)->checked = true;
 	return 0;  /* for now */
 }
 EXPORT_SYMBOL(drm_atomic_check);
 
+/* Note that we drop and re-acquire the locks w/ ww_mutex directly,
+ * since we keep the lock in our list with atomic_pending == true.
+ */
+
+static void drop_locks(struct drm_atomic_state *a,
+		struct ww_acquire_ctx *ww_ctx)
+{
+	struct drm_modeset_lock *lock;
+
+	mutex_lock(&a->mutex);
+	list_for_each_entry(lock, &a->locked, head)
+		ww_mutex_unlock(&lock->mutex);
+	mutex_unlock(&a->mutex);
+
+	ww_acquire_fini(ww_ctx);
+}
+
+static void grab_locks(struct drm_atomic_state *a,
+		struct ww_acquire_ctx *ww_ctx)
+{
+	struct drm_modeset_lock *lock, *slow_locked, *contended;
+	int ret;
+
+	lock = slow_locked = contended = NULL;
+
+
+	ww_acquire_init(ww_ctx, &crtc_ww_class);
+
+	/*
+	 * We need to do the proper rain^Hww dance.. another context
+	 * could sneak in and grab the lock in order to check
+	 * lock->atomic_pending, and we get -EDEADLK.  But the winner
+	 * will realize the mistake when it sees atomic_pending
+	 * already set, and then drop the lock and return -EBUSY.
+	 * So we just need to keep dancing until we win.
+	 */
+retry:
+	ret = 0;
+	list_for_each_entry(lock, &a->locked, head) {
+		if (lock == slow_locked) {
+			slow_locked = NULL;
+			continue;
+		}
+		contended = lock;
+		ret = ww_mutex_lock(&lock->mutex, ww_ctx);
+		if (ret)
+			goto fail;
+	}
+
+fail:
+	if (ret == -EDEADLK) {
+		/* we lost out in a seqno race, backoff, lock and retry.. */
+
+		list_for_each_entry(lock, &a->locked, head) {
+			if (lock == contended)
+				break;
+			ww_mutex_unlock(&lock->mutex);
+		}
+
+		if (slow_locked)
+			ww_mutex_unlock(&slow_locked->mutex);
+
+		ww_mutex_lock_slow(&contended->mutex, ww_ctx);
+		slow_locked = contended;
+		goto retry;
+	}
+	WARN_ON(ret);   /* if we get EALREADY then something is fubar */
+}
+
+static void commit_locks(struct drm_atomic_state *a,
+		struct ww_acquire_ctx *ww_ctx)
+{
+	/* and properly release them (clear atomic_pending, remove from list): */
+	mutex_lock(&a->mutex);
+	while (!list_empty(&a->locked)) {
+		struct drm_modeset_lock *lock;
+
+		lock = list_first_entry(&a->locked,
+				struct drm_modeset_lock, head);
+
+		drm_modeset_unlock(lock);
+	}
+	mutex_unlock(&a->mutex);
+	ww_acquire_fini(ww_ctx);
+	a->committed = true;
+}
+
+static int atomic_commit(struct drm_atomic_state *a,
+		struct ww_acquire_ctx *ww_ctx)
+{
+	int ret = 0;
+
+	commit_locks(a, ww_ctx);
+
+	return ret;
+}
+
 /**
  * drm_atomic_commit - commit state
  * @dev: DRM device
@@ -104,11 +207,26 @@ EXPORT_SYMBOL(drm_atomic_check);
  */
 int drm_atomic_commit(struct drm_device *dev, void *state)
 {
-	return 0;  /* for now */
+	struct drm_atomic_state *a = state;
+	return atomic_commit(a, &a->ww_ctx);
 }
 EXPORT_SYMBOL(drm_atomic_commit);
 
 /**
+ * drm_atomic_commit_unlocked - like drm_atomic_commit
+ * but can be called back by the driver from another thread.  Manages
+ * the lock transfer from the initiating thread.
+ */
+int drm_atomic_commit_unlocked(struct drm_device *dev, void *state)
+{
+	struct drm_atomic_state *a = state;
+	struct ww_acquire_ctx ww_ctx;
+	grab_locks(a, &ww_ctx);
+	return atomic_commit(a, &ww_ctx);
+}
+EXPORT_SYMBOL(drm_atomic_commit_unlocked);
+
+/**
  * drm_atomic_end - conclude the atomic update
  * @dev: DRM device
  * @state: the driver private state object
@@ -117,15 +235,32 @@ EXPORT_SYMBOL(drm_atomic_commit);
  */
 void drm_atomic_end(struct drm_device *dev, void *state)
 {
+	struct drm_atomic_state *a = state;
+
+	/* if the commit is happening from another thread, it will
+	 * block grabbing the locks until we drop them here (and will
+	 * not set a->committed until after), so this is not a race:
+	 */
+	if (!a->committed)
+		drop_locks(a, &a->ww_ctx);
+
 	drm_atomic_state_unreference(state);
 }
 EXPORT_SYMBOL(drm_atomic_end);
 
 void _drm_atomic_state_free(struct kref *kref)
 {
-	struct drm_atomic_state *state =
+	struct drm_atomic_state *a =
 		container_of(kref, struct drm_atomic_state, refcount);
-	kfree(state);
+
+	/* in case we haven't already: */
+	if (!a->committed) {
+		grab_locks(a, &a->ww_ctx);
+		commit_locks(a, &a->ww_ctx);
+	}
+
+	mutex_destroy(&a->mutex);
+	kfree(a);
 }
 EXPORT_SYMBOL(_drm_atomic_state_free);
 
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f96ca7f..c68e531 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -37,6 +37,91 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_atomic.h>
+
+static int modeset_lock_state(struct drm_modeset_lock *lock,
+		struct drm_atomic_state *a)
+{
+	int ret;
+
+	if (a->flags & DRM_MODE_ATOMIC_NOLOCK)
+		return 0;
+
+	WARN_ON(a->checked);     /* all locks should be held by now! */
+
+retry:
+	ret = ww_mutex_lock(&lock->mutex, &a->ww_ctx);
+	if (!ret) {
+		if (lock->atomic_pending) {
+			/* some other pending update with dropped locks */
+			ww_mutex_unlock(&lock->mutex);
+			if (a->flags & DRM_MODE_ATOMIC_NONBLOCK)
+				return -EBUSY;
+			wait_event(lock->event, !lock->atomic_pending);
+			goto retry;
+		}
+		lock->atomic_pending = true;
+		WARN_ON(!list_empty(&lock->head));
+		list_add(&lock->head, &a->locked);
+	} else if (ret == -EALREADY) {
+		/* we already hold the lock.. this is fine */
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/**
+ * drm_modeset_lock - take modeset lock
+ * @lock: lock to take
+ * @state: atomic state
+ *
+ * If state is not NULL, then its acquire context is used
+ * and the lock does not need to be explicitly unlocked;
+ * it will be unlocked automatically when the atomic
+ * update is complete.
+ */
+int drm_modeset_lock(struct drm_modeset_lock *lock, void *state)
+{
+	if (state)
+		return modeset_lock_state(lock, state);
+
+	ww_mutex_lock(&lock->mutex, NULL);
+	return 0;
+}
+EXPORT_SYMBOL(drm_modeset_lock);
+
+/**
+ * drm_modeset_lock_all_crtcs - helper to drm_modeset_lock() all CRTCs
+ */
+int drm_modeset_lock_all_crtcs(struct drm_device *dev, void *state)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_crtc *crtc;
+	int ret = 0;
+
+	list_for_each_entry(crtc, &config->crtc_list, head) {
+		ret = drm_modeset_lock(&crtc->mutex, state);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
+
+/**
+ * drm_modeset_unlock - drop modeset lock
+ * @lock: lock to release
+ */
+void drm_modeset_unlock(struct drm_modeset_lock *lock)
+{
+	list_del_init(&lock->head);
+	lock->atomic_pending = false;
+	ww_mutex_unlock(&lock->mutex);
+	wake_up_all(&lock->event);
+}
+EXPORT_SYMBOL(drm_modeset_unlock);
 
 /**
  * drm_modeset_lock_all - take all modeset locks
@@ -52,7 +137,7 @@ void drm_modeset_lock_all(struct drm_device *dev)
 	mutex_lock(&dev->mode_config.mutex);
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+		drm_modeset_lock(&crtc->mutex, NULL);
 }
 EXPORT_SYMBOL(drm_modeset_lock_all);
 
@@ -65,7 +150,7 @@ void drm_modeset_unlock_all(struct drm_device *dev)
 	struct drm_crtc *crtc;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		mutex_unlock(&crtc->mutex);
+		drm_modeset_unlock(&crtc->mutex);
 
 	mutex_unlock(&dev->mode_config.mutex);
 }
@@ -84,7 +169,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
 		return;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		WARN_ON(!mutex_is_locked(&crtc->mutex));
+		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 }
@@ -630,6 +715,8 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 }
 EXPORT_SYMBOL(drm_framebuffer_remove);
 
+DEFINE_WW_CLASS(crtc_ww_class);
+
 /**
  * drm_crtc_init - Initialise a new CRTC object
  * @dev: DRM device
@@ -652,8 +739,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 	crtc->invert_dimensions = false;
 
 	drm_modeset_lock_all(dev);
-	mutex_init(&crtc->mutex);
-	mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+	drm_modeset_lock_init(&crtc->mutex);
+	drm_modeset_lock(&crtc->mutex, NULL);  /* dropped by _unlock_all() */
 
 	ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
 	if (ret)
@@ -689,6 +776,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 	kfree(crtc->gamma_store);
 	crtc->gamma_store = NULL;
 
+	drm_modeset_lock_fini(&crtc->mutex);
+
 	drm_mode_object_put(dev, &crtc->base);
 	list_del(&crtc->head);
 	dev->mode_config.num_crtc--;
@@ -2578,7 +2667,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
 	}
 	crtc = obj_to_crtc(obj);
 
-	mutex_lock(&crtc->mutex);
+	drm_modeset_lock(&crtc->mutex, NULL);
 	if (req->flags & DRM_MODE_CURSOR_BO) {
 		if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
 			ret = -ENXIO;
@@ -2602,7 +2691,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
 		}
 	}
 out:
-	mutex_unlock(&crtc->mutex);
+	drm_modeset_unlock(&crtc->mutex);
 
 	return ret;
 
@@ -3949,7 +4038,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 		return -ENOENT;
 	crtc = obj_to_crtc(obj);
 
-	mutex_lock(&crtc->mutex);
+	drm_modeset_lock(&crtc->mutex, NULL);
 	if (crtc->primary->fb == NULL) {
 		/* The framebuffer is currently unbound, presumably
 		 * due to a hotplug event, that userspace has not
@@ -4033,7 +4122,7 @@ out:
 		drm_framebuffer_unreference(fb);
 	if (old_fb)
 		drm_framebuffer_unreference(old_fb);
-	mutex_unlock(&crtc->mutex);
+	drm_modeset_unlock(&crtc->mutex);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0c69c67..7c0bd2e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2285,7 +2285,7 @@ void intel_display_handle_reset(struct drm_device *dev)
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-		mutex_lock(&crtc->mutex);
+		drm_modeset_lock(&crtc->mutex, NULL);
 		/*
 		 * FIXME: Once we have proper support for primary planes (and
 		 * disabling them without disabling the entire crtc) allow again
@@ -2296,7 +2296,7 @@ void intel_display_handle_reset(struct drm_device *dev)
 						       crtc->primary->fb,
 						       crtc->x,
 						       crtc->y);
-		mutex_unlock(&crtc->mutex);
+		drm_modeset_unlock(&crtc->mutex);
 	}
 }
 
@@ -7817,7 +7817,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 	if (encoder->crtc) {
 		crtc = encoder->crtc;
 
-		mutex_lock(&crtc->mutex);
+		drm_modeset_lock(&crtc->mutex, NULL);
 
 		old->dpms_mode = connector->dpms;
 		old->load_detect_temp = false;
@@ -7848,7 +7848,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 		return false;
 	}
 
-	mutex_lock(&crtc->mutex);
+	drm_modeset_lock(&crtc->mutex, NULL);
 	intel_encoder->new_crtc = to_intel_crtc(crtc);
 	to_intel_connector(connector)->new_encoder = intel_encoder;
 
@@ -7927,7 +7927,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 			drm_framebuffer_unreference(old->release_fb);
 		}
 
-		mutex_unlock(&crtc->mutex);
+		drm_modeset_unlock(&crtc->mutex);
 		return;
 	}
 
@@ -7935,7 +7935,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 	if (old->dpms_mode != DRM_MODE_DPMS_ON)
 		connector->funcs->dpms(connector, old->dpms_mode);
 
-	mutex_unlock(&crtc->mutex);
+	drm_modeset_unlock(&crtc->mutex);
 }
 
 static int i9xx_pll_refclk(struct drm_device *dev,
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 846d509..9d089bd 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -307,13 +307,13 @@ static void page_flip_worker(struct work_struct *work)
 	struct drm_display_mode *mode = &crtc->mode;
 	struct drm_gem_object *bo;
 
-	mutex_lock(&crtc->mutex);
+	drm_modeset_lock(&crtc->mutex, NULL);
 	omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
 			0, 0, mode->hdisplay, mode->vdisplay,
 			crtc->x << 16, crtc->y << 16,
 			mode->hdisplay << 16, mode->vdisplay << 16,
 			vblank_cb, crtc);
-	mutex_unlock(&crtc->mutex);
+	drm_modeset_unlock(&crtc->mutex);
 
 	bo = omap_framebuffer_bo(crtc->fb, 0);
 	drm_gem_object_unreference_unlocked(bo);
@@ -447,7 +447,7 @@ static void apply_worker(struct work_struct *work)
 	 * the callbacks and list modification all serialized
 	 * with respect to modesetting ioctls from userspace.
 	 */
-	mutex_lock(&crtc->mutex);
+	drm_modeset_lock(&crtc->mutex, NULL);
 	dispc_runtime_get();
 
 	/*
@@ -492,7 +492,7 @@ static void apply_worker(struct work_struct *work)
 
 out:
 	dispc_runtime_put();
-	mutex_unlock(&crtc->mutex);
+	drm_modeset_unlock(&crtc->mutex);
 }
 
 int omap_crtc_apply(struct drm_crtc *crtc,
@@ -500,7 +500,7 @@ int omap_crtc_apply(struct drm_crtc *crtc,
 {
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
-	WARN_ON(!mutex_is_locked(&crtc->mutex));
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 	/* no need to queue it again if it is already queued: */
 	if (apply->queued)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 53bbcbe..9bcf767 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -187,7 +187,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	 * can do this since the caller in the drm core doesn't check anything
 	 * which is protected by any looks.
 	 */
-	mutex_unlock(&crtc->mutex);
+	drm_modeset_unlock(&crtc->mutex);
 	drm_modeset_lock_all(dev_priv->dev);
 
 	/* A lot of the code assumes this */
@@ -252,7 +252,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	ret = 0;
 out:
 	drm_modeset_unlock_all(dev_priv->dev);
-	mutex_lock(&crtc->mutex);
+	drm_modeset_lock(&crtc->mutex, NULL);
 
 	return ret;
 }
@@ -273,7 +273,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	 * can do this since the caller in the drm core doesn't check anything
 	 * which is protected by any looks.
 	 */
-	mutex_unlock(&crtc->mutex);
+	drm_modeset_unlock(&crtc->mutex);
 	drm_modeset_lock_all(dev_priv->dev);
 
 	vmw_cursor_update_position(dev_priv, shown,
@@ -281,7 +281,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 				   du->cursor_y + du->hotspot_y);
 
 	drm_modeset_unlock_all(dev_priv->dev);
-	mutex_lock(&crtc->mutex);
+	drm_modeset_lock(&crtc->mutex, NULL);
 
 	return 0;
 }
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index b4058f5..2f72875 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1318,11 +1318,6 @@ static inline int drm_device_is_unplugged(struct drm_device *dev)
 	return ret;
 }
 
-static inline bool drm_modeset_is_locked(struct drm_device *dev)
-{
-	return mutex_is_locked(&dev->mode_config.mutex);
-}
-
 static inline bool drm_is_render_client(struct drm_file *file_priv)
 {
 	return file_priv->minor->type == DRM_MINOR_RENDER;
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index afeefe3..bbf7f68 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -73,6 +73,7 @@ int drm_atomic_set_event(struct drm_device *dev,
 		struct drm_pending_vblank_event *event);
 int drm_atomic_check(struct drm_device *dev, void *state);
 int drm_atomic_commit(struct drm_device *dev, void *state);
+int drm_atomic_commit_unlocked(struct drm_device *dev, void *state);
 void drm_atomic_end(struct drm_device *dev, void *state);
 
 /**
@@ -82,6 +83,23 @@ struct drm_atomic_state {
 	struct kref refcount;
 	struct drm_device *dev;
 	uint32_t flags;
+
+	bool committed;
+	bool checked;       /* just for debugging */
+
+	struct ww_acquire_ctx ww_ctx;
+	/* list of 'struct drm_modeset_lock': */
+	struct list_head locked;
+
+	/* currently simply for protecting against 'locked' list manipulation
+	 * between the original thread calling atomic->end() and the driver
+	 * thread calling back drm_atomic_commit_unlocked().
+	 *
+	 * Other spots are sufficiently synchronized by virtue of holding
+	 * the lock's ww_mutex.  But during the lock/resource hand-over to the
+	 * driver thread (drop_locks()/grab_locks()), we cannot rely on this.
+	 */
+	struct mutex mutex;
 };
 
 static inline void
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index e75cee6..464057e 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -27,6 +27,7 @@
 
 #include <linux/i2c.h>
 #include <linux/spinlock.h>
+#include <linux/ww_mutex.h>
 #include <linux/types.h>
 #include <linux/idr.h>
 #include <linux/fb.h>
@@ -311,6 +312,73 @@ struct drm_property {
 	struct list_head enum_blob_list;
 };
 
+/**
+ * struct drm_modeset_lock - used for locking modeset resources.
+ * @mutex: resource locking
+ * @atomic_pending: is this resource part of a still-pending
+ *    atomic update
+ * @head: used to hold its place on the state->locked list when
+ *    part of an atomic update
+ *
+ * Used for locking CRTCs and other modeset resources.
+ */
+struct drm_modeset_lock {
+	/**
+	 * modeset lock
+	 */
+	struct ww_mutex mutex;
+
+	/**
+	 * Are we busy (pending asynchronous/NONBLOCK update)?  Any further
+	 * asynchronous update will return -EBUSY if it also needs to acquire
+	 * this lock, while a synchronous update will block until the pending
+	 * async update completes.
+	 *
+	 * Drivers must ensure the update is completed before sending the
+	 * vblank event to userspace.  Typically this just means not sending
+	 * the event before drm_atomic_commit_unlocked() returns.
+	 */
+	bool atomic_pending;
+
+	/**
+	 * Resources that are locked as part of an atomic update are added
+	 * to a list (so we know what to unlock at the end).
+	 */
+	struct list_head head;
+
+	/**
+	 * For waiting on atomic_pending locks, if not a NONBLOCK operation.
+	 */
+	wait_queue_head_t event;
+};
+
+extern struct ww_class crtc_ww_class;
+
+static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock)
+{
+	ww_mutex_init(&lock->mutex, &crtc_ww_class);
+	INIT_LIST_HEAD(&lock->head);
+	init_waitqueue_head(&lock->event);
+}
+
+static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock)
+{
+	WARN_ON(!list_empty(&lock->head));
+}
+
+static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
+{
+	return ww_mutex_is_locked(&lock->mutex);
+}
+
+int drm_modeset_lock(struct drm_modeset_lock *lock, void *state);
+int drm_modeset_lock_all_crtcs(struct drm_device *dev, void *state);
+void drm_modeset_unlock(struct drm_modeset_lock *lock);
+
+void drm_modeset_lock_all(struct drm_device *dev);
+void drm_modeset_unlock_all(struct drm_device *dev);
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
+
 struct drm_crtc;
 struct drm_connector;
 struct drm_encoder;
@@ -387,6 +455,7 @@ struct drm_crtc_funcs {
  * drm_crtc - central CRTC control structure
  * @dev: parent DRM device
  * @head: list management
+ * @mutex: per-CRTC locking
  * @base: base KMS object for ID tracking etc.
  * @enabled: is this CRTC enabled?
  * @mode: current mode timings
@@ -419,7 +488,7 @@ struct drm_crtc {
 	 * state, ...) and a write lock for everything which can be update
 	 * without a full modeset (fb, cursor data, ...)
 	 */
-	struct mutex mutex;
+	struct drm_modeset_lock mutex;
 
 	struct drm_mode_object base;
 
@@ -1027,10 +1096,6 @@ struct drm_prop_enum_list {
 	char *name;
 };
 
-extern void drm_modeset_lock_all(struct drm_device *dev);
-extern void drm_modeset_unlock_all(struct drm_device *dev);
-extern void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
-
 extern int drm_crtc_init(struct drm_device *dev,
 			 struct drm_crtc *crtc,
 			 struct drm_plane *primary,
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index f104c26..12e2139 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -496,4 +496,7 @@ struct drm_mode_destroy_dumb {
 	uint32_t handle;
 };
 
+#define DRM_MODE_ATOMIC_NONBLOCK  0x0200
+#define DRM_MODE_ATOMIC_NOLOCK    0x8000  /* only used internally */
+
 #endif
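
For completeness, a sketch of the NONBLOCK lock hand-over described above:
the submitting thread calls drm_atomic_end(), which drops the ww_mutexes but
leaves atomic_pending set on each lock, and a driver worker later calls
drm_atomic_commit_unlocked(), which re-acquires the locks under a fresh
acquire context and then releases them for real.  The work item and its
scheduling are made up for illustration; only drm_atomic_commit_unlocked()
and drm_atomic_state_unreference() come from the patch, and the extra state
reference the submitter is assumed to take before scheduling the worker is
not shown.

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>

/* Hypothetical driver worker completing a NONBLOCK update. */
struct example_async_commit {
	struct work_struct work;
	struct drm_device *dev;
	void *state;		/* reference handed over by the submitter */
};

static void example_async_commit_worker(struct work_struct *work)
{
	struct example_async_commit *c =
		container_of(work, struct example_async_commit, work);

	/* blocks until the submitter has dropped the ww_mutexes in
	 * drm_atomic_end(), re-acquires them, completes the commit and
	 * clears atomic_pending so blocked updates can proceed */
	drm_atomic_commit_unlocked(c->dev, c->state);

	/* ... only send vblank events to userspace after this point ... */

	drm_atomic_state_unreference(c->state);
	kfree(c);
}
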
-- 
1.8.5.3

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/dri-devel



