[PATCH] drm: Clean up pending events in the core

There's really no reason not to do so, instead of replicating this
for every use-case and every driver. We can't just nuke the events
outright though, since then every drm_event user would need to know
when that has happened, because calling e.g. drm_send_event() isn't
allowed any more. Instead just unlink them from the file, and detect
this case and handle it appropriately in all functions.

v2: Adjust existing kerneldoc too.
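
For illustration only, nothing below is part of the patch: roughly the
driver-side flow this enables. struct my_async_work, my_start_hw() and
my_hw_done() are made-up names; drm_event_reserve_init() and
drm_send_event() are the helpers touched here, and 0x80000000 just
stands in for a driver-private event type.

/* sketch; needs <drm/drmP.h> and <linux/slab.h> */
struct my_async_work {
	struct drm_pending_event base;	/* must be first: the core frees it with kfree() */
	struct drm_event event;		/* payload copied to userspace by drm_read() */
	struct drm_device *dev;
};

static int my_queue_work(struct drm_device *dev, struct drm_file *file_priv)
{
	struct my_async_work *work;
	int ret;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->dev = dev;
	work->event.type = 0x80000000;		/* hypothetical driver-private type */
	work->event.length = sizeof(work->event);

	/* Charges file_priv->event_space and links the event onto
	 * file_priv->pending_event_list. */
	ret = drm_event_reserve_init(dev, file_priv, &work->base, &work->event);
	if (ret) {
		kfree(work);
		return ret;
	}

	my_start_hw(work);	/* completes asynchronously, maybe after close() */
	return 0;
}

static void my_hw_done(struct my_async_work *work)
{
	/* No need to check whether the DRM file still exists: if it was
	 * closed, the core unlinked the event and cleared file_priv, and
	 * drm_send_event() just destroys it. */
	drm_send_event(work->dev, &work->base);
}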

Cc: Alex Deucher <alexander.deucher@xxxxxxx>
Signed-off-by: Daniel Vetter <daniel.vetter@xxxxxxxx>
---
 drivers/gpu/drm/drm_fops.c | 35 ++++++++++++++++++++++++++---------
 include/drm/drmP.h         |  2 ++
 2 files changed, 28 insertions(+), 9 deletions(-)
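
And the error-path counterpart, again just a sketch reusing the made-up
struct my_async_work from above (my_arm_hw() is hypothetical and may
fail): if the driver bails out after reserving the event,
drm_event_cancel_free() gives the event_space back (while the file is
still open), unlinks the event and frees it.

static int my_queue_work_checked(struct drm_device *dev,
				 struct drm_file *file_priv,
				 struct my_async_work *work)
{
	int ret;

	/* work must be kmalloc()ed; the core frees it via e->destroy. */
	ret = drm_event_reserve_init(dev, file_priv, &work->base, &work->event);
	if (ret)
		return ret;

	ret = my_arm_hw(work);
	if (ret) {
		/* Refunds file_priv->event_space, drops the event from
		 * pending_event_list and kfree()s work. */
		drm_event_cancel_free(dev, &work->base);
		return ret;
	}

	return 0;
}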

diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index d85af1b2a238..d32b24c74e08 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -264,6 +264,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 	INIT_LIST_HEAD(&priv->fbs);
 	mutex_init(&priv->fbs_lock);
 	INIT_LIST_HEAD(&priv->blobs);
+	INIT_LIST_HEAD(&priv->pending_event_list);
 	INIT_LIST_HEAD(&priv->event_list);
 	init_waitqueue_head(&priv->event_wait);
 	priv->event_space = 4096; /* set aside 4k for event buffer */
@@ -353,18 +354,16 @@ static void drm_events_release(struct drm_file *file_priv)
 {
 	struct drm_device *dev = file_priv->minor->dev;
 	struct drm_pending_event *e, *et;
-	struct drm_pending_vblank_event *v, *vt;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
-	/* Remove pending flips */
-	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
-		if (v->base.file_priv == file_priv) {
-			list_del(&v->base.link);
-			drm_vblank_put(dev, v->pipe);
-			v->base.destroy(&v->base);
-		}
+	/* Unlink pending events */
+	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
+				 pending_link) {
+		list_del(&e->pending_link);
+		e->file_priv = NULL;
+	}
 
 	/* Remove unconsumed events */
 	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
@@ -712,6 +711,7 @@ int drm_event_reserve_init(struct drm_device *dev,
 	file_priv->event_space -= e->length;
 
 	p->event = e;
+	list_add(&p->pending_link, &file_priv->pending_event_list);
 	p->file_priv = file_priv;
 
 	/* we *could* pass this in as arg, but everyone uses kfree: */
@@ -736,7 +736,10 @@ void drm_event_cancel_free(struct drm_device *dev,
 {
 	unsigned long flags;
 	spin_lock_irqsave(&dev->event_lock, flags);
-	p->file_priv->event_space += p->event->length;
+	if (p->file_priv) {
+		p->file_priv->event_space += p->event->length;
+		list_del(&p->pending_link);
+	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 	p->destroy(p);
 }
@@ -750,11 +753,21 @@ EXPORT_SYMBOL(drm_event_cancel_free);
  * This function sends the event @e, initialized with drm_event_reserve_init(),
  * to its associated userspace DRM file. Callers must already hold
  * dev->event_lock, see drm_send_event() for the unlocked version.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about that and can
+ * call this function upon completion of the asynchronous work unconditionally.
  */
 void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
 {
 	assert_spin_locked(&dev->event_lock);
 
+	if (!e->file_priv) {
+		e->destroy(e);
+		return;
+	}
+
+	list_del(&e->pending_link);
 	list_add_tail(&e->link,
 		      &e->file_priv->event_list);
 	wake_up_interruptible(&e->file_priv->event_wait);
@@ -769,6 +782,10 @@ EXPORT_SYMBOL(drm_send_event_locked);
  * This function sends the event @e, initialized with drm_event_reserve_init(),
  * to its associated userspace DRM file. This function acquires dev->event_lock,
  * see drm_send_event_locked() for callers which already hold this lock.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about that and can
+ * call this function upon completion of the asynchronous work unconditionally.
  */
 void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
 {
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index ae73abf5c2cf..3d78a7406d54 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -283,6 +283,7 @@ struct drm_ioctl_desc {
 struct drm_pending_event {
 	struct drm_event *event;
 	struct list_head link;
+	struct list_head pending_link;
 	struct drm_file *file_priv;
 	pid_t pid; /* pid of requester, no guarantee it's valid by the time
 		      we deliver the event, for tracing only */
@@ -346,6 +347,7 @@ struct drm_file {
 	struct list_head blobs;
 
 	wait_queue_head_t event_wait;
+	struct list_head pending_event_list;
 	struct list_head event_list;
 	int event_space;
 
-- 
2.6.4
