Hey,

On 11-01-16 at 22:41, Daniel Vetter wrote:
> There's really no reason to not do so, instead of replicating this
> for every use-case and every driver. Now we can't just nuke the events,
> since that would still mean that all drm_event users would need to know
> when that has happened, since calling e.g. drm_send_event isn't allowed
> any more. Instead just unlink them from the file, and detect this case
> and handle it appropriately in all functions.
>
> v2: Adjust existing kerneldoc too.
>
> v3: Improve wording of the kerneldoc and split out vblank cleanup (Laurent).
>
> Cc: Alex Deucher <alexander.deucher@xxxxxxx>
> Cc: Laurent Pinchart <laurent.pinchart@xxxxxxxxxxxxxxxx>
> Acked-by: Daniel Stone <daniels@xxxxxxxxxxxxx>
> Reviewed-by: Alex Deucher <alexander.deucher@xxxxxxx> (v1)
> Signed-off-by: Daniel Vetter <daniel.vetter@xxxxxxxx>

This patch breaks kms_flip.basic-flip-vs-wf_vblank and probably other tests as well.

>  drivers/gpu/drm/drm_fops.c | 30 +++++++++++++++++++++++++++++-
>  include/drm/drmP.h         |  2 ++
>  2 files changed, 31 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
> index d85af1b2a238..109903f5b68a 100644
> --- a/drivers/gpu/drm/drm_fops.c
> +++ b/drivers/gpu/drm/drm_fops.c
> @@ -264,6 +264,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
>  	INIT_LIST_HEAD(&priv->fbs);
>  	mutex_init(&priv->fbs_lock);
>  	INIT_LIST_HEAD(&priv->blobs);
> +	INIT_LIST_HEAD(&priv->pending_event_list);
>  	INIT_LIST_HEAD(&priv->event_list);
>  	init_waitqueue_head(&priv->event_wait);
>  	priv->event_space = 4096; /* set aside 4k for event buffer */
> @@ -366,6 +367,13 @@ static void drm_events_release(struct drm_file *file_priv)
>  		v->base.destroy(&v->base);
>  	}
>
> +	/* Unlink pending events */
> +	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
> +				 pending_link) {
> +		list_del(&e->pending_link);
> +		e->file_priv = NULL;
> +	}
> +
>  	/* Remove unconsumed events */
>  	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
>  		list_del(&e->link);
> @@ -712,6 +720,7 @@ int drm_event_reserve_init(struct drm_device *dev,
>  	file_priv->event_space -= e->length;
>
>  	p->event = e;
> +	list_add(&p->pending_link, &file_priv->pending_event_list);
>  	p->file_priv = file_priv;
>
>  	/* we *could* pass this in as arg, but everyone uses kfree: */
> @@ -736,7 +745,10 @@ void drm_event_cancel_free(struct drm_device *dev,
>  {
>  	unsigned long flags;
>  	spin_lock_irqsave(&dev->event_lock, flags);
> -	p->file_priv->event_space += p->event->length;
> +	if (p->file_priv) {
> +		p->file_priv->event_space += p->event->length;
> +		list_del(&p->pending_link);
> +	}
>  	spin_unlock_irqrestore(&dev->event_lock, flags);
>  	p->destroy(p);
>  }
> @@ -750,11 +762,22 @@ EXPORT_SYMBOL(drm_event_cancel_free);
>   * This function sends the event @e, initialized with drm_event_reserve_init(),
>   * to its associated userspace DRM file. Callers must already hold
>   * dev->event_lock, see drm_send_event() for the unlocked version.
> + *
> + * Note that the core will take care of unlinking and disarming events when the
> + * corresponding DRM file is closed. Drivers need not worry about whether the
> + * DRM file for this event still exists and can call this function upon
> + * completion of the asynchronous work unconditionally.
>   */
>  void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
>  {
>  	assert_spin_locked(&dev->event_lock);
>
> +	if (!e->file_priv) {
> +		e->destroy(e);
> +		return;
> +	}
> +
> +	list_del(&e->pending_link);
>  	list_add_tail(&e->link,
>  		      &e->file_priv->event_list);
>  	wake_up_interruptible(&e->file_priv->event_wait);
> @@ -769,6 +792,11 @@ EXPORT_SYMBOL(drm_send_event_locked);
>   * This function sends the event @e, initialized with drm_event_reserve_init(),
>   * to its associated userspace DRM file. This function acquires dev->event_lock,
>   * see drm_send_event_locked() for callers which already hold this lock.
> + *
> + * Note that the core will take care of unlinking and disarming events when the
> + * corresponding DRM file is closed. Drivers need not worry about whether the
> + * DRM file for this event still exists and can call this function upon
> + * completion of the asynchronous work unconditionally.
>   */
>  void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
>  {
> diff --git a/include/drm/drmP.h b/include/drm/drmP.h
> index ae73abf5c2cf..3d78a7406d54 100644
> --- a/include/drm/drmP.h
> +++ b/include/drm/drmP.h
> @@ -283,6 +283,7 @@ struct drm_ioctl_desc {
>  struct drm_pending_event {
>  	struct drm_event *event;
>  	struct list_head link;
> +	struct list_head pending_link;
>  	struct drm_file *file_priv;
>  	pid_t pid; /* pid of requester, no guarantee it's valid by the time
>  		      we deliver the event, for tracing only */
> @@ -346,6 +347,7 @@ struct drm_file {
>  	struct list_head blobs;
>
>  	wait_queue_head_t event_wait;
> +	struct list_head pending_event_list;
>  	struct list_head event_list;
>  	int event_space;
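For reference, my understanding of the driver-side flow this is meant to enable is roughly the sketch below. The my_* struct and function names are made up purely for illustration; only the drm_* helpers and types are the existing core ones, and a real driver would of course also fill in the timestamp/sequence data before sending.

#include <linux/slab.h>
#include <drm/drmP.h>

struct my_flip_state {
	struct drm_pending_vblank_event *event;
};

/* ioctl path: reserve the event while the DRM file is known to be open */
static int my_driver_arm_flip(struct drm_device *dev,
			      struct drm_file *file_priv,
			      struct my_flip_state *state,
			      u64 user_data)
{
	struct drm_pending_vblank_event *e;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = user_data;

	/* charges file_priv->event_space and links onto pending_event_list */
	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
	if (ret) {
		kfree(e);
		return ret;
	}

	state->event = e;
	return 0;
}

/*
 * Completion path (e.g. from the irq handler): with this patch the driver no
 * longer needs to check whether the file is still open; events unlinked in
 * drm_events_release() are simply freed by the core instead of delivered.
 */
static void my_driver_flip_done(struct drm_device *dev,
				struct my_flip_state *state)
{
	drm_send_event(dev, &state->event->base);
	state->event = NULL;
}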