drm_wait_vblank must be DRM_UNLOCKED because otherwise it will grab
the drm_global_mutex and then go to sleep until the vblank event it
is waiting for arrives. That can wreak havoc in the windowing system,
because if one process issues this ioctl, it blocks all other
processes for the duration of all vblanks between the current one and
the one it is waiting for. In some cases it can block the entire
windowing system.

So, make this ioctl DRM_UNLOCKED, wrap the remaining (very short)
critical section with the dev->vbl_lock spinlock, and add a comment
to the code explaining what we are protecting with the lock.

v2: incorporate comments received from Daniel Vetter and Michel Daenzer.

Signed-off-by: Ilija Hadzic <ihadzic@xxxxxxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/drm_drv.c |    2 +-
 drivers/gpu/drm/drm_irq.c |   14 +++++++++++++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index e159f17..c990dab 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -124,7 +124,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3830e9e..c8b4da8 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1160,6 +1160,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 	union drm_wait_vblank *vblwait = data;
 	int ret = 0;
 	unsigned int flags, seq, crtc, high_crtc;
+	unsigned long irqflags;
 
 	if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
 		return -EINVAL;
@@ -1193,6 +1194,13 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 	}
 	seq = drm_vblank_count(dev, crtc);
 
+	/* the lock protects this section from itself executed in */
+	/* the context of another PID, ensuring that the process that */
+	/* wrote dev->last_vblank_wait is really the last process */
+	/* that was here; processes waiting on different CRTCs */
+	/* do not need to be interlocked, but rather than introducing */
+	/* a new per-crtc lock, we reuse vbl_lock for simplicity */
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
 	case _DRM_VBLANK_RELATIVE:
 		vblwait->request.sequence += seq;
@@ -1200,12 +1208,15 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 	case _DRM_VBLANK_ABSOLUTE:
 		break;
 	default:
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 		ret = -EINVAL;
 		goto done;
 	}
 
-	if (flags & _DRM_VBLANK_EVENT)
+	if (flags & _DRM_VBLANK_EVENT) {
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+	}
 
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
 	    (seq - vblwait->request.sequence) <= (1<<23)) {
@@ -1215,6 +1226,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
 		  vblwait->request.sequence, crtc);
 	dev->last_vblank_wait[crtc] = vblwait->request.sequence;
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 	DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
 		    (((drm_vblank_count(dev, crtc) -
 		       vblwait->request.sequence) <= (1 << 23)) ||
-- 
1.7.7
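
For reference, userspace normally reaches this ioctl through libdrm's
drmWaitVBlank(). A minimal sketch of such a caller is below; the device
node, the choice of waiting one frame on the primary CRTC, and the build
line are illustrative assumptions, not part of this patch:

/* build (illustrative): gcc wait_vblank.c -o wait_vblank $(pkg-config --cflags --libs libdrm) */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	drmVBlank vbl;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* request the vblank one frame after the current count on CRTC 0 */
	vbl.request.type = DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 1;
	vbl.request.signal = 0;

	/*
	 * This call blocks inside drm_wait_vblank(); with the patch above
	 * the kernel no longer holds drm_global_mutex while sleeping here,
	 * so ioctls issued by other processes are not stalled for the
	 * duration of the wait.
	 */
	if (drmWaitVBlank(fd, &vbl) != 0)
		perror("drmWaitVBlank");
	else
		printf("woke up at vblank sequence %u\n", vbl.reply.sequence);

	close(fd);
	return 0;
}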