A request to unmap the dma-buf may result from freeing the imported GEM object (in radeon, for example), an action performed with the struct_mutex already held. In this circumstance we cannot take the mutex again, and so must defer updating the object state until we can take the mutex, i.e. defer it to our workqueue.
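
In outline, i915_gem_unmap_dma_buf() now tries for the mutex and otherwise defers the unpin. This is a simplified sketch of the change in the patch below, with the sg teardown elided; defer_unpin_to_workqueue() is only a placeholder for the work-item setup that the patch open-codes:

        if (mutex_trylock(&obj->base.dev->struct_mutex)) {
                /* The mutex was free, so unpin the backing pages directly. */
                i915_gem_object_unpin_pages(obj);
                mutex_unlock(&obj->base.dev->struct_mutex);
        } else {
                /*
                 * struct_mutex is unavailable, possibly held by this very
                 * task (as in the radeon free path in the trace below), so
                 * take a reference on the object and unpin it later from
                 * our workqueue, where the mutex can be taken safely.
                 */
                defer_unpin_to_workqueue(obj);
        }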

[ 331.792605] =============================================
[ 331.792782] [ INFO: possible recursive locking detected ]
[ 331.792971] 3.16.0-0.rc3.git3.1.fc21.x86_64 #1 Tainted: G W
[ 331.793012] ---------------------------------------------
[ 331.793012] Xorg.bin/1015 is trying to acquire lock:
[ 331.793012]  (&dev->struct_mutex){+.+.+.}, at: [<ffffffffa010a919>] i915_gem_unmap_dma_buf+0x39/0x110 [i915]
[ 331.793012] but task is already holding lock:
[ 331.793012]  (&dev->struct_mutex){+.+.+.}, at: [<ffffffffa0084902>] drm_gem_object_handle_unreference_unlocked+0x102/0x130 [drm]
[ 331.793012] other info that might help us debug this:
[ 331.793012]  Possible unsafe locking scenario:
[ 331.793012]        CPU0
[ 331.793012]        ----
[ 331.793012]   lock(&dev->struct_mutex);
[ 331.793012]   lock(&dev->struct_mutex);
[ 331.793012]  *** DEADLOCK ***
[ 331.793012]  May be due to missing lock nesting notation
[ 331.793012] 1 lock held by Xorg.bin/1015:
[ 331.793012]  #0:  (&dev->struct_mutex){+.+.+.}, at: [<ffffffffa0084902>] drm_gem_object_handle_unreference_unlocked+0x102/0x130 [drm]
[ 331.793012] stack backtrace:
[ 331.793012] CPU: 0 PID: 1015 Comm: Xorg.bin Tainted: G W 3.16.0-0.rc3.git3.1.fc21.x86_64 #1
[ 331.793012] Hardware name: LENOVO 4058CTO/4058CTO, BIOS 6FET93WW (3.23 ) 10/12/2012
[ 331.793012]  0000000000000000 00000000ad742009 ffff88024ff4ba80 ffffffff81807cec
[ 331.793012]  ffffffff82bc8240 ffff88024ff4bb60 ffffffff81100fd0 ffffffff81024369
[ 331.793012]  ffff88024ff4bac0 ffffffff810e1b3d ffff880000000000 00000000007583ac
[ 331.793012] Call Trace:
[ 331.793012]  [<ffffffff81807cec>] dump_stack+0x4d/0x66
[ 331.793012]  [<ffffffff81100fd0>] __lock_acquire+0x1450/0x1ca0
[ 331.793012]  [<ffffffff81024369>] ? sched_clock+0x9/0x10
[ 331.793012]  [<ffffffff810e1b3d>] ? sched_clock_local+0x1d/0x90
[ 331.793012]  [<ffffffff81024369>] ? sched_clock+0x9/0x10
[ 331.793012]  [<ffffffff810e1b3d>] ? sched_clock_local+0x1d/0x90
[ 331.793012]  [<ffffffff81102104>] lock_acquire+0xa4/0x1d0
[ 331.793012]  [<ffffffffa010a919>] ? i915_gem_unmap_dma_buf+0x39/0x110 [i915]
[ 331.793012]  [<ffffffff8180ccd5>] mutex_lock_nested+0x85/0x440
[ 331.793012]  [<ffffffffa010a919>] ? i915_gem_unmap_dma_buf+0x39/0x110 [i915]
[ 331.793012]  [<ffffffffa010a919>] ? i915_gem_unmap_dma_buf+0x39/0x110 [i915]
[ 331.793012]  [<ffffffffa010a919>] i915_gem_unmap_dma_buf+0x39/0x110 [i915]
[ 331.793012]  [<ffffffff81532591>] dma_buf_unmap_attachment+0x51/0x80
[ 331.793012]  [<ffffffffa009c6c2>] drm_prime_gem_destroy+0x22/0x40 [drm]
[ 331.793012]  [<ffffffffa0468112>] radeon_gem_object_free+0x42/0x70 [radeon]
[ 331.793012]  [<ffffffffa0084387>] drm_gem_object_free+0x27/0x40 [drm]
[ 331.793012]  [<ffffffffa0084920>] drm_gem_object_handle_unreference_unlocked+0x120/0x130 [drm]
[ 331.793012]  [<ffffffffa00849ff>] drm_gem_handle_delete+0xcf/0x1a0 [drm]
[ 331.793012]  [<ffffffffa0085205>] drm_gem_close_ioctl+0x25/0x30 [drm]
[ 331.793012]  [<ffffffffa0082cdf>] drm_ioctl+0x1df/0x6a0 [drm]
[ 331.793012]  [<ffffffff81810af6>] ? _raw_spin_unlock_irqrestore+0x36/0x70
[ 331.793012]  [<ffffffff810ff72d>] ? trace_hardirqs_on_caller+0x15d/0x200
[ 331.793012]  [<ffffffff810ff7dd>] ? trace_hardirqs_on+0xd/0x10
[ 331.793012]  [<ffffffffa043604c>] radeon_drm_ioctl+0x4c/0x80 [radeon]
[ 331.793012]  [<ffffffff812628d0>] do_vfs_ioctl+0x2f0/0x520
[ 331.793012]  [<ffffffff8126ef6a>] ? __fget+0x12a/0x2f0
[ 331.793012]  [<ffffffff8126ee45>] ? __fget+0x5/0x2f0
[ 331.793012]  [<ffffffff8126f1a0>] ? __fget_light+0x30/0x160
[ 331.793012]  [<ffffffff81262b81>] SyS_ioctl+0x81/0xa0
[ 331.793012]  [<ffffffff818118e9>] system_call_fastpath+0x16/0x1b

Reported-by: Shawn Starr <shawn.starr@xxxxxxxxxx>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=80984
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c | 36 ++++++++++++++++++++++++++++++++----
 1 file changed, 32 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 580aa42..7d8a87e 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -89,21 +89,49 @@ err:
         return ERR_PTR(ret);
 }
 
+struct i915_gem_unmap_dma_buf_work {
+        struct work_struct base;
+        struct drm_i915_gem_object *obj;
+};
+
+static void __i915_gem_unmap_dma_buf_worker(struct work_struct *base)
+{
+        struct i915_gem_unmap_dma_buf_work *work =
+                container_of(base, typeof(*work), base);
+        struct drm_device *dev = work->obj->base.dev;
+
+        mutex_lock(&dev->struct_mutex);
+        i915_gem_object_unpin_pages(work->obj);
+        drm_gem_object_unreference(&work->obj->base);
+        mutex_unlock(&dev->struct_mutex);
+
+        kfree(work);
+}
+
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                    struct sg_table *sg,
                                    enum dma_data_direction dir)
 {
         struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 
-        mutex_lock(&obj->base.dev->struct_mutex);
-
         dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
         sg_free_table(sg);
         kfree(sg);
 
-        i915_gem_object_unpin_pages(obj);
+        if (mutex_trylock(&obj->base.dev->struct_mutex)) {
+                i915_gem_object_unpin_pages(obj);
+                mutex_unlock(&obj->base.dev->struct_mutex);
+        } else {
+                struct i915_gem_unmap_dma_buf_work *work;
 
-        mutex_unlock(&obj->base.dev->struct_mutex);
+                work = kmalloc(sizeof(*work), GFP_KERNEL);
+                if (work) {
+                        work->obj = obj;
+                        drm_gem_object_reference(&obj->base);
+                        INIT_WORK(&work->base, __i915_gem_unmap_dma_buf_worker);
+                        queue_work(to_i915(obj->base.dev)->wq, &work->base);
+                } /* or else we WARN when we come to free the exported obj */
+        }
 }
 
 static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
-- 
1.9.1