Hi Greg,

This fix is not needed; the underlying issue is actually fixed by the commit
below, which you just picked up for the 4.12-stable tree:

From 7f56c30bd0a232822aca38d288da475613bdff9b Mon Sep 17 00:00:00 2001
From: Alex Williamson <alex.williamson@xxxxxxxxxx>
Date: Fri, 7 Jul 2017 15:37:38 -0600
Subject: vfio: Remove unnecessary uses of vfio_container.group_lock

From: Alex Williamson <alex.williamson@xxxxxxxxxx>

commit 7f56c30bd0a232822aca38d288da475613bdff9b upstream.

That commit removes the group_lock acquisition from the vfio_unpin_pages()
path, so the recursive locking this patch works around can no longer happen,
and the patch itself has already been reverted upstream. So please drop this
one.
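To make the recursion concrete, here is a minimal user-space sketch of the
scenario in the lockdep splat quoted below. pthread_rwlock_t stands in for
the rw_semaphore behind container->group_lock, and the function names only
mirror the kernel ones; this is an illustration, not the kernel code:

    /* recursion-sketch.c - build with: cc -pthread recursion-sketch.c */
    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_rwlock_t group_lock;

    /* stands in for vfio_unpin_pages(): takes the read side internally */
    static void unpin_pages(void)
    {
            pthread_rwlock_rdlock(&group_lock);
            /* ... unpin ... */
            pthread_rwlock_unlock(&group_lock);
    }

    /* a writer that queues while the "ioctl" below holds the read side */
    static void *writer(void *arg)
    {
            (void)arg;
            pthread_rwlock_wrlock(&group_lock);
            pthread_rwlock_unlock(&group_lock);
            return NULL;
    }

    int main(void)
    {
            pthread_rwlockattr_t attr;
            pthread_t w;

            /* writer-preferring rwlock: queued writers block later
             * readers, like a fair rw_semaphore */
            pthread_rwlockattr_init(&attr);
            pthread_rwlockattr_setkind_np(&attr,
                            PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
            pthread_rwlock_init(&group_lock, &attr);

            /* stands in for vfio_fops_unl_ioctl(): read side held */
            pthread_rwlock_rdlock(&group_lock);

            pthread_create(&w, NULL, writer, NULL);
            sleep(1);           /* give the writer time to queue */

            unpin_pages();      /* recursive rdlock: blocks forever here */

            puts("never reached with a writer-preferring lock");
            return 0;
    }

With a writer-preferring lock, the second rdlock() waits behind the queued
writer, and the writer waits behind the first rdlock(), so the thread
deadlocks with itself; that is exactly the CPU0 scenario in the splat.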
Thanks
Chuanxiao

> -----Original Message-----
> From: gregkh@xxxxxxxxxxxxxxxxxxx [mailto:gregkh@xxxxxxxxxxxxxxxxxxx]
> Sent: Tuesday, July 25, 2017 12:39 PM
> To: Dong, Chuanxiao <chuanxiao.dong@xxxxxxxxx>
> Cc: stable@xxxxxxxxxxxxxxx; stable-commits@xxxxxxxxxxxxxxx
> Subject: Patch "drm/i915/gvt: Fix possible recursive locking issue" has been
> added to the 4.12-stable tree
>
>
> This is a note to let you know that I've just added the patch titled
>
>     drm/i915/gvt: Fix possible recursive locking issue
>
> to the 4.12-stable tree which can be found at:
>     http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary
>
> The filename of the patch is:
>     drm-i915-gvt-fix-possible-recursive-locking-issue.patch
> and it can be found in the queue-4.12 subdirectory.
>
> If you, or anyone else, feels it should not be added to the stable tree, please
> let <stable@xxxxxxxxxxxxxxx> know about it.
>
>
> From 62d02fd1f807bf5a259a242c483c9fb98a242630 Mon Sep 17 00:00:00 2001
> From: Chuanxiao Dong <chuanxiao.dong@xxxxxxxxx>
> Date: Mon, 26 Jun 2017 15:20:49 +0800
> Subject: drm/i915/gvt: Fix possible recursive locking issue
>
> From: Chuanxiao Dong <chuanxiao.dong@xxxxxxxxx>
>
> commit 62d02fd1f807bf5a259a242c483c9fb98a242630 upstream.
>
> vfio_unpin_pages() will take a read semaphore that is already held in the
> same thread by the vfio ioctl, causing the warning below:
>
> [ 5102.127454] ============================================
> [ 5102.133379] WARNING: possible recursive locking detected
> [ 5102.139304] 4.12.0-rc4+ #3 Not tainted
> [ 5102.143483] --------------------------------------------
> [ 5102.149407] qemu-system-x86/1620 is trying to acquire lock:
> [ 5102.155624]  (&container->group_lock){++++++}, at: [<ffffffff817768c6>] vfio_unpin_pages+0x96/0xf0
> [ 5102.165626] but task is already holding lock:
> [ 5102.172134]  (&container->group_lock){++++++}, at: [<ffffffff8177728f>] vfio_fops_unl_ioctl+0x5f/0x280
> [ 5102.182522] other info that might help us debug this:
> [ 5102.189806]  Possible unsafe locking scenario:
>
> [ 5102.196411]        CPU0
> [ 5102.199136]        ----
> [ 5102.201861]   lock(&container->group_lock);
> [ 5102.206527]   lock(&container->group_lock);
> [ 5102.211191]
> ---
>  drivers/gpu/drm/i915/gvt/gvt.h   |  3 ++
>  drivers/gpu/drm/i915/gvt/kvmgt.c | 55 +++++++++++++++++++++++++++++++--------
>  2 files changed, 48 insertions(+), 10 deletions(-)
>
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -183,6 +183,9 @@ struct intel_vgpu {
>  		struct kvm *kvm;
>  		struct work_struct release_work;
>  		atomic_t released;
> +		struct work_struct unpin_work;
> +		spinlock_t unpin_lock; /* To protect unpin_list */
> +		struct list_head unpin_list;
>  	} vdev;
>  #endif
>  };
> --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
> +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
> @@ -78,6 +78,7 @@ struct gvt_dma {
>  	struct rb_node node;
>  	gfn_t gfn;
>  	unsigned long iova;
> +	struct list_head list;
>  };
>
>  static inline bool handle_valid(unsigned long handle)
> @@ -166,6 +167,7 @@ static void gvt_cache_add(struct intel_v
>
>  	new->gfn = gfn;
>  	new->iova = iova;
> +	INIT_LIST_HEAD(&new->list);
>
>  	mutex_lock(&vgpu->vdev.cache_lock);
>  	while (*link) {
> @@ -197,26 +199,52 @@ static void __gvt_cache_remove_entry(str
>  	kfree(entry);
>  }
>
> -static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
> +static void intel_vgpu_unpin_work(struct work_struct *work)
>  {
> +	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
> +					       vdev.unpin_work);
>  	struct device *dev = mdev_dev(vgpu->vdev.mdev);
>  	struct gvt_dma *this;
> -	unsigned long g1;
> -	int rc;
> +	unsigned long gfn;
> +
> +	for (;;) {
> +		spin_lock(&vgpu->vdev.unpin_lock);
> +		if (list_empty(&vgpu->vdev.unpin_list)) {
> +			spin_unlock(&vgpu->vdev.unpin_lock);
> +			break;
> +		}
> +		this = list_first_entry(&vgpu->vdev.unpin_list,
> +					struct gvt_dma, list);
> +		list_del(&this->list);
> +		spin_unlock(&vgpu->vdev.unpin_lock);
> +
> +		gfn = this->gfn;
> +		vfio_unpin_pages(dev, &gfn, 1);
> +		kfree(this);
> +	}
> +}
> +
> +static bool gvt_cache_mark_remove(struct intel_vgpu *vgpu, gfn_t gfn)
> +{
> +	struct gvt_dma *this;
>
>  	mutex_lock(&vgpu->vdev.cache_lock);
>  	this = __gvt_cache_find(vgpu, gfn);
>  	if (!this) {
>  		mutex_unlock(&vgpu->vdev.cache_lock);
> -		return;
> +		return false;
>  	}
> -
> -	g1 = gfn;
>  	gvt_dma_unmap_iova(vgpu, this->iova);
> -	rc = vfio_unpin_pages(dev, &g1, 1);
> -	WARN_ON(rc != 1);
> -	__gvt_cache_remove_entry(vgpu, this);
> +	/* remove this from rb tree */
> +	rb_erase(&this->node, &vgpu->vdev.cache);
>  	mutex_unlock(&vgpu->vdev.cache_lock);
> +
> +	/* put this to the unpin_list */
> +	spin_lock(&vgpu->vdev.unpin_lock);
> +	list_move_tail(&this->list, &vgpu->vdev.unpin_list);
> +	spin_unlock(&vgpu->vdev.unpin_lock);
> +
> +	return true;
>  }
>
>  static void gvt_cache_init(struct intel_vgpu *vgpu)
> @@ -453,6 +481,9 @@ static int intel_vgpu_create(struct kobj
>  	}
>
>  	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
> +	INIT_WORK(&vgpu->vdev.unpin_work, intel_vgpu_unpin_work);
> +	spin_lock_init(&vgpu->vdev.unpin_lock);
> +	INIT_LIST_HEAD(&vgpu->vdev.unpin_list);
>
>  	vgpu->vdev.mdev = mdev;
>  	mdev_set_drvdata(mdev, vgpu);
> @@ -482,6 +513,7 @@ static int intel_vgpu_iommu_notifier(str
>  	struct intel_vgpu *vgpu = container_of(nb,
>  					struct intel_vgpu,
>  					vdev.iommu_notifier);
> +	bool sched_unmap = false;
>
>  	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
>  		struct vfio_iommu_type1_dma_unmap *unmap = data;
> @@ -491,7 +523,10 @@ static int intel_vgpu_iommu_notifier(str
>  		end_gfn = gfn + unmap->size / PAGE_SIZE;
>
>  		while (gfn < end_gfn)
> -			gvt_cache_remove(vgpu, gfn++);
> +			sched_unmap |= gvt_cache_mark_remove(vgpu, gfn++);
> +
> +		if (sched_unmap)
> +			schedule_work(&vgpu->vdev.unpin_work);
>  	}
>
>  	return NOTIFY_OK;
>
>
> Patches currently in stable-queue which might be from
> chuanxiao.dong@xxxxxxxxx are
>
> queue-4.12/drm-i915-gvt-fix-possible-recursive-locking-issue.patch
> queue-4.12/drm-i915-gvt-fix-inconsistent-locks-holding-sequence.patch
> queue-4.12/vfio-remove-unnecessary-uses-of-vfio_container.group_lock.patch
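For completeness: the workaround in the quoted patch follows a reusable
defer-to-worker shape. Entries are only marked and parked on unpin_list
under the cache lock, and a worker performs the lock-taking unpin later,
via schedule_work(). A rough user-space sketch of that shape follows; a
plain mutex and thread stand in for the kernel spinlock and workqueue,
and all names are invented for illustration:

    /* defer-sketch.c - build with: cc -pthread defer-sketch.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* one cache entry queued for deferred unpin (cf. struct gvt_dma) */
    struct entry {
            unsigned long gfn;
            struct entry *next;
    };

    static pthread_mutex_t unpin_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *unpin_list;

    /* the lock-taking call that must not run in the caller's context */
    static void unpin_pages(unsigned long gfn)
    {
            printf("unpinned gfn %lu\n", gfn);
    }

    /* cf. intel_vgpu_unpin_work(): drain the list, unpin outside the
     * list lock */
    static void *unpin_worker(void *arg)
    {
            (void)arg;
            for (;;) {
                    pthread_mutex_lock(&unpin_lock);
                    struct entry *this = unpin_list;
                    if (this)
                            unpin_list = this->next;
                    pthread_mutex_unlock(&unpin_lock);
                    if (!this)
                            break;          /* list drained */
                    unpin_pages(this->gfn);
                    free(this);
            }
            return NULL;
    }

    /* cf. gvt_cache_mark_remove(): only queue, never unpin inline */
    static void mark_remove(unsigned long gfn)
    {
            struct entry *e = malloc(sizeof(*e));
            if (!e)
                    return;
            e->gfn = gfn;
            pthread_mutex_lock(&unpin_lock);
            e->next = unpin_list;
            unpin_list = e;
            pthread_mutex_unlock(&unpin_lock);
    }

    int main(void)
    {
            pthread_t worker;

            for (unsigned long gfn = 0; gfn < 4; gfn++)
                    mark_remove(gfn);

            /* cf. schedule_work(&vgpu->vdev.unpin_work) */
            pthread_create(&worker, NULL, unpin_worker, NULL);
            pthread_join(worker, NULL);
            return 0;
    }

The point of the shape is that unpin_pages() runs in the worker's own
context, so locks held by the original caller no longer matter; the vfio
commit above simply removes the need for it in this case.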