The following patches will change the APIs to use the struct file as the
handle instead of the vfio_group, so hang on to a reference to it for the
same duration as the vfio_group.

Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
---
 virt/kvm/vfio.c | 40 ++++++++++++++++++++++++----------------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 9b942f447df79d..7e1793a1f5f1fd 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -33,6 +33,7 @@ static void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
 
 struct kvm_vfio_group {
 	struct list_head node;
+	struct file *filp;
 	struct vfio_group *vfio_group;
 };
 
@@ -198,23 +199,17 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
 	struct kvm_vfio *kv = dev->private;
 	struct vfio_group *vfio_group;
 	struct kvm_vfio_group *kvg;
-	struct fd f;
+	struct file *filp;
 	int ret;
 
-	f = fdget(fd);
-	if (!f.file)
+	filp = fget(fd);
+	if (!filp)
 		return -EBADF;
 
-	vfio_group = kvm_vfio_group_get_external_user(f.file);
-	fdput(f);
-
-	if (IS_ERR(vfio_group))
-		return PTR_ERR(vfio_group);
-
 	mutex_lock(&kv->lock);
 
 	list_for_each_entry(kvg, &kv->group_list, node) {
-		if (kvg->vfio_group == vfio_group) {
+		if (kvg->filp == filp) {
 			ret = -EEXIST;
 			goto err_unlock;
 		}
@@ -226,6 +221,13 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
 		goto err_unlock;
 	}
 
+	vfio_group = kvm_vfio_group_get_external_user(filp);
+	if (IS_ERR(vfio_group)) {
+		ret = PTR_ERR(vfio_group);
+		goto err_free;
+	}
+
+	kvg->filp = filp;
 	list_add_tail(&kvg->node, &kv->group_list);
 	kvg->vfio_group = vfio_group;
 
@@ -237,9 +239,11 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
 	kvm_vfio_update_coherency(dev);
 
 	return 0;
+err_free:
+	kfree(kvg);
 err_unlock:
 	mutex_unlock(&kv->lock);
-	kvm_vfio_group_put_external_user(vfio_group);
+	fput(filp);
 	return ret;
 }
 
@@ -268,6 +272,7 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
 		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
 		kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
 		kvm_vfio_group_put_external_user(kvg->vfio_group);
+		fput(kvg->filp);
 		kfree(kvg);
 		ret = 0;
 		break;
@@ -304,10 +309,10 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
 		return -EBADF;
 
 	vfio_group = kvm_vfio_group_get_external_user(f.file);
-	fdput(f);
-
-	if (IS_ERR(vfio_group))
-		return PTR_ERR(vfio_group);
+	if (IS_ERR(vfio_group)) {
+		ret = PTR_ERR(vfio_group);
+		goto err_fdput;
+	}
 
 	grp = kvm_vfio_group_get_iommu_group(vfio_group);
 	if (WARN_ON_ONCE(!grp)) {
@@ -320,7 +325,7 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
 	mutex_lock(&kv->lock);
 
 	list_for_each_entry(kvg, &kv->group_list, node) {
-		if (kvg->vfio_group != vfio_group)
+		if (kvg->filp != f.file)
 			continue;
 
 		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
@@ -333,6 +338,8 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
 	iommu_group_put(grp);
 err_put_external:
 	kvm_vfio_group_put_external_user(vfio_group);
+err_fdput:
+	fdput(f);
 	return ret;
 }
 
@@ -400,6 +407,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
 		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
 		kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
 		kvm_vfio_group_put_external_user(kvg->vfio_group);
+		fput(kvg->filp);
 		list_del(&kvg->node);
 		kfree(kvg);
 		kvm_arch_end_assignment(dev->kvm);
-- 
2.35.1
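
For context (not part of the patch): a minimal userspace sketch of the
lifetime rule this change establishes, namely that the file reference is
taken when the group entry is created and dropped only when the entry
itself is torn down. The names below (tracked_group, group_add, group_del)
are illustrative only, and dup()/close() stand in for the kernel's
fget()/fput().

	#include <stdlib.h>
	#include <unistd.h>
	#include <fcntl.h>

	struct tracked_group {
		int fd;			/* stands in for struct file *filp */
	};

	/* Take a reference to the caller's fd for the lifetime of the entry. */
	static struct tracked_group *group_add(int user_fd)
	{
		struct tracked_group *g = malloc(sizeof(*g));

		if (!g)
			return NULL;
		g->fd = dup(user_fd);	/* analogous to fget(fd) */
		if (g->fd < 0) {
			free(g);
			return NULL;
		}
		return g;
	}

	/* Drop the reference together with the entry, mirroring fput() + kfree(). */
	static void group_del(struct tracked_group *g)
	{
		close(g->fd);		/* analogous to fput(filp) */
		free(g);
	}

	int main(void)
	{
		int fd = open("/dev/null", O_RDONLY);
		struct tracked_group *g = group_add(fd);

		close(fd);		/* the caller's fd can go away... */
		if (g) {
			/* ...the entry still holds its own reference here. */
			group_del(g);
		}
		return 0;
	}

Pairing the get with entry creation and the put with entry teardown keeps
the file pinned for exactly as long as the entry that uses it, which is
what lets later patches key off the struct file safely.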