> From: Jason Gunthorpe <jgg@xxxxxxxxxx>
> Sent: Friday, April 15, 2022 2:46 AM
>
> To make it easier to read and change in following patches.
>
> Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxx>

Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>

> ---
>  virt/kvm/vfio.c | 271 ++++++++++++++++++++++++++----------------------
>  1 file changed, 146 insertions(+), 125 deletions(-)
>
> This is best viewed using 'git diff -b' to ignore the whitespace change.
>
> diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
> index 8fcbc50221c2d2..a1167ab7a2246f 100644
> --- a/virt/kvm/vfio.c
> +++ b/virt/kvm/vfio.c
> @@ -181,149 +181,170 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
>  	mutex_unlock(&kv->lock);
>  }
>
> -static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
> +static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
>  {
>  	struct kvm_vfio *kv = dev->private;
>  	struct vfio_group *vfio_group;
>  	struct kvm_vfio_group *kvg;
> -	int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
>  	struct fd f;
> -	int32_t fd;
>  	int ret;
>
> +	f = fdget(fd);
> +	if (!f.file)
> +		return -EBADF;
> +
> +	vfio_group = kvm_vfio_group_get_external_user(f.file);
> +	fdput(f);
> +
> +	if (IS_ERR(vfio_group))
> +		return PTR_ERR(vfio_group);
> +
> +	mutex_lock(&kv->lock);
> +
> +	list_for_each_entry(kvg, &kv->group_list, node) {
> +		if (kvg->vfio_group == vfio_group) {
> +			ret = -EEXIST;
> +			goto err_unlock;
> +		}
> +	}
> +
> +	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
> +	if (!kvg) {
> +		ret = -ENOMEM;
> +		goto err_unlock;
> +	}
> +
> +	list_add_tail(&kvg->node, &kv->group_list);
> +	kvg->vfio_group = vfio_group;
> +
> +	kvm_arch_start_assignment(dev->kvm);
> +
> +	mutex_unlock(&kv->lock);
> +
> +	kvm_vfio_group_set_kvm(vfio_group, dev->kvm);
> +	kvm_vfio_update_coherency(dev);
> +
> +	return 0;
> +err_unlock:
> +	mutex_unlock(&kv->lock);
> +	kvm_vfio_group_put_external_user(vfio_group);
> +	return ret;
> +}
> +
> +static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
> +{
> +	struct kvm_vfio *kv = dev->private;
> +	struct kvm_vfio_group *kvg;
> +	struct fd f;
> +	int ret;
> +
> +	f = fdget(fd);
> +	if (!f.file)
> +		return -EBADF;
> +
> +	ret = -ENOENT;
> +
> +	mutex_lock(&kv->lock);
> +
> +	list_for_each_entry(kvg, &kv->group_list, node) {
> +		if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
> +							f.file))
> +			continue;
> +
> +		list_del(&kvg->node);
> +		kvm_arch_end_assignment(dev->kvm);
> +#ifdef CONFIG_SPAPR_TCE_IOMMU
> +		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
> +#endif
> +		kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
> +		kvm_vfio_group_put_external_user(kvg->vfio_group);
> +		kfree(kvg);
> +		ret = 0;
> +		break;
> +	}
> +
> +	mutex_unlock(&kv->lock);
> +
> +	fdput(f);
> +
> +	kvm_vfio_update_coherency(dev);
> +
> +	return ret;
> +}
> +
> +#ifdef CONFIG_SPAPR_TCE_IOMMU
> +static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
> +					void __user *arg)
> +{
> +	struct kvm_vfio_spapr_tce param;
> +	struct kvm_vfio *kv = dev->private;
> +	struct vfio_group *vfio_group;
> +	struct kvm_vfio_group *kvg;
> +	struct fd f;
> +	struct iommu_group *grp;
> +	int ret;
> +
> +	if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
> +		return -EFAULT;
> +
> +	f = fdget(param.groupfd);
> +	if (!f.file)
> +		return -EBADF;
> +
> +	vfio_group = kvm_vfio_group_get_external_user(f.file);
> +	fdput(f);
> +
> +	if (IS_ERR(vfio_group))
> +		return PTR_ERR(vfio_group);
> +
> +	grp = kvm_vfio_group_get_iommu_group(vfio_group);
> +	if (WARN_ON_ONCE(!grp)) {
> +		ret = -EIO;
> +		goto err_put_external;
> +	}
> +
> +	ret = -ENOENT;
> +
> +	mutex_lock(&kv->lock);
> +
> +	list_for_each_entry(kvg, &kv->group_list, node) {
> +		if (kvg->vfio_group != vfio_group)
> +			continue;
> +
> +		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
> +						       grp);
> +		break;
> +	}
> +
> +	mutex_unlock(&kv->lock);
> +
> +	iommu_group_put(grp);
> +err_put_external:
> +	kvm_vfio_group_put_external_user(vfio_group);
> +	return ret;
> +}
> +#endif
> +
> +static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
> +{
> +	int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
> +	int32_t fd;
> +
>  	switch (attr) {
>  	case KVM_DEV_VFIO_GROUP_ADD:
>  		if (get_user(fd, argp))
>  			return -EFAULT;
> -
> -		f = fdget(fd);
> -		if (!f.file)
> -			return -EBADF;
> -
> -		vfio_group = kvm_vfio_group_get_external_user(f.file);
> -		fdput(f);
> -
> -		if (IS_ERR(vfio_group))
> -			return PTR_ERR(vfio_group);
> -
> -		mutex_lock(&kv->lock);
> -
> -		list_for_each_entry(kvg, &kv->group_list, node) {
> -			if (kvg->vfio_group == vfio_group) {
> -				mutex_unlock(&kv->lock);
> -				kvm_vfio_group_put_external_user(vfio_group);
> -				return -EEXIST;
> -			}
> -		}
> -
> -		kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
> -		if (!kvg) {
> -			mutex_unlock(&kv->lock);
> -			kvm_vfio_group_put_external_user(vfio_group);
> -			return -ENOMEM;
> -		}
> -
> -		list_add_tail(&kvg->node, &kv->group_list);
> -		kvg->vfio_group = vfio_group;
> -
> -		kvm_arch_start_assignment(dev->kvm);
> -
> -		mutex_unlock(&kv->lock);
> -
> -		kvm_vfio_group_set_kvm(vfio_group, dev->kvm);
> -
> -		kvm_vfio_update_coherency(dev);
> -
> -		return 0;
> +		return kvm_vfio_group_add(dev, fd);
>
>  	case KVM_DEV_VFIO_GROUP_DEL:
>  		if (get_user(fd, argp))
>  			return -EFAULT;
> +		return kvm_vfio_group_del(dev, fd);
>
> -		f = fdget(fd);
> -		if (!f.file)
> -			return -EBADF;
> -
> -		ret = -ENOENT;
> -
> -		mutex_lock(&kv->lock);
> -
> -		list_for_each_entry(kvg, &kv->group_list, node) {
> -			if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
> -								f.file))
> -				continue;
> -
> -			list_del(&kvg->node);
> -			kvm_arch_end_assignment(dev->kvm);
>  #ifdef CONFIG_SPAPR_TCE_IOMMU
> -			kvm_spapr_tce_release_vfio_group(dev->kvm,
> -							 kvg->vfio_group);
> +	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
> +		return kvm_vfio_group_set_spapr_tce(dev, (void __user *)arg);
>  #endif
> -			kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
> -			kvm_vfio_group_put_external_user(kvg->vfio_group);
> -			kfree(kvg);
> -			ret = 0;
> -			break;
> -		}
> -
> -		mutex_unlock(&kv->lock);
> -
> -		fdput(f);
> -
> -		kvm_vfio_update_coherency(dev);
> -
> -		return ret;
> -
> -#ifdef CONFIG_SPAPR_TCE_IOMMU
> -	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
> -		struct kvm_vfio_spapr_tce param;
> -		struct kvm_vfio *kv = dev->private;
> -		struct vfio_group *vfio_group;
> -		struct kvm_vfio_group *kvg;
> -		struct fd f;
> -		struct iommu_group *grp;
> -
> -		if (copy_from_user(&param, (void __user *)arg,
> -				   sizeof(struct kvm_vfio_spapr_tce)))
> -			return -EFAULT;
> -
> -		f = fdget(param.groupfd);
> -		if (!f.file)
> -			return -EBADF;
> -
> -		vfio_group = kvm_vfio_group_get_external_user(f.file);
> -		fdput(f);
> -
> -		if (IS_ERR(vfio_group))
> -			return PTR_ERR(vfio_group);
> -
> -		grp = kvm_vfio_group_get_iommu_group(vfio_group);
> -		if (WARN_ON_ONCE(!grp)) {
> -			kvm_vfio_group_put_external_user(vfio_group);
> -			return -EIO;
> -		}
> -
> -		ret = -ENOENT;
> -
> -		mutex_lock(&kv->lock);
> -
> -		list_for_each_entry(kvg, &kv->group_list, node) {
> -			if (kvg->vfio_group != vfio_group)
> -				continue;
> -
> -			ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
> -							       param.tablefd, grp);
> -			break;
> -		}
> -
> -		mutex_unlock(&kv->lock);
> -
> -		iommu_group_put(grp);
> -		kvm_vfio_group_put_external_user(vfio_group);
> -
> -		return ret;
> -	}
> -#endif /* CONFIG_SPAPR_TCE_IOMMU */
>  	}
>
>  	return -ENXIO;
> --
> 2.35.1
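As a side note for anyone following the series without the uAPI docs handy, here is a
rough sketch of how a VMM reaches these handlers through the kvm-vfio pseudo device.
This is illustrative only: attach_vfio_group() is a made-up helper, and error
handling plus the usual VFIO container/group setup are elided.

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int attach_vfio_group(int vm_fd, int32_t group_fd)
	{
		/* Create the kvm-vfio pseudo device; cd.fd receives the device fd */
		struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
		struct kvm_device_attr attr = {
			.group = KVM_DEV_VFIO_GROUP,
			.attr = KVM_DEV_VFIO_GROUP_ADD,
			/* pointer to an int32_t fd, read by get_user() in the kernel */
			.addr = (uint64_t)(uintptr_t)&group_fd,
		};

		if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
			return -1;

		/* Dispatches to kvm_vfio_set_group() -> kvm_vfio_group_add() */
		return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
	}

KVM_DEV_VFIO_GROUP_DEL takes the same int32_t fd pointer; only the .attr value
changes, which is why the get_user() step stays in kvm_vfio_set_group() after
this refactoring.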