> From: Liu, Yi L <yi.l.liu@xxxxxxxxx> > Sent: Monday, February 27, 2023 7:11 PM [...] > @@ -2392,13 +2416,25 @@ static int > vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set) > return ret; > } > > +static bool vfio_dev_in_iommufd_ctx(struct vfio_pci_core_device *vdev, > + struct iommufd_ctx *iommufd_ctx) > +{ > + struct iommufd_ctx *iommufd = vfio_device_iommufd(&vdev- > >vdev); > + > + if (!iommufd) > + return false; > + > + return iommufd == iommufd_ctx; > +} > + > /* > * We need to get memory_lock for each device, but devices can share > mmap_lock, > * therefore we need to zap and hold the vma_lock for each device, and > only then > * get each memory_lock. > */ > static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, > - struct vfio_pci_group_info *groups) > + struct vfio_pci_group_info *groups, > + struct iommufd_ctx *iommufd_ctx) > { > struct vfio_pci_core_device *cur_mem; > struct vfio_pci_core_device *cur_vma; > @@ -2429,10 +2465,27 @@ static int vfio_pci_dev_set_hot_reset(struct > vfio_device_set *dev_set, > > list_for_each_entry(cur_vma, &dev_set->device_list, > vdev.dev_set_list) { > /* > - * Test whether all the affected devices are contained by > the > - * set of groups provided by the user. > + * Test whether all the affected devices can be reset by the > + * user. The affected devices may already been opened or > not > + * yet. > + * > + * For the devices not opened yet, user can reset them. The > + * reason is that the hot reset is done under the protection > + * of the dev_set->lock, and device open is also under this > + * lock. During the hot reset, such devices can not be > opened > + * by other users. > + * > + * For the devices that have been opened, needs to check > the > + * ownership. If the user provides a set of group fds, the > + * ownership check is done by checking if all the opened > + * devices are contained by the groups. 
If the user provides > + * a zero-length fd array, the ownership check is done by > + * checking if all the opened devices are bound to the same > + * iommufd_ctx. > */ > - if (!vfio_dev_in_groups(cur_vma, groups)) { > + if (cur_vma->vdev.open_count && > + !vfio_dev_in_groups(cur_vma, groups) && > + !vfio_dev_in_iommufd_ctx(cur_vma, iommufd_ctx)) { Hi Alex, Jason, There is one concern with this approach which is related to the cdev noiommu mode. As described in patch 16 of this series, the cdev path supports noiommu mode by passing a negative iommufd to the kernel. In such a case, the vfio_device is not bound to a valid iommufd, so the check in vfio_dev_in_iommufd_ctx() would be broken. An idea is to add a cdev_noiommu flag in vfio_device and, when checking the iommufd_ictx, also check this flag. If all the opened devices in the dev_set have vfio_device->cdev_noiommu==true, then the reset is considered to be doable. But there is a special case. If the devices in this dev_set are opened by two applications that operate in cdev noiommu mode, then this logic is not able to differentiate them. In that case, should we allow the reset? It seems ok to allow the reset, since noiommu mode itself means there is no security between the applications that use it. Thoughts? > ret = -EINVAL; > goto err_undo; > } > diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h > index 2e3cb284711d..64e862a02dad 100644 > --- a/drivers/vfio/vfio.h > +++ b/drivers/vfio/vfio.h > @@ -225,6 +225,11 @@ static inline void vfio_container_cleanup(void) > #if IS_ENABLED(CONFIG_IOMMUFD) > int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx > *ictx); > void vfio_iommufd_unbind(struct vfio_device *device); > +static inline struct iommufd_ctx * > +vfio_device_iommufd(struct vfio_device *device) > +{ > + return device->iommufd_ictx; > +} > #else Regards, Yi Liu