On 2022/12/7 7:09, Jason Gunthorpe wrote:
static ssize_t iommu_group_store_type(struct iommu_group *group,
const char *buf, size_t count)
{
- struct group_device *grp_dev;
- struct device *dev;
- int ret, req_type;
+ enum dma_api_policy policy;
+ int ret;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
@@ -2977,77 +2907,30 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
return -EINVAL;
if (sysfs_streq(buf, "identity"))
- req_type = IOMMU_DOMAIN_IDENTITY;
+ policy = DMA_API_POLICY_IDENTITY;
else if (sysfs_streq(buf, "DMA"))
- req_type = IOMMU_DOMAIN_DMA;
+ policy = DMA_API_POLICY_STRICT;
else if (sysfs_streq(buf, "DMA-FQ"))
- req_type = IOMMU_DOMAIN_DMA_FQ;
+ policy = DMA_API_POLICY_LAZY;
else if (sysfs_streq(buf, "auto"))
- req_type = 0;
+ policy = DMA_API_POLICY_ANY;
else
return -EINVAL;
/*
- * Lock/Unlock the group mutex here before device lock to
- * 1. Make sure that the iommu group has only one device (this is a
- * prerequisite for step 2)
- * 2. Get struct *dev which is needed to lock device
- */
- mutex_lock(&group->mutex);
- if (iommu_group_device_count(group) != 1) {
- mutex_unlock(&group->mutex);
- pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
- return -EINVAL;
- }
-
- /* Since group has only one device */
- grp_dev = list_first_entry(&group->devices, struct group_device, list);
- dev = grp_dev->dev;
- get_device(dev);
-
- /*
- * Don't hold the group mutex because taking group mutex first and then
- * the device lock could potentially cause a deadlock as below. Assume
- * two threads T1 and T2. T1 is trying to change default domain of an
- * iommu group and T2 is trying to hot unplug a device or release [1] VF
- * of a PCIe device which is in the same iommu group. T1 takes group
- * mutex and before it could take device lock assume T2 has taken device
- * lock and is yet to take group mutex. Now, both the threads will be
- * waiting for the other thread to release lock. Below, lock order was
- * suggested.
- * device_lock(dev);
- * mutex_lock(&group->mutex);
- * iommu_change_dev_def_domain();
- * mutex_unlock(&group->mutex);
- * device_unlock(dev);
- *
- * [1] Typical device release path
- * device_lock() from device/driver core code
- * -> bus_notifier()
- * -> iommu_bus_notifier()
- * -> iommu_release_device()
- * -> ops->release_device() vendor driver calls back iommu core code
- * -> mutex_lock() from iommu core code
+ * Taking ownership disables the DMA API domain, prevents drivers from
+ * being attached, and switches to a blocking domain. Releasing
+ * ownership will put back the new or original DMA API domain.
*/
- mutex_unlock(&group->mutex);
-
- /* Check if the device in the group still has a driver bound to it */
- device_lock(dev);
With device_lock() removed, doesn't this race with the
iommu_release_device() path? group->mutex alone seems insufficient to
avoid that race. Perhaps I missed something.
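
To spell out the interleaving I am worried about (the locking inside
iommu_group_claim_dma_owner() and iommu_change_group_dma_api_policy()
below is only my reading of this series, so please correct me if I got
it wrong; the CPU1 side follows the release path described in the
comment being removed above):

	CPU0: iommu_group_store_type()      CPU1: hot unplug / VF release
	------------------------------      ------------------------------
	iommu_group_claim_dma_owner()
	  mutex_lock(&group->mutex);
	  ...
	  mutex_unlock(&group->mutex);
	                                    device_lock(dev); /* driver core */
	                                    -> iommu_bus_notifier()
	                                       -> iommu_release_device(dev)
	                                          -> ops->release_device(dev)
	                                          -> mutex_lock(&group->mutex);
	                                             ... dev removed from group ...
	                                             mutex_unlock(&group->mutex);
	iommu_change_group_dma_api_policy()
	  mutex_lock(&group->mutex);
	  /* dev is gone, group->devices may already be empty */
	  mutex_unlock(&group->mutex);

Nothing in the sysfs path pins the device once device_lock() is gone,
so the policy change can end up running against a group whose only
device has just been released.
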
- if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
- group->default_domain->type == IOMMU_DOMAIN_DMA)) {
- pr_err_ratelimited("Device is still bound to driver\n");
- ret = -EBUSY;
- goto out;
- }
-
- ret = iommu_change_dev_def_domain(group, dev, req_type);
- ret = ret ?: count;
-
-out:
- device_unlock(dev);
- put_device(dev);
+ ret = iommu_group_claim_dma_owner(group, &ret);
+ if (ret)
+ return ret;
- return ret;
+ ret = iommu_change_group_dma_api_policy(group, policy);
+ iommu_group_release_dma_owner(group);
+ if (ret)
+ return ret;
+ return count;
}
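
If that race is real, perhaps the policy change itself needs to
re-validate the group under group->mutex. A minimal sketch of what I
mean, assuming iommu_change_group_dma_api_policy() in this series
takes group->mutex itself (__iommu_group_set_dma_api_policy() below is
only a placeholder for the actual policy switch):

	int iommu_change_group_dma_api_policy(struct iommu_group *group,
					      enum dma_api_policy policy)
	{
		int ret;

		mutex_lock(&group->mutex);
		/*
		 * Re-check that the group still has a device; a concurrent
		 * iommu_release_device() may have emptied it after the
		 * ownership claim dropped group->mutex.
		 */
		if (list_empty(&group->devices)) {
			mutex_unlock(&group->mutex);
			return -ENODEV;
		}
		ret = __iommu_group_set_dma_api_policy(group, policy);
		mutex_unlock(&group->mutex);
		return ret;
	}

This would not stop the device from going away in the middle of the
store, but at least the policy switch would not run on a group whose
last device has already been released.
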
static bool iommu_is_default_domain(struct iommu_group *group)
Best regards,
baolu