On 2022/4/29 05:09, Joao Martins wrote:
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -5089,6 +5089,113 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
}
}
+static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
+ bool enable)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct device_domain_info *info;
+ unsigned long flags;
+ int ret = -EINVAL;
+ if (domain_use_first_level(dmar_domain))
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ if (list_empty(&dmar_domain->devices)) {
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ return ret;
+ }
I agree with Kevin's suggestion in his reply.
+
+ list_for_each_entry(info, &dmar_domain->devices, link) {
+ if (!info->dev || (info->domain != dmar_domain))
+ continue;
This check is redundant: every entry on the dmar_domain->devices list
already has info->domain == dmar_domain.
+
+ /* Dirty tracking is second-stage level SM only */
+ if ((info->domain && domain_use_first_level(info->domain)) ||
+ !ecap_slads(info->iommu->ecap) ||
+ !sm_supported(info->iommu) || !intel_iommu_sm) {
+ ret = -EOPNOTSUPP;
+ continue;
Perhaps break out of the loop and return -EOPNOTSUPP directly here?
We are not able to support a mixed mode (dirty tracking enabled on only
some of the IOMMUs), right?
+ }
+
+ ret = intel_pasid_setup_dirty_tracking(info->iommu, info->domain,
+ info->dev, PASID_RID2PASID,
+ enable);
+ if (ret)
+ break;
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ /*
+ * We need to flush context TLB and IOTLB with any cached translations
+ * to force incoming DMA requests to have their IOTLB entries tagged
+ * with A/D bits
+ */
+ intel_flush_iotlb_all(domain);
+ return ret;
+}
Best regards,
baolu