On 2023/8/2 15:46, Tian, Kevin wrote:
From: Liu, Yi L <yi.l.liu@xxxxxxxxx>
Sent: Monday, July 24, 2023 7:14 PM
+static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
+					       void *user_data)
+{
+	struct iommu_hwpt_vtd_s1_invalidate_desc *req = user_data;
+	struct iommu_hwpt_vtd_s1_invalidate *inv_info = user_data;
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	unsigned int entry_size = inv_info->entry_size;
+	u64 uptr = inv_info->inv_data_uptr;
+	u64 nr_uptr = inv_info->entry_nr_uptr;
+	struct device_domain_info *info;
+	u32 entry_nr, index;
+	unsigned long flags;
+	int ret = 0;
+
+	if (get_user(entry_nr, (uint32_t __user *)u64_to_user_ptr(nr_uptr)))
+		return -EFAULT;
+
+	for (index = 0; index < entry_nr; index++) {
+		ret = copy_struct_from_user(req, sizeof(*req),
+					    u64_to_user_ptr(uptr + index * entry_size),
+					    entry_size);
If we continue in this direction, then the driver should also check minsz etc.
for struct iommu_hwpt_vtd_s1_invalidate and struct iommu_hwpt_vtd_s1_invalidate_desc,
since they are uAPI and subject to change.
Agreed.
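Something like the sketch below, done before the copy loop, is probably
enough. This is only an illustration; which field marks the minimum
acceptable size is my assumption about the current uAPI layout:

	/*
	 * Sketch only: reject descriptors shorter than the fields this
	 * kernel actually consumes. The field chosen as the size anchor
	 * is an assumption about the uAPI layout.
	 */
	if (entry_size < offsetofend(struct iommu_hwpt_vtd_s1_invalidate_desc, __reserved))
		return -EINVAL;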
+		if (ret) {
+			pr_err_ratelimited("Failed to fetch invalidation request\n");
+			break;
+		}
+
+		if (req->__reserved || (req->flags & ~IOMMU_VTD_QI_FLAGS_LEAF) ||
+		    !IS_ALIGNED(req->addr, VTD_PAGE_SIZE)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		spin_lock_irqsave(&dmar_domain->lock, flags);
+		list_for_each_entry(info, &dmar_domain->devices, link)
+			intel_nested_invalidate(info->dev, dmar_domain,
+						req->addr, req->npages);
+		spin_unlock_irqrestore(&dmar_domain->lock, flags);
Disabling interrupts while invalidating the IOTLB is certainly unacceptable.
Actually there is no need to walk the devices. The dmar_domain already
maintains a list of the attached IOMMUs.
Walking the devices is only necessary when invalidating the device TLB. For
IOTLB invalidation, we only need to know the IOMMUs.
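Roughly, that would look like the snippet below, reusing the iommu_array
xarray that dmar_domain already maintains and the existing qi_flush_piotlb()
helper. The PASID value and the invalidation-hint plumbing are placeholders,
not the final code:

	struct iommu_domain_info *qi_info;
	unsigned long i;

	/*
	 * Walk the IOMMUs attached to the domain instead of the device
	 * list; no need to take dmar_domain->lock with interrupts off.
	 */
	xa_for_each(&dmar_domain->iommu_array, i, qi_info)
		/*
		 * Placeholder invalidation: flush the first-stage IOTLB
		 * for this request's range on each attached IOMMU. The
		 * PASID (0 here) and the invalidation hint would come
		 * from the proper nesting data in the real patch.
		 */
		qi_flush_piotlb(qi_info->iommu, qi_info->did, 0,
				req->addr, req->npages,
				req->flags & IOMMU_VTD_QI_FLAGS_LEAF);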
Best regards,
baolu