Introduce iommu_get_group_resv_regions() whose role is to enumerate
all the devices of a group and collect their reserved regions. The
returned list is sorted by physical address and overlapping regions
are merged.

Signed-off-by: Eric Auger <eric.auger@xxxxxxxxxx>

---

v3 -> v4:
- take the iommu_group lock in iommu_get_group_resv_regions
- the list now is sorted and overlapping regions are merged

NOTE:
- we do not move the list elements from the device list to the group
  list, since iommu_put_resv_regions() could then no longer be called
  on them.
- at the moment I did not introduce any iommu_put_group_resv_regions
  helper since it would simply consist in freeing the list elements.
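Since there is no iommu_put_group_resv_regions() yet, the caller is
expected to free the returned list itself. As an illustration only (a
hypothetical consumer, not part of this patch; assumes linux/iommu.h
and linux/slab.h are available in context), usage could look like:

static void dump_group_resv_regions(struct iommu_group *group)
{
	struct iommu_resv_region *region, *next;
	LIST_HEAD(resv_regions);

	if (iommu_get_group_resv_regions(group, &resv_regions))
		return;

	list_for_each_entry_safe(region, next, &resv_regions, list) {
		/* entries are sorted by start address and do not overlap */
		pr_info("resv region [%pa - 0x%llx] type %d\n",
			&region->start,
			(unsigned long long)(region->start + region->length - 1),
			region->type);
		/* entries are copies allocated by the API: free them here */
		list_del(&region->list);
		kfree(region);
	}
}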
---
 drivers/iommu/iommu.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/iommu.h |  8 ++++++
 2 files changed, 86 insertions(+)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 41c1906..a2d51b3 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -133,6 +133,84 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
 	return sprintf(buf, "%s\n", group->name);
 }
 
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+				    struct list_head *regions)
+{
+	struct iommu_resv_region *region;
+	phys_addr_t start = new->start;
+	phys_addr_t end = new->start + new->length - 1;
+	struct list_head *pos = regions->next;
+
+	while (pos != regions) {
+		struct iommu_resv_region *entry =
+			list_entry(pos, struct iommu_resv_region, list);
+		phys_addr_t a = entry->start;
+		phys_addr_t b = entry->start + entry->length - 1;
+
+		if (end < a) {
+			goto insert;
+		} else if (start > b) {
+			pos = pos->next;
+		} else if ((start >= a) && (end <= b)) {
+			goto done;
+		} else {
+			phys_addr_t new_start = min(a, start);
+			phys_addr_t new_end = max(b, end);
+
+			list_del(&entry->list);
+			entry->start = new_start;
+			entry->length = new_end - new_start + 1;
+			iommu_insert_resv_region(entry, regions);
+		}
+	}
+insert:
+	region = iommu_alloc_resv_region(new->start, new->length,
+					 new->prot, new->type);
+	if (!region)
+		return -ENOMEM;
+
+	list_add_tail(&region->list, pos);
+done:
+	return 0;
+}
+
+static int
+iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+				 struct list_head *group_resv_regions)
+{
+	struct iommu_resv_region *entry;
+	int ret = 0;
+
+	list_for_each_entry(entry, dev_resv_regions, list) {
+		ret = iommu_insert_resv_region(entry, group_resv_regions);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+int iommu_get_group_resv_regions(struct iommu_group *group,
+				 struct list_head *head)
+{
+	struct iommu_device *device;
+	int ret = 0;
+
+	mutex_lock(&group->mutex);
+	list_for_each_entry(device, &group->devices, list) {
+		struct list_head dev_resv_regions;
+
+		INIT_LIST_HEAD(&dev_resv_regions);
+		iommu_get_resv_regions(device->dev, &dev_resv_regions);
+		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+		iommu_put_resv_regions(device->dev, &dev_resv_regions);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&group->mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
+
 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 
 static void iommu_group_release(struct kobject *kobj)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e97a877..b5f8492 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -246,6 +246,8 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain,
 extern int iommu_request_dm_for_dev(struct device *dev);
 extern struct iommu_resv_region *
 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
+extern int iommu_get_group_resv_regions(struct iommu_group *group,
+					struct list_head *head);
 
 extern int iommu_attach_group(struct iommu_domain *domain,
 			      struct iommu_group *group);
@@ -463,6 +465,12 @@ static inline void iommu_put_resv_regions(struct device *dev,
 {
 }
 
+static inline int iommu_get_group_resv_regions(struct iommu_group *group,
+					       struct list_head *head)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_request_dm_for_dev(struct device *dev)
 {
 	return -ENODEV;
-- 
1.9.1
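P.S.: for reviewers who want to play with the sort/merge semantics
that iommu_insert_resv_region() implements, below is a small
standalone userspace sketch (array based instead of list based,
illustration only, not part of the patch). Inserting [0x0 - 0xfff],
[0x2000 - 0x2fff] and then [0x800 - 0x20ff] is expected to leave a
single merged region [0x0 - 0x2fff], while merely adjacent regions
stay separate, as in the kernel code:

#include <stdio.h>
#include <stdint.h>

#define MAX_REGIONS 16

struct region { uint64_t start, end; };	/* inclusive bounds */

static struct region regs[MAX_REGIONS];
static int nr_regs;

static void insert_region(uint64_t start, uint64_t end)
{
	int i = 0, j;

	/* skip regions lying entirely below the new one */
	while (i < nr_regs && regs[i].end < start)
		i++;

	/* absorb every region overlapping [start, end] */
	while (i < nr_regs && regs[i].start <= end) {
		start = regs[i].start < start ? regs[i].start : start;
		end = regs[i].end > end ? regs[i].end : end;
		for (j = i; j < nr_regs - 1; j++)	/* drop regs[i] */
			regs[j] = regs[j + 1];
		nr_regs--;
	}

	if (nr_regs >= MAX_REGIONS)
		return;		/* demo-sized table is full */

	/* insert the (possibly merged) region at its sorted slot */
	for (j = nr_regs; j > i; j--)
		regs[j] = regs[j - 1];
	regs[i].start = start;
	regs[i].end = end;
	nr_regs++;
}

int main(void)
{
	int i;

	insert_region(0x0000, 0x0fff);
	insert_region(0x2000, 0x2fff);
	insert_region(0x0800, 0x20ff);	/* overlaps both -> one region */

	for (i = 0; i < nr_regs; i++)	/* prints [0x0 - 0x2fff] */
		printf("[0x%llx - 0x%llx]\n",
		       (unsigned long long)regs[i].start,
		       (unsigned long long)regs[i].end);
	return 0;
}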