Support identity domains for devices that do not offer the
VIRTIO_IOMMU_F_BYPASS_CONFIG feature, by creating 1:1 mappings between
the virtual and physical address space. Identity domains created this
way still perform noticeably better than DMA domains, because they
don't have the overhead of setting up and tearing down mappings at
runtime. The performance difference between this and bypass is minimal
in comparison.

It does not matter that the physical addresses in the identity mappings
do not all correspond to memory. By enabling passthrough we are
trusting the device driver and the device itself to only perform DMA to
suitable locations. In some cases it may even be desirable to perform
DMA to MMIO regions.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@xxxxxxxxxx>
---
 drivers/iommu/virtio-iommu.c | 61 +++++++++++++++++++++++++++++++++---
 1 file changed, 57 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index eceb9281c8c1..c9e8367d2962 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -375,6 +375,55 @@ static size_t viommu_del_mappings(struct viommu_domain *vdomain,
 	return unmapped;
 }
 
+/*
+ * Fill the domain with identity mappings, skipping the device's reserved
+ * regions.
+ */
+static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
+				      struct viommu_domain *vdomain)
+{
+	int ret;
+	struct iommu_resv_region *resv;
+	u64 iova = vdomain->domain.geometry.aperture_start;
+	u64 limit = vdomain->domain.geometry.aperture_end;
+	u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+	unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);
+
+	iova = ALIGN(iova, granule);
+	limit = ALIGN_DOWN(limit + 1, granule) - 1;
+
+	list_for_each_entry(resv, &vdev->resv_regions, list) {
+		u64 resv_start = ALIGN_DOWN(resv->start, granule);
+		u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;
+
+		if (resv_end < iova || resv_start > limit)
+			/* No overlap */
+			continue;
+
+		if (resv_start > iova) {
+			ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
+						 (phys_addr_t)iova, flags);
+			if (ret)
+				goto err_unmap;
+		}
+
+		if (resv_end >= limit)
+			return 0;
+
+		iova = resv_end + 1;
+	}
+
+	ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
+				 flags);
+	if (ret)
+		goto err_unmap;
+	return 0;
+
+err_unmap:
+	viommu_del_mappings(vdomain, 0, iova);
+	return ret;
+}
+
 /*
  * viommu_replay_mappings - re-send MAP requests
  *
@@ -637,14 +686,18 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
 	vdomain->viommu		= viommu;
 
 	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
-		if (!virtio_has_feature(viommu->vdev,
-					VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
+		if (virtio_has_feature(viommu->vdev,
+				       VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
+			vdomain->bypass = true;
+			return 0;
+		}
+
+		ret = viommu_domain_map_identity(vdev, vdomain);
+		if (ret) {
 			ida_free(&viommu->domain_ids, vdomain->id);
 			vdomain->viommu = 0;
 			return -EOPNOTSUPP;
 		}
-
-		vdomain->bypass = true;
 	}
 
 	return 0;
-- 
2.33.0

_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization