Hi All,

On 2018-09-12 17:24, Robin Murphy wrote:
> Most parts of iommu-dma already assume they are operating on a default
> domain set up by iommu_dma_init_domain(), and can be converted straight
> over to avoid the refcounting bottleneck. MSI page mappings may be in
> an unmanaged domain with an explicit MSI-only cookie, so retain the
> non-specific lookup, but that's OK since they're far from a contended
> fast path either way.
>
> Signed-off-by: Robin Murphy <robin.murphy@xxxxxxx>

This breaks the Exynos DRM driver with the Exynos IOMMU on ARM64
(Exynos5433). Exynos DRM creates its own domain, attaches all devices
that perform DMA access (typically CRTC devices) to it, and uses the
standard DMA-mapping calls to allocate/map buffers. This way it can use
the same DMA address for each buffer regardless of the
CRTC/display/processing device. This no longer works with this patch.

The simplest way to fix this would be an API for changing the
default_domain to the one allocated by the Exynos DRM driver.
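For reference, the scheme looks roughly like the sketch below. The
function and variable names are illustrative rather than the actual
exynos_drm code, and the proposed default-domain API appears only as a
hypothetical iommu_set_default_domain() call in a comment:

#include <linux/dma-mapping.h>
#include <linux/iommu.h>

/* Simplified sketch of the Exynos DRM scheme; names are illustrative. */
static int sketch_setup_shared_domain(struct device *crtc_dev, size_t size)
{
	struct iommu_domain *domain;
	dma_addr_t dma_addr;
	void *vaddr;
	int ret;

	/* Driver-private domain shared by all DMA-capable components. */
	domain = iommu_domain_alloc(crtc_dev->bus);
	if (!domain)
		return -ENOMEM;

	/* Each CRTC/display/processing device is attached to it... */
	ret = iommu_attach_device(domain, crtc_dev);
	if (ret)
		goto free_domain;

	/*
	 * ...so a single standard DMA-mapping allocation yields one DMA
	 * address that is valid on every attached device.  With this
	 * patch, iommu-dma operates on the group's default_domain
	 * instead of the attached domain, which breaks this scheme.
	 */
	vaddr = dma_alloc_attrs(crtc_dev, size, &dma_addr, GFP_KERNEL, 0);
	if (!vaddr) {
		ret = -ENOMEM;
		goto detach;
	}

	/*
	 * Hypothetical API (does not exist today) that would fix this by
	 * making the driver-allocated domain the default_domain, so that
	 * iommu_get_dma_domain() keeps returning the right domain:
	 *
	 *	ret = iommu_set_default_domain(crtc_dev, domain);
	 */
	return 0;

detach:
	iommu_detach_device(domain, crtc_dev);
free_domain:
	iommu_domain_free(domain);
	return ret;
}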
> ---
>  drivers/iommu/dma-iommu.c | 23 ++++++++++++-----------
>  1 file changed, 12 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 511ff9a1d6d9..320f9ea82f3f 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -491,7 +491,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
>  void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
>  		dma_addr_t *handle)
>  {
> -	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
> +	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
>  	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
>  	*handle = IOMMU_MAPPING_ERROR;
>  }
> @@ -518,7 +518,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
>  		unsigned long attrs, int prot, dma_addr_t *handle,
>  		void (*flush_page)(struct device *, const void *, phys_addr_t))
>  {
> -	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
> +	struct iommu_domain *domain = iommu_get_dma_domain(dev);
>  	struct iommu_dma_cookie *cookie = domain->iova_cookie;
>  	struct iova_domain *iovad = &cookie->iovad;
>  	struct page **pages;
> @@ -606,9 +606,8 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
>  }
>
>  static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
> -		size_t size, int prot)
> +		size_t size, int prot, struct iommu_domain *domain)
>  {
> -	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
>  	struct iommu_dma_cookie *cookie = domain->iova_cookie;
>  	size_t iova_off = 0;
>  	dma_addr_t iova;
> @@ -632,13 +631,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
>  dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
>  		unsigned long offset, size_t size, int prot)
>  {
> -	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
> +	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
> +			iommu_get_dma_domain(dev));
>  }
>
>  void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
>  		enum dma_data_direction dir, unsigned long attrs)
>  {
> -	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
> +	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
>  }
>
>  /*
> @@ -726,7 +726,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
>  int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>  		int nents, int prot)
>  {
> -	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
> +	struct iommu_domain *domain = iommu_get_dma_domain(dev);
>  	struct iommu_dma_cookie *cookie = domain->iova_cookie;
>  	struct iova_domain *iovad = &cookie->iovad;
>  	struct scatterlist *s, *prev = NULL;
> @@ -811,20 +811,21 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
>  		sg = tmp;
>  	}
>  	end = sg_dma_address(sg) + sg_dma_len(sg);
> -	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
> +	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
>  }
>
>  dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
>  		size_t size, enum dma_data_direction dir, unsigned long attrs)
>  {
>  	return __iommu_dma_map(dev, phys, size,
> -			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
> +			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
> +			iommu_get_dma_domain(dev));
>  }
>
>  void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
>  		size_t size, enum dma_data_direction dir, unsigned long attrs)
>  {
> -	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
> +	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
>  }
>
>  int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
> @@ -850,7 +851,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
>  	if (!msi_page)
>  		return NULL;
>
> -	iova = __iommu_dma_map(dev, msi_addr, size, prot);
> +	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
>  	if (iommu_dma_mapping_error(dev, iova))
>  		goto out_free_page;
>

Best regards
--
Marek Szyprowski, PhD
Samsung R&D Institute Poland