Set the dma_ops per device so we can remove the iommu_no_mapping code.

Signed-off-by: Tom Murphy <tmurphy@xxxxxxxxxx>
---
 drivers/iommu/intel-iommu.c | 85 +++----------------------------------
 1 file changed, 6 insertions(+), 79 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index eace915602f0..2db1dc47e7e4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2622,17 +2622,6 @@ static int __init si_domain_init(int hw)
 	return 0;
 }
 
-static int identity_mapping(struct device *dev)
-{
-	struct device_domain_info *info;
-
-	info = dev->archdata.iommu;
-	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
-		return (info->domain == si_domain);
-
-	return 0;
-}
-
 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
 	struct dmar_domain *ndomain;
@@ -3270,43 +3259,6 @@ static unsigned long intel_alloc_iova(struct device *dev,
 	return iova_pfn;
 }
 
-/* Check if the dev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct device *dev)
-{
-	int found;
-
-	if (iommu_dummy(dev))
-		return 1;
-
-	found = identity_mapping(dev);
-	if (found) {
-		/*
-		 * If the device's dma_mask is less than the system's memory
-		 * size then this is not a candidate for identity mapping.
-		 */
-		u64 dma_mask = *dev->dma_mask;
-
-		if (dev->coherent_dma_mask &&
-		    dev->coherent_dma_mask < dma_mask)
-			dma_mask = dev->coherent_dma_mask;
-
-		if (dma_mask < dma_get_required_mask(dev)) {
-			/*
-			 * 32 bit DMA is removed from si_domain and fall back
-			 * to non-identity mapping.
-			 */
-			dmar_remove_one_dev_info(dev);
-			dev_warn(dev, "32bit DMA uses non-identity mapping\n");
-
-			return 0;
-		}
-
-		return 1;
-	}
-
-	return 0;
-}
-
 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 				     size_t size, int dir, u64 dma_mask)
 {
@@ -3320,9 +3272,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (iommu_no_mapping(dev))
-		return paddr;
-
 	domain = find_domain(dev);
 	if (!domain)
 		return DMA_MAPPING_ERROR;
@@ -3391,9 +3340,6 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 	struct intel_iommu *iommu;
 	struct page *freelist;
 
-	if (iommu_no_mapping(dev))
-		return;
-
 	domain = find_domain(dev);
 	BUG_ON(!domain);
 
@@ -3442,9 +3388,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	if (!iommu_no_mapping(dev))
-		flags &= ~(GFP_DMA | GFP_DMA32);
-	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+	if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
 		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
 			flags |= GFP_DMA;
 		else
@@ -3456,11 +3400,6 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 
 		page = dma_alloc_from_contiguous(dev, count, order,
 						 flags & __GFP_NOWARN);
-		if (page && iommu_no_mapping(dev) &&
-		    page_to_phys(page) + size > dev->coherent_dma_mask) {
-			dma_release_from_contiguous(dev, page, count);
-			page = NULL;
-		}
 	}
 
 	if (!page)
@@ -3510,20 +3449,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
 }
 
-static int intel_nontranslate_map_sg(struct device *hddev,
-			struct scatterlist *sglist, int nelems, int dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i) {
-		BUG_ON(!sg_page(sg));
-		sg->dma_address = sg_phys(sg);
-		sg->dma_length = sg->length;
-	}
-	return nelems;
-}
-
 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 			enum dma_data_direction dir, unsigned long attrs)
 {
@@ -3538,8 +3463,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (iommu_no_mapping(dev))
-		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
 
 	domain = find_domain(dev);
 	if (!domain)
@@ -4570,7 +4493,6 @@ int __init intel_iommu_init(void)
 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
 	swiotlb = 0;
 #endif
-	dma_ops = &intel_dma_ops;
 
 	init_iommu_pm_ops();
 
@@ -4949,6 +4871,7 @@ static int intel_iommu_add_device(struct device *dev)
 {
 	struct intel_iommu *iommu;
 	struct iommu_group *group;
+	struct iommu_domain *domain;
 	u8 bus, devfn;
 
 	iommu = device_to_iommu(dev, &bus, &devfn);
@@ -4965,6 +4888,10 @@ static int intel_iommu_add_device(struct device *dev)
 	if (IS_ERR(group))
 		return PTR_ERR(group);
 
+	domain = iommu_get_domain_for_dev(dev);
+	if (domain->type == IOMMU_DOMAIN_DMA)
+		dev->dma_ops = &intel_dma_ops;
+
 	iommu_group_put(group);
 	return 0;
 }
-- 
2.17.1
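
Context for reviewers, not part of the patch: the reason setting dev->dma_ops
only for devices attached to a DMA-type default domain is enough to drop
iommu_no_mapping() is that the core DMA API dispatches through per-device ops
when they are set and otherwise falls back to the architecture/direct ops, so
devices left in the identity si_domain never reach intel_dma_ops at all. A
rough sketch of that dispatch, from my reading of get_dma_ops() in
include/linux/dma-mapping.h around this kernel version (treat the exact form
as approximate):

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* Per-device ops win; with this patch, intel_iommu_add_device()
	 * sets them only for devices whose domain is IOMMU_DOMAIN_DMA. */
	if (dev->dma_ops)
		return dev->dma_ops;
	/* Devices kept in the identity si_domain fall through to the
	 * architecture/direct-mapping ops, i.e. no IOVA translation. */
	return get_arch_dma_ops(dev->bus);
}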