iommu_dma_map_mixed and iommu_dma_unmap_mixed operate on
IOMMU_DOMAIN_MIXED typed domains. On top of standard iommu_map/unmap
they reserve the IOVA window to prevent the iova allocator from
allocating in those areas.

Signed-off-by: Eric Auger <eric.auger@xxxxxxxxxx>
---
 drivers/iommu/dma-iommu.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dma-iommu.h | 18 ++++++++++++++++++
 2 files changed, 83 insertions(+)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 04bbc85..db21143 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -759,3 +759,68 @@ int iommu_get_dma_msi_region_cookie(struct iommu_domain *domain,
 	return 0;
 }
 EXPORT_SYMBOL(iommu_get_dma_msi_region_cookie);
+
+/*
+ * iommu_dma_map_mixed - map @paddr at @iova in an IOMMU_DOMAIN_MIXED
+ * domain and reserve the IOVA window so the iova allocator never hands
+ * it out.
+ *
+ * Returns 0 on success, -EINVAL for a non-MIXED domain or a missing
+ * iova cookie, -ENOMEM when the reservation cannot be recorded, or the
+ * error returned by iommu_map().
+ */
+int iommu_dma_map_mixed(struct iommu_domain *domain, unsigned long iova,
+			phys_addr_t paddr, size_t size, int prot)
+{
+	struct iova_domain *iovad;
+	unsigned long lo, hi;
+	int ret;
+
+	if (domain->type != IOMMU_DOMAIN_MIXED)
+		return -EINVAL;
+
+	if (!domain->iova_cookie)
+		return -EINVAL;
+
+	iovad = cookie_iovad(domain);
+
+	lo = iova_pfn(iovad, iova);
+	hi = iova_pfn(iovad, iova + size - 1);
+	/* reserve_iova() allocates internally and can fail: check it */
+	if (!reserve_iova(iovad, lo, hi))
+		return -ENOMEM;
+	ret = iommu_map(domain, iova, paddr, size, prot);
+	if (ret)
+		/* drop the reservation again on mapping failure */
+		free_iova(iovad, lo);
+	return ret;
+}
+EXPORT_SYMBOL(iommu_dma_map_mixed);
+
+/*
+ * iommu_dma_unmap_mixed - unmap a window previously mapped with
+ * iommu_dma_map_mixed() and release its IOVA reservation.
+ *
+ * Returns the number of bytes actually unmapped; 0 when the domain is
+ * not IOMMU_DOMAIN_MIXED or has no cookie (size_t cannot carry a
+ * negative errno, so failure is reported as "nothing unmapped").
+ */
+size_t iommu_dma_unmap_mixed(struct iommu_domain *domain, unsigned long iova,
+			     size_t size)
+{
+	struct iova_domain *iovad;
+	unsigned long lo;
+	size_t ret;
+
+	if (domain->type != IOMMU_DOMAIN_MIXED)
+		return 0;
+
+	if (!domain->iova_cookie)
+		return 0;
+
+	iovad = cookie_iovad(domain);
+	lo = iova_pfn(iovad, iova);
+
+	ret = iommu_unmap(domain, iova, size);
+	/* only release the reservation if the whole window went away */
+	if (ret == size)
+		free_iova(iovad, lo);
+	return ret;
+}
+EXPORT_SYMBOL(iommu_dma_unmap_mixed);
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 1c55413..f2aa855 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -70,6 +70,12 @@ void
iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); int iommu_get_dma_msi_region_cookie(struct iommu_domain *domain, dma_addr_t base, u64 size); +int iommu_dma_map_mixed(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + +size_t iommu_dma_unmap_mixed(struct iommu_domain *domain, unsigned long iova, + size_t size); + #else struct iommu_domain; @@ -99,6 +105,18 @@ static inline int iommu_get_dma_msi_region_cookie(struct iommu_domain *domain, return -ENODEV; } +int iommu_dma_map_mixed(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + return -ENODEV; +} + +size_t iommu_dma_unmap_mixed(struct iommu_domain *domain, unsigned long iova, + size_t size) +{ + return -ENODEV; +} + #endif /* CONFIG_IOMMU_DMA */ #endif /* __KERNEL__ */ #endif /* __DMA_IOMMU_H */ -- 1.9.1 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html