+ if (!iova)
+ return DMA_MAPPING_ERROR;
+
+ dma_addr = iova_dma_addr(&vdpasim->iova, iova);
+
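+ /* Record the IOVA -> physical translation in the device IOTLB so
+ * that vringh can translate descriptor addresses on the datapath.
+ */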
+ spin_lock(&vdpasim->iommu_lock);
+ ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
+ (u64)dma_addr + size - 1, (u64)paddr, perm);
+ spin_unlock(&vdpasim->iommu_lock);
+
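+ /* Roll back the IOVA allocation if the IOTLB update failed */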
+ if (ret) {
+ __free_iova(&vdpasim->iova, iova);
+ return DMA_MAPPING_ERROR;
+ }
+
+ return dma_addr;
+}
+
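+/*
+ * Undo vdpasim_map_range(): remove the IOTLB entry for the range, then
+ * return the IOVA range to the allocator.
+ */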
+static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
+ size_t size)
+{
+ spin_lock(&vdpasim->iommu_lock);
+ vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
+ (u64)dma_addr + size - 1);
+ spin_unlock(&vdpasim->iommu_lock);
+
+ free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
+}
+
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
- int ret, perm = dir_to_perm(dir);
+ phys_addr_t paddr = page_to_phys(page) + offset;
+ int perm = dir_to_perm(dir);
if (perm < 0)
return DMA_MAPPING_ERROR;
- /* For simplicity, use identical mapping to avoid e.g iova
- * allocator.
- */
- spin_lock(&vdpasim->iommu_lock);
- ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
- pa, dir_to_perm(dir));
- spin_unlock(&vdpasim->iommu_lock);
- if (ret)
- return DMA_MAPPING_ERROR;
-
- return (dma_addr_t)(pa);
+ return vdpasim_map_range(vdpasim, paddr, size, perm);
}
static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
@@ -159,12 +187,8 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_del_range(iommu, (u64)dma_addr,
- (u64)dma_addr + size - 1);
- spin_unlock(&vdpasim->iommu_lock);
+ vdpasim_unmap_range(vdpasim, dma_addr, size);
}
static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -172,27 +196,22 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- void *addr = kmalloc(size, flag);
- int ret;
+ phys_addr_t paddr;
+ void *addr;
- spin_lock(&vdpasim->iommu_lock);
+ addr = kmalloc(size, flag);
if (!addr) {
*dma_addr = DMA_MAPPING_ERROR;
- } else {
- u64 pa = virt_to_phys(addr);
-
- ret = vhost_iotlb_add_range(iommu, (u64)pa,
- (u64)pa + size - 1,
- pa, VHOST_MAP_RW);
- if (ret) {
- *dma_addr = DMA_MAPPING_ERROR;
- kfree(addr);
- addr = NULL;
- } else
- *dma_addr = (dma_addr_t)pa;
+ return NULL;
+ }
+
+ paddr = virt_to_phys(addr);
+
+ *dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
+ if (*dma_addr == DMA_MAPPING_ERROR) {
+ kfree(addr);
+ return NULL;
}
- spin_unlock(&vdpasim->iommu_lock);
return addr;
}
@@ -202,14 +221,10 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_del_range(iommu, (u64)dma_addr,
- (u64)dma_addr + size - 1);
- spin_unlock(&vdpasim->iommu_lock);
+ vdpasim_unmap_range(vdpasim, dma_addr, size);
- kfree(phys_to_virt((uintptr_t)dma_addr));
+ kfree(vaddr);
}
static const struct dma_map_ops vdpasim_dma_ops = {
@@ -270,6 +285,13 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
for (i = 0; i < dev_attr->nvqs; i++)
vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);
+ ret = iova_cache_get();
+ if (ret)
+ goto err_iommu;
+
+ /* For simplicity we use an IOVA allocator with byte granularity */
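+ init_iova_domain(&vdpasim->iova, 1, 0);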