To prepare for the ASID support for vhost-vdpa, pass the IOTLB object
to the DMA helpers. No functional changes; this is just a preparation
for supporting multiple IOTLBs.

Signed-off-by: Jason Wang <jasowang@xxxxxxxxxx>
Signed-off-by: Gautam Dawar <gdawar@xxxxxxxxxx>
---
 drivers/vhost/vdpa.c | 67 ++++++++++++++++++++++++--------------------
 1 file changed, 37 insertions(+), 30 deletions(-)

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 851539807bc9..146911082514 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -503,10 +503,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 	return r;
 }
 
-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
+				struct vhost_iotlb *iotlb,
+				u64 start, u64 last)
 {
 	struct vhost_dev *dev = &v->vdev;
-	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct vhost_iotlb_map *map;
 	struct page *page;
 	unsigned long pfn, pinned;
@@ -525,10 +526,10 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 	}
 }
 
-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
+				struct vhost_iotlb *iotlb,
+				u64 start, u64 last)
 {
-	struct vhost_dev *dev = &v->vdev;
-	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct vhost_iotlb_map *map;
 	struct vdpa_map_file *map_file;
 
@@ -540,21 +541,24 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 	}
 }
 
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+				   struct vhost_iotlb *iotlb,
+				   u64 start, u64 last)
 {
 	struct vdpa_device *vdpa = v->vdpa;
 
 	if (vdpa->use_va)
-		return vhost_vdpa_va_unmap(v, start, last);
+		return vhost_vdpa_va_unmap(v, iotlb, start, last);
 
-	return vhost_vdpa_pa_unmap(v, start, last);
+	return vhost_vdpa_pa_unmap(v, iotlb, start, last);
 }
 
 static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
 {
 	struct vhost_dev *dev = &v->vdev;
+	struct vhost_iotlb *iotlb = dev->iotlb;
 
-	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
+	vhost_vdpa_iotlb_unmap(v, iotlb, 0ULL, 0ULL - 1);
 	kfree(dev->iotlb);
 	dev->iotlb = NULL;
 }
@@ -581,15 +585,15 @@ static int perm_to_iommu_flags(u32 perm)
 	return flags | IOMMU_CACHE;
 }
 
-static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
-			  u64 size, u64 pa, u32 perm, void *opaque)
+static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
 {
 	struct vhost_dev *dev = &v->vdev;
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
 	int r = 0;
 
-	r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
+	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
 				      pa, perm, opaque);
 	if (r)
 		return r;
@@ -598,13 +602,13 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
 		r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
 	} else if (ops->set_map) {
 		if (!v->in_batch)
-			r = ops->set_map(vdpa, dev->iotlb);
+			r = ops->set_map(vdpa, iotlb);
 	} else {
 		r = iommu_map(v->domain, iova, pa, size,
 			      perm_to_iommu_flags(perm));
 	}
 	if (r) {
-		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
+		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
 		return r;
 	}
 
@@ -614,25 +618,27 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
 	return 0;
 }
 
-static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
+static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+			     struct vhost_iotlb *iotlb,
+			     u64 iova, u64 size)
 {
-	struct vhost_dev *dev = &v->vdev;
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
 
-	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
+	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
 
 	if (ops->dma_map) {
 		ops->dma_unmap(vdpa, iova, size);
 	} else if (ops->set_map) {
 		if (!v->in_batch)
-			ops->set_map(vdpa, dev->iotlb);
+			ops->set_map(vdpa, iotlb);
 	} else {
 		iommu_unmap(v->domain, iova, size);
 	}
 }
 
 static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+			     struct vhost_iotlb *iotlb,
 			     u64 iova, u64 size, u64 uaddr, u32 perm)
 {
 	struct vhost_dev *dev = &v->vdev;
@@ -662,7 +668,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
 		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
 		map_file->offset = offset;
 		map_file->file = get_file(vma->vm_file);
-		ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
+		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
 				     perm, map_file);
 		if (ret) {
 			fput(map_file->file);
@@ -675,7 +681,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
 		map_iova += map_size;
 	}
 	if (ret)
-		vhost_vdpa_unmap(v, iova, map_iova - iova);
+		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
 
 	mmap_read_unlock(dev->mm);
 
@@ -683,6 +689,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
 }
 
 static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+			     struct vhost_iotlb *iotlb,
 			     u64 iova, u64 size, u64 uaddr, u32 perm)
 {
 	struct vhost_dev *dev = &v->vdev;
@@ -746,7 +753,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 			if (last_pfn && (this_pfn != last_pfn + 1)) {
 				/* Pin a contiguous chunk of memory */
 				csize = PFN_PHYS(last_pfn - map_pfn + 1);
-				ret = vhost_vdpa_map(v, iova, csize,
+				ret = vhost_vdpa_map(v, iotlb, iova, csize,
 						     PFN_PHYS(map_pfn),
 						     perm, NULL);
 				if (ret) {
@@ -776,7 +783,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 	}
 
 	/* Pin the rest chunk */
-	ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
 			     PFN_PHYS(map_pfn), perm, NULL);
 out:
 	if (ret) {
@@ -796,7 +803,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
 				unpin_user_page(pfn_to_page(pfn));
 		}
-		vhost_vdpa_unmap(v, start, size);
+		vhost_vdpa_unmap(v, iotlb, start, size);
 	}
 unlock:
 	mmap_read_unlock(dev->mm);
@@ -807,11 +814,10 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 }
 
 static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+					   struct vhost_iotlb *iotlb,
 					   struct vhost_iotlb_msg *msg)
 {
-	struct vhost_dev *dev = &v->vdev;
 	struct vdpa_device *vdpa = v->vdpa;
-	struct vhost_iotlb *iotlb = dev->iotlb;
 
 	if (msg->iova < v->range.first || !msg->size ||
 	    msg->iova > U64_MAX - msg->size + 1 ||
@@ -823,10 +829,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 		return -EEXIST;
 
 	if (vdpa->use_va)
-		return vhost_vdpa_va_map(v, msg->iova, msg->size,
+		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
 					 msg->uaddr, msg->perm);
 
-	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
 				 msg->perm);
 }
 
@@ -836,6 +842,7 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
 	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
+	struct vhost_iotlb *iotlb = dev->iotlb;
 	int r = 0;
 
 	mutex_lock(&dev->mutex);
@@ -846,17 +853,17 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
 
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
-		r = vhost_vdpa_process_iotlb_update(v, msg);
+		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
 		break;
 	case VHOST_IOTLB_INVALIDATE:
-		vhost_vdpa_unmap(v, msg->iova, msg->size);
+		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
 		break;
 	case VHOST_IOTLB_BATCH_BEGIN:
 		v->in_batch = true;
 		break;
 	case VHOST_IOTLB_BATCH_END:
 		if (v->in_batch && ops->set_map)
-			ops->set_map(vdpa, dev->iotlb);
+			ops->set_map(vdpa, iotlb);
 		v->in_batch = false;
 		break;
 	default:
-- 
2.25.0
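
For illustration only, a minimal sketch of how a later ASID-aware patch
could pick the IOTLB that gets passed to the helpers refactored above.
The struct vhost_vdpa_as layout, the asid_to_iotlb() helper and the "as"
hashtable field assumed on struct vhost_vdpa are hypothetical names for
this sketch, not part of this patch:

#include <linux/hashtable.h>

/*
 * Hypothetical: one vhost_iotlb per address space, kept in a hashtable
 * on struct vhost_vdpa (e.g. a DECLARE_HASHTABLE(as, ...) field), keyed
 * by the ASID.
 */
struct vhost_vdpa_as {
	struct hlist_node hash_link;	/* linked into v->as, keyed by id */
	struct vhost_iotlb iotlb;	/* mappings of this address space */
	u32 id;				/* the ASID */
};

static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as;

	/* Walk the bucket that "asid" hashes to and match the exact id */
	hash_for_each_possible(v->as, as, hash_link, asid)
		if (as->id == asid)
			return &as->iotlb;

	return NULL;
}

With such a lookup in place, vhost_vdpa_process_iotlb_msg() could
resolve the IOTLB from an ASID carried by a future IOTLB message
instead of always taking dev->iotlb, and hand it to the helpers exactly
as they are wired up here; that is the point of threading the iotlb
argument through in this patch.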