[PATCH V1 8/8] vfio/type1: change dma owner

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Implement VFIO_CHANGE_DMA_OWNER in the type1 iommu.

Signed-off-by: Steve Sistare <steven.sistare@xxxxxxxxxx>
---
 drivers/vfio/vfio_iommu_type1.c | 119 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 119 insertions(+)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index fbea2b5..55ba1e7 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1509,6 +1509,112 @@ static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
 	return list_empty(iova);
 }
 
+/*
+ * Return true if mm1 vaddr1 maps the same memory object as mm2 vaddr2.
+ * This does not prevent other tasks from concurrently modifying mappings
+ * and invalidating this test, but that would be an application bug.
+ */
+static bool same_file_mapping(struct mm_struct *mm1, unsigned long vaddr1,
+			      struct mm_struct *mm2, unsigned long vaddr2)
+{
+	const unsigned long mask = VM_READ | VM_WRITE | VM_EXEC | VM_SHARED;
+	struct inode *inode1 = NULL, *inode2 = NULL;
+	unsigned long pgoff1, len1, flags1;
+	unsigned long pgoff2, len2, flags2;
+	struct vm_area_struct *vma;
+
+	mmap_read_lock(mm1);
+	vma = find_vma(mm1, vaddr1);
+	if (vma && vma->vm_file) {
+		inode1 = vma->vm_file->f_inode;
+		pgoff1 = vma->vm_pgoff;
+		len1 = vma->vm_end - vma->vm_start;
+		flags1 = vma->vm_flags & mask;
+	}
+	mmap_read_unlock(mm1);
+
+	mmap_read_lock(mm2);
+	vma = find_vma(mm2, vaddr2);
+	if (vma && vma->vm_file) {
+		inode2 = vma->vm_file->f_inode;
+		pgoff2 = vma->vm_pgoff;
+		len2 = vma->vm_end - vma->vm_start;
+		flags2 = vma->vm_flags & mask;
+	}
+	mmap_read_unlock(mm2);
+
+	if (!inode1 || (inode1 != inode2) || (pgoff1 != pgoff2) ||
+	    (len1 != len2) || (flags1 != flags2)) {
+		pr_debug("vfio vma mismatch for old va %lx vs new va %lx\n",
+			 vaddr1, vaddr2);
+		return false;
+	} else {
+		return true;
+	}
+}
+
+static int change_dma_owner(struct vfio_iommu *iommu)
+{
+	struct rb_node *p;
+	struct vfio_dma *dma;
+	unsigned long new_vaddr;
+	uint64_t npages = 0;
+	int ret = 0, new_vaddrs = 0, total = 0;
+	bool new_lock_cap = capable(CAP_IPC_LOCK);
+	struct mm_struct *old_mm, *new_mm = current->mm;
+	struct task_struct *old_task = iommu->task;
+	struct task_struct *new_task = current->group_leader;
+
+	if (!old_task)
+		return 0;	/* nothing mapped, nothing to do */
+
+	old_mm = get_task_mm(old_task);
+	if (!old_mm)
+		return -ESRCH;
+
+	for (p = rb_first(&iommu->dma_list); p; p = rb_next(p)) {
+		dma = rb_entry(p, struct vfio_dma, node);
+		npages += dma->locked_vm;
+		total++;
+		new_vaddrs += (dma->new_vaddr != 0);
+
+		new_vaddr = dma->new_vaddr ? dma->new_vaddr : dma->vaddr;
+		if (!same_file_mapping(old_mm, dma->vaddr, new_mm, new_vaddr)) {
+			ret = -ENXIO;
+			goto out;
+		}
+	}
+
+	/* If any vaddrs change, all must change */
+	if (new_vaddrs && (total - new_vaddrs)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (npages) {
+		ret = mm_lock_vm(new_task, new_mm, new_lock_cap, npages);
+		if (ret)
+			goto out;
+		/* should always succeed */
+		mm_lock_vm(old_task, old_mm, dma->lock_cap, -npages);
+	}
+
+	for (p = rb_first(&iommu->dma_list); p; p = rb_next(p)) {
+		dma = rb_entry(p, struct vfio_dma, node);
+		dma->lock_cap = new_lock_cap;
+		if (dma->new_vaddr) {
+			dma->vaddr = dma->new_vaddr;
+			dma->new_vaddr = 0;
+		}
+	}
+
+	vfio_iommu_set_task(iommu, new_task);
+
+out:
+	mmput(old_mm);
+	return ret;
+}
+
 static int vfio_dma_do_map(struct vfio_iommu *iommu,
 			   struct vfio_iommu_type1_dma_map *map)
 {
@@ -2604,6 +2710,7 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
 	case VFIO_TYPE1v2_IOMMU:
 	case VFIO_TYPE1_NESTING_IOMMU:
 	case VFIO_UNMAP_ALL:
+	case VFIO_DMA_OWNER:
 		return 1;
 	case VFIO_DMA_CC_IOMMU:
 		if (!iommu)
@@ -2944,6 +3051,17 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
 	return -EINVAL;
 }
 
+static int vfio_iommu_type1_change_dma_owner(void *iommu_data)
+{
+	struct vfio_iommu *iommu = iommu_data;
+	int ret;
+
+	mutex_lock(&iommu->lock);
+	ret = change_dma_owner(iommu);
+	mutex_unlock(&iommu->lock);
+	return ret;
+}
+
 static void vfio_iommu_type1_close_dma_owner(void *iommu_data)
 {
 	struct vfio_iommu *iommu = iommu_data;
@@ -3136,6 +3254,7 @@ static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
 	.unregister_device	= vfio_iommu_type1_unregister_device,
 	.dma_rw			= vfio_iommu_type1_dma_rw,
 	.group_iommu_domain	= vfio_iommu_type1_group_iommu_domain,
+	.change_dma_owner	= vfio_iommu_type1_change_dma_owner,
 	.close_dma_owner	= vfio_iommu_type1_close_dma_owner,
 };
 
-- 
1.8.3.1




[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux