Revert this dead code:
  commit 898b9eaeb3fe ("vfio/type1: block on invalid vaddr")

Signed-off-by: Steve Sistare <steven.sistare@xxxxxxxxxx>
Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 drivers/vfio/vfio_iommu_type1.c | 94 +++--------------------------------------
 1 file changed, 5 insertions(+), 89 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 1036736..eb2d243 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -72,7 +72,6 @@ struct vfio_iommu {
 	unsigned int		vaddr_invalid_count;
 	uint64_t		pgsize_bitmap;
 	uint64_t		num_non_pinned_groups;
-	wait_queue_head_t	vaddr_wait;
 	bool			v2;
 	bool			nesting;
 	bool			dirty_page_tracking;
@@ -154,8 +153,6 @@ struct vfio_regions {
 #define DIRTY_BITMAP_PAGES_MAX	((u64)INT_MAX)
 #define DIRTY_BITMAP_SIZE_MAX	DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
 
-#define WAITED 1
-
 static int put_pfn(unsigned long pfn, int prot);
 
 static struct vfio_iommu_group*
@@ -606,61 +603,6 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
 	return ret;
 }
 
-static int vfio_wait(struct vfio_iommu *iommu)
-{
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&iommu->vaddr_wait, &wait, TASK_KILLABLE);
-	mutex_unlock(&iommu->lock);
-	schedule();
-	mutex_lock(&iommu->lock);
-	finish_wait(&iommu->vaddr_wait, &wait);
-	if (kthread_should_stop() || !iommu->container_open ||
-	    fatal_signal_pending(current)) {
-		return -EFAULT;
-	}
-	return WAITED;
-}
-
-/*
- * Find dma struct and wait for its vaddr to be valid.  iommu lock is dropped
- * if the task waits, but is re-locked on return.  Return result in *dma_p.
- * Return 0 on success with no waiting, WAITED on success if waited, and -errno
- * on error.
- */
-static int vfio_find_dma_valid(struct vfio_iommu *iommu, dma_addr_t start,
-			       size_t size, struct vfio_dma **dma_p)
-{
-	int ret = 0;
-
-	do {
-		*dma_p = vfio_find_dma(iommu, start, size);
-		if (!*dma_p)
-			return -EINVAL;
-		else if (!(*dma_p)->vaddr_invalid)
-			return ret;
-		else
-			ret = vfio_wait(iommu);
-	} while (ret == WAITED);
-
-	return ret;
-}
-
-/*
- * Wait for all vaddr in the dma_list to become valid.  iommu lock is dropped
- * if the task waits, but is re-locked on return.  Return 0 on success with no
- * waiting, WAITED on success if waited, and -errno on error.
- */
-static int vfio_wait_all_valid(struct vfio_iommu *iommu)
-{
-	int ret = 0;
-
-	while (iommu->vaddr_invalid_count && ret >= 0)
-		ret = vfio_wait(iommu);
-
-	return ret;
-}
-
 /*
  * Attempt to pin pages.  We really don't want to track all the pfns and
  * the iommu can only map chunks of consecutive pfns anyway, so get the
@@ -861,7 +803,6 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 	unsigned long remote_vaddr;
 	struct vfio_dma *dma;
 	bool do_accounting;
-	dma_addr_t iova;
 
 	if (!iommu || !pages)
 		return -EINVAL;
@@ -878,22 +819,6 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 		goto pin_done;
 	}
 
-	/*
-	 * Wait for all necessary vaddr's to be valid so they can be used in
-	 * the main loop without dropping the lock, to avoid racing vs unmap.
-	 */
-again:
-	if (iommu->vaddr_invalid_count) {
-		for (i = 0; i < npage; i++) {
-			iova = user_iova + PAGE_SIZE * i;
-			ret = vfio_find_dma_valid(iommu, iova, PAGE_SIZE, &dma);
-			if (ret < 0)
-				goto pin_done;
-			if (ret == WAITED)
-				goto again;
-		}
-	}
-
 	/* Fail if no dma_unmap notifier is registered */
 	if (list_empty(&iommu->device_list)) {
 		ret = -EINVAL;
@@ -909,6 +834,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 
 	for (i = 0; i < npage; i++) {
 		unsigned long phys_pfn;
+		dma_addr_t iova;
 		struct vfio_pfn *vpfn;
 
 		iova = user_iova + PAGE_SIZE * i;
@@ -1193,10 +1119,8 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	put_task_struct(dma->task);
 	mmdrop(dma->mm);
 	vfio_dma_bitmap_free(dma);
-	if (dma->vaddr_invalid) {
+	if (dma->vaddr_invalid)
 		iommu->vaddr_invalid_count--;
-		wake_up_all(&iommu->vaddr_wait);
-	}
 	kfree(dma);
 	iommu->dma_avail++;
 }
@@ -1674,7 +1598,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 			dma->vaddr = vaddr;
 			dma->vaddr_invalid = false;
 			iommu->vaddr_invalid_count--;
-			wake_up_all(&iommu->vaddr_wait);
 		}
 		goto out_unlock;
 	} else if (dma) {
@@ -1757,10 +1680,6 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	int ret;
 
-	ret = vfio_wait_all_valid(iommu);
-	if (ret < 0)
-		return ret;
-
 	/* Arbitrarily pick the first domain in the list for lookups */
 	if (!list_empty(&iommu->domain_list))
 		d = list_first_entry(&iommu->domain_list,
@@ -2651,7 +2570,6 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 	mutex_init(&iommu->lock);
 	mutex_init(&iommu->device_list_lock);
 	INIT_LIST_HEAD(&iommu->device_list);
-	init_waitqueue_head(&iommu->vaddr_wait);
 	iommu->pgsize_bitmap = PAGE_MASK;
 	INIT_LIST_HEAD(&iommu->emulated_iommu_groups);
 
@@ -3148,13 +3066,12 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
 	struct vfio_dma *dma;
 	bool kthread = current->mm == NULL;
 	size_t offset;
-	int ret;
 
 	*copied = 0;
 
-	ret = vfio_find_dma_valid(iommu, user_iova, 1, &dma);
-	if (ret < 0)
-		return ret;
+	dma = vfio_find_dma(iommu, user_iova, 1);
+	if (!dma)
+		return -EINVAL;
 
 	if ((write && !(dma->prot & IOMMU_WRITE)) ||
 	    !(dma->prot & IOMMU_READ))
@@ -3263,7 +3180,6 @@ static void vfio_iommu_type1_notify(void *iommu_data,
 	mutex_lock(&iommu->lock);
 	iommu->container_open = false;
 	mutex_unlock(&iommu->lock);
-	wake_up_all(&iommu->vaddr_wait);
 }
 
 static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
-- 
1.8.3.1
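
Note for anyone reading the removal out of context: the deleted vfio_wait() is
the stock open-coded wait-queue sleep, with the twist that it drops iommu->lock
across schedule() and re-takes it before returning, so every caller had to cope
with the lock having been released. A minimal, simplified sketch of that
pattern follows; the names (example_state, example_wait, example_make_valid)
are illustrative only and appear nowhere in this patch.

	#include <linux/mutex.h>
	#include <linux/sched.h>
	#include <linux/sched/signal.h>
	#include <linux/wait.h>

	struct example_state {
		struct mutex lock;	/* protects valid */
		wait_queue_head_t wq;
		bool valid;		/* condition waited on */
	};

	/* Called with s->lock held; returns with it re-taken. */
	static int example_wait(struct example_state *s)
	{
		DEFINE_WAIT(wait);

		while (!s->valid) {
			/*
			 * Queue ourselves before dropping the lock so a
			 * concurrent wake_up_all() cannot be lost.
			 */
			prepare_to_wait(&s->wq, &wait, TASK_KILLABLE);
			mutex_unlock(&s->lock);
			schedule();
			mutex_lock(&s->lock);
			finish_wait(&s->wq, &wait);
			if (fatal_signal_pending(current))
				return -EFAULT;
		}
		return 0;
	}

	/* Called with s->lock held. */
	static void example_make_valid(struct example_state *s)
	{
		s->valid = true;
		wake_up_all(&s->wq);
	}

The revert deletes both halves of this handshake, the sleeper and every
wake_up_all() site, along with the wait queue itself, which is why the
remaining lookups can simply return -EINVAL when no dma is found.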