Split the operation of msm_gem_get_iova into two operations: 1) allocate
an iova and 2) map (pin) the backing memory into the iommu. This is the
first step toward allowing memory pinning to occur independently of the
iova management.

Signed-off-by: Jordan Crouse <jcrouse@xxxxxxxxxxxxxx>
---
 drivers/gpu/drm/msm/msm_drv.h     |  2 +
 drivers/gpu/drm/msm/msm_gem.c     | 80 ++++++++++++++++++++-----------
 drivers/gpu/drm/msm/msm_gem.h     |  1 +
 drivers/gpu/drm/msm/msm_gem_vma.c | 44 ++++++++++++-----
 4 files changed, 86 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 3a0f2119127b..63b28b69b2ab 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -241,6 +241,8 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
 
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, int npages);
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ff75e557b708..189d7f0f1aad 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -357,52 +357,76 @@ put_iova(struct drm_gem_object *obj)
 	}
 }
 
-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;
 
-	mutex_lock(&msm_obj->lock);
-
-	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
-		return -EBUSY;
-	}
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	vma = lookup_vma(obj, aspace);
 
 	if (!vma) {
-		struct page **pages;
-
 		vma = add_vma(obj, aspace);
-		if (IS_ERR(vma)) {
-			ret = PTR_ERR(vma);
-			goto unlock;
-		}
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
 
-		pages = get_pages(obj);
-		if (IS_ERR(pages)) {
-			ret = PTR_ERR(pages);
-			goto fail;
+		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+		if (ret) {
+			del_vma(vma);
+			return ret;
 		}
-
-		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-				obj->size >> PAGE_SHIFT);
-		if (ret)
-			goto fail;
 	}
 
 	*iova = vma->iova;
-
-	mutex_unlock(&msm_obj->lock);
 	return 0;
+}
+
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+	struct page **pages;
+
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+		return -EBUSY;
+
+	vma = lookup_vma(obj, aspace);
+	if (WARN_ON(!vma))
+		return -EINVAL;
+
+	pages = get_pages(obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+			obj->size >> PAGE_SHIFT);
+}
+
+
+/* get iova, taking a reference. Should have a matching put */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	u64 local;
+	int ret;
+
+	mutex_lock(&msm_obj->lock);
+
+	ret = msm_gem_get_iova_locked(obj, aspace, &local);
+
+	if (!ret)
+		ret = msm_gem_pin_iova(obj, aspace);
+
+	if (!ret)
+		*iova = local;
 
-fail:
-	del_vma(vma);
-unlock:
 	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5d9bd3e47a8..32c13e6559b5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -41,6 +41,7 @@ struct msm_gem_vma {
 	uint64_t iova;
 	struct msm_gem_address_space *aspace;
 	struct list_head list; /* node in msm_gem_object::vmas */
+	bool mapped;
 };
 
 struct msm_gem_object {
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 704ae7e69500..c4c42bf0db0e 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -55,6 +55,7 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 	spin_unlock(&aspace->lock);
 
 	vma->iova = 0;
+	vma->mapped = false;
 
 	msm_gem_address_space_put(aspace);
 }
@@ -63,14 +64,37 @@ int
 msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
 {
-	int ret;
+	unsigned size = npages << PAGE_SHIFT;
+	int ret = 0;
 
-	spin_lock(&aspace->lock);
-	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
-		spin_unlock(&aspace->lock);
+	if (WARN_ON(!vma->iova))
+		return -EINVAL;
+
+	if (vma->mapped)
 		return 0;
-	}
 
+	vma->mapped = true;
+
+	if (aspace->mmu)
+		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+				size, IOMMU_READ | IOMMU_WRITE);
+
+	if (ret)
+		vma->mapped = false;
+
+	return ret;
+}
+
+/* Initialize a new vma and allocate an iova for it */
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, int npages)
+{
+	int ret;
+
+	if (WARN_ON(vma->iova))
+		return -EBUSY;
+
+	spin_lock(&aspace->lock);
 	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
 	spin_unlock(&aspace->lock);
 
@@ -78,17 +102,11 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		return ret;
 
 	vma->iova = vma->node.start << PAGE_SHIFT;
+	vma->mapped = false;
 
-	if (aspace->mmu) {
-		unsigned size = npages << PAGE_SHIFT;
-		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-				size, IOMMU_READ | IOMMU_WRITE);
-	}
-
-	/* Get a reference to the aspace to keep it around */
 	kref_get(&aspace->kref);
 
-	return ret;
+	return 0;
 }
 
 struct msm_gem_address_space *
-- 
2.18.0
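As a rough sketch of how the split is meant to be used at the vma layer (not part
of the patch; the wrapper function below is made up for illustration, and it
assumes the caller holds msm_obj->lock and already has a valid sg_table for the
object's backing pages):

/*
 * Illustration only: msm_gem_init_vma() reserves an iova in the address
 * space without touching the IOMMU; msm_gem_map_vma() pins the backing
 * memory later and returns early if vma->mapped is already set.
 */
static int example_alloc_then_pin(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
{
	int ret;

	/* Step 1: allocate an iova only; no IOMMU mapping is created yet */
	ret = msm_gem_init_vma(aspace, vma, npages);
	if (ret)
		return ret;

	/* vma->iova is now valid and can be handed out */

	/* Step 2: pin (map) the backing memory into the iommu when needed */
	return msm_gem_map_vma(aspace, vma, sgt, npages);
}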