[PATCH 11/17] drm/msm: Add a helper function for in-kernel buffer allocations

Nearly all of the in-kernel buffer allocations create a buffer object,
kernel virtual address and GPU iova at the same time. Add a helper
function to handle the details.
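
For illustration, a minimal sketch of a caller using the new helper
(hypothetical function and buffer names, following the
msm_gem_kernel_new() prototype added to msm_drv.h below; the teardown
calls mirror the driver's existing put/unreference cleanup paths):

static int example_scratch_alloc(struct msm_gpu *gpu)
{
	struct drm_gem_object *bo;
	uint64_t iova;
	void *vaddr;

	/* One call replaces msm_gem_new() + msm_gem_get_vaddr() + msm_gem_get_iova() */
	vaddr = msm_gem_kernel_new(gpu->dev, SZ_4K, MSM_BO_UNCACHED,
		gpu->aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... fill the buffer through vaddr, hand iova to the GPU ... */

	msm_gem_put_vaddr(bo);
	msm_gem_put_iova(bo, gpu->aspace);
	drm_gem_object_unreference_unlocked(bo);

	return 0;
}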

Signed-off-by: Jordan Crouse <jcrouse@xxxxxxxxxxxxxx>
---
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c   | 22 +++-------------
 drivers/gpu/drm/msm/adreno/a5xx_power.c | 14 +++-------
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 26 +++++--------------
 drivers/gpu/drm/msm/msm_drv.h           |  6 +++++
 drivers/gpu/drm/msm/msm_fbdev.c         | 35 ++++++++++---------------
 drivers/gpu/drm/msm/msm_gem.c           | 46 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/msm/msm_ringbuffer.c    | 12 ++++-----
 7 files changed, 86 insertions(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 3acbba1..e533007 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -269,28 +269,14 @@ static int a5xx_me_init(struct msm_gpu *gpu)
 static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 		const struct firmware *fw, u64 *iova)
 {
-	struct drm_device *drm = gpu->dev;
 	struct drm_gem_object *bo;
 	void *ptr;
 
-	bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
-	if (IS_ERR(bo))
-		return bo;
+	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
 
-	ptr = msm_gem_get_vaddr(bo);
-	if (!ptr) {
-		drm_gem_object_unreference(bo);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (iova) {
-		int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
-
-		if (ret) {
-			drm_gem_object_unreference(bo);
-			return ERR_PTR(ret);
-		}
-	}
+	if (IS_ERR(ptr))
+		return ERR_CAST(ptr);
 
 	memcpy(ptr, &fw->data[4], fw->size - 4);
 
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 87af6ee..04aab1d 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -294,16 +294,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	 */
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
 
-	a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
-	if (IS_ERR(a5xx_gpu->gpmu_bo))
-		goto err;
-
-	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
-			&a5xx_gpu->gpmu_iova))
-		goto err;
-
-	ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
-	if (!ptr)
+	ptr = msm_gem_kernel_new_locked(drm, bosize,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+	if (IS_ERR(ptr))
 		goto err;
 
 	while (cmds_size > 0) {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 634e724..6d65684 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -384,29 +384,17 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
-			MSM_BO_UNCACHED);
-	if (IS_ERR(adreno_gpu->memptrs_bo)) {
-		ret = PTR_ERR(adreno_gpu->memptrs_bo);
-		adreno_gpu->memptrs_bo = NULL;
-		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
-		return ret;
-	}
+	adreno_gpu->memptrs = msm_gem_kernel_new(drm,
+		sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
+		&adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
 
-	adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
 	if (IS_ERR(adreno_gpu->memptrs)) {
-		dev_err(drm->dev, "could not vmap memptrs\n");
-		return -ENOMEM;
-	}
-
-	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
-			&adreno_gpu->memptrs_iova);
-	if (ret) {
-		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
-		return ret;
+		ret = PTR_ERR(adreno_gpu->memptrs);
+		adreno_gpu->memptrs = NULL;
+		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
 	}
 
-	return 0;
+	return ret;
 }
 
 void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 0fa9a7d..696413d 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -235,6 +235,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
 
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 5ecf4ff..bfb1fe7 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -78,6 +78,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	struct drm_framebuffer *fb = NULL;
 	struct fb_info *fbi = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd = {0};
+	void *ptr;
 	uint64_t paddr;
 	int ret, size;
 
@@ -97,11 +98,18 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	/* allocate backing bo */
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
-	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
-			MSM_BO_WC | MSM_BO_STOLEN);
-	if (IS_ERR(fbdev->bo)) {
-		ret = PTR_ERR(fbdev->bo);
-		fbdev->bo = NULL;
+
+	/*
+	 * NOTE: if we can be guaranteed to be able to map buffer
+	 * in panic (ie. lock-safe, etc) we could avoid pinning the
+	 * buffer now:
+	 */
+	ptr = msm_gem_kernel_new(dev, size,
+		MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN,
+		priv->kms->aspace, &fbdev->bo, &paddr);
+
+	if (IS_ERR(ptr)) {
+		ret = PTR_ERR(ptr);
 		dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
 		goto fail;
 	}
@@ -119,17 +127,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
 	mutex_lock(&dev->struct_mutex);
 
-	/*
-	 * NOTE: if we can be guaranteed to be able to map buffer
-	 * in panic (ie. lock-safe, etc) we could avoid pinning the
-	 * buffer now:
-	 */
-	ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr);
-	if (ret) {
-		dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
-		goto fail_unlock;
-	}
-
 	fbi = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(fbi)) {
 		dev_err(dev->dev, "failed to allocate fb info\n");
@@ -153,11 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
 	dev->mode_config.fb_base = paddr;
 
-	fbi->screen_base = msm_gem_get_vaddr(fbdev->bo);
-	if (IS_ERR(fbi->screen_base)) {
-		ret = PTR_ERR(fbi->screen_base);
-		goto fail_unlock;
-	}
+	fbi->screen_base = ptr;
 	fbi->screen_size = fbdev->bo->size;
 	fbi->fix.smem_start = paddr;
 	fbi->fix.smem_len = fbdev->bo->size;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 65f3554..6a4e8d0 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1018,3 +1018,49 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	drm_gem_object_unreference_unlocked(obj);
 	return ERR_PTR(ret);
 }
+
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+	void *vaddr;
+	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+	int ret;
+
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	if (iova) {
+		ret = msm_gem_get_iova(obj, aspace, iova);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			return ERR_PTR(ret);
+		}
+	}
+
+	vaddr = msm_gem_get_vaddr(obj);
+	if (!vaddr) {
+		msm_gem_put_iova(obj, aspace);
+		drm_gem_object_unreference(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (bo)
+		*bo = obj;
+
+	return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova)
+{
+	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova)
+{
+	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
+}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 791bca3..bf065a5 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -33,16 +33,14 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
 	}
 
 	ring->gpu = gpu;
-	ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
-	if (IS_ERR(ring->bo)) {
-		ret = PTR_ERR(ring->bo);
-		ring->bo = NULL;
-		goto fail;
-	}
 
-	ring->start = msm_gem_get_vaddr(ring->bo);
+	/* Pass NULL for the iova pointer - we will map it later */
+	ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
+		gpu->aspace, &ring->bo, NULL);
+
 	if (IS_ERR(ring->start)) {
 		ret = PTR_ERR(ring->start);
+		ring->start = 0;
 		goto fail;
 	}
 	ring->end   = ring->start + (size / 4);
-- 
1.9.1
