Re: [PATCH 3/9] drm: remove prime sg_table caching

On 08.05.19 at 11:57, Daniel Vetter wrote:
On Tue, May 07, 2019 at 10:13:32AM +0200, Christian König wrote:
That is now done by the DMA-buf helpers instead.

Signed-off-by: Christian König <christian.koenig@xxxxxxx>
---
  drivers/gpu/drm/drm_prime.c | 76 ++++++++-----------------------------
  1 file changed, 16 insertions(+), 60 deletions(-)
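
For reference, the caching this removes from drm_prime now sits in the DMA-buf
core rather than in each exporter. A rough sketch of what the core-side helper
does, paraphrased from the dma-buf series this patch belongs to (the
cache_sgt_mapping opt-in and the exact flow are from memory, so treat this as
illustrative rather than the exact helper code):

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	/* Hand back the mapping cached on the attachment, if there is one. */
	if (attach->sgt)
		return attach->sgt;

	/* Otherwise ask the exporter for a fresh mapping. */
	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	/* Cache it on the attachment when the exporter opted in. */
	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

	return sg_table;
}

The matching detach path unmaps and frees the cached sg_table, which is why the
per-attachment bookkeeping below can go away.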

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 231e3f6d5f41..90f5230cc0d5 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -86,11 +86,6 @@ struct drm_prime_member {
  	struct rb_node handle_rb;
  };
-struct drm_prime_attachment {
-	struct sg_table *sgt;
-	enum dma_data_direction dir;
-};
-
  static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
  				    struct dma_buf *dma_buf, uint32_t handle)
  {
@@ -188,25 +183,16 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
   * @dma_buf: buffer to attach device to
   * @attach: buffer attachment data
   *
- * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
- * device specific attachment. This can be used as the &dma_buf_ops.attach
- * callback.
+ * Calls &drm_driver.gem_prime_pin for device specific handling. This can be
+ * used as the &dma_buf_ops.attach callback.
   *
   * Returns 0 on success, negative error code on failure.
   */
  int drm_gem_map_attach(struct dma_buf *dma_buf,
  		       struct dma_buf_attachment *attach)
  {
-	struct drm_prime_attachment *prime_attach;
  	struct drm_gem_object *obj = dma_buf->priv;
-	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
-	if (!prime_attach)
-		return -ENOMEM;
-
-	prime_attach->dir = DMA_NONE;
-	attach->priv = prime_attach;
-
  	return drm_gem_pin(obj);
  }
  EXPORT_SYMBOL(drm_gem_map_attach);
@@ -222,26 +208,8 @@ EXPORT_SYMBOL(drm_gem_map_attach);
  void drm_gem_map_detach(struct dma_buf *dma_buf,
  			struct dma_buf_attachment *attach)
  {
-	struct drm_prime_attachment *prime_attach = attach->priv;
  	struct drm_gem_object *obj = dma_buf->priv;
-	if (prime_attach) {
-		struct sg_table *sgt = prime_attach->sgt;
-
-		if (sgt) {
-			if (prime_attach->dir != DMA_NONE)
-				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
-						   sgt->nents,
-						   prime_attach->dir,
-						   DMA_ATTR_SKIP_CPU_SYNC);
-			sg_free_table(sgt);
-		}
-
-		kfree(sgt);
-		kfree(prime_attach);
-		attach->priv = NULL;
-	}
-
  	drm_gem_unpin(obj);
  }
  EXPORT_SYMBOL(drm_gem_map_detach);
@@ -286,39 +254,22 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
  struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
  				     enum dma_data_direction dir)
  {
-	struct drm_prime_attachment *prime_attach = attach->priv;
  	struct drm_gem_object *obj = attach->dmabuf->priv;
  	struct sg_table *sgt;
-	if (WARN_ON(dir == DMA_NONE || !prime_attach))
+	if (WARN_ON(dir == DMA_NONE))
  		return ERR_PTR(-EINVAL);
-	/* return the cached mapping when possible */
-	if (prime_attach->dir == dir)
-		return prime_attach->sgt;
-
-	/*
-	 * two mappings with different directions for the same attachment are
-	 * not allowed
-	 */
-	if (WARN_ON(prime_attach->dir != DMA_NONE))
-		return ERR_PTR(-EBUSY);
-
  	if (obj->funcs)
  		sgt = obj->funcs->get_sg_table(obj);
  	else
  		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
-	if (!IS_ERR(sgt)) {
-		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-				      DMA_ATTR_SKIP_CPU_SYNC)) {
-			sg_free_table(sgt);
-			kfree(sgt);
-			sgt = ERR_PTR(-ENOMEM);
-		} else {
-			prime_attach->sgt = sgt;
-			prime_attach->dir = dir;
-		}
+	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+			      DMA_ATTR_SKIP_CPU_SYNC)) {
+		sg_free_table(sgt);
+		kfree(sgt);
+		sgt = ERR_PTR(-ENOMEM);
  	}
  	return sgt;
@@ -331,14 +282,19 @@ EXPORT_SYMBOL(drm_gem_map_dma_buf);
   * @sgt: scatterlist info of the buffer to unmap
   * @dir: direction of DMA transfer
   *
- * Not implemented. The unmap is done at drm_gem_map_detach().  This can be
- * used as the &dma_buf_ops.unmap_dma_buf callback.
+ * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
   */
  void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
  			   struct sg_table *sgt,
  			   enum dma_data_direction dir)
  {
-	/* nothing to be done here */
+	if (!sgt)
+		return;
This copypasta isn't needed anymore; it's a caller bug if you don't supply
an sgt here. The old caching code only needed it because it only cached
on-demand in the map callback. With that:

Reviewed-by: Daniel Vetter <daniel.vetter@xxxxxxxx>

Thanks for clearing that up; I was already wondering where that stuff came from.

In this case I'm going to remove the extra NULL checks from the amdgpu code as well.

Christian.
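
To make the point concrete: with that check dropped, the unmap callback from
this patch reduces to roughly the following (a sketch; the assumption is that
the DMA-buf core only ever hands back the sg_table it previously got from
drm_gem_map_dma_buf()):

void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	/* sgt is the table returned by drm_gem_map_dma_buf(), never NULL. */
	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}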



+
+	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+			   DMA_ATTR_SKIP_CPU_SYNC);
+	sg_free_table(sgt);
+	kfree(sgt);
  }
  EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
--
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel




