---
drivers/gpu/drm/drm_prime.c | 88 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 88 insertions(+)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index ca09ce7..e82a976 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -73,6 +73,9 @@
* Drivers should detect this situation and return back the gem object
* from the dma-buf private. Prime will do this automatically for drivers that
* use the drm_gem_prime_{import,export} helpers.
+ *
+ * The &dma_buf_ops callbacks implemented for GEM objects are now exported.
+ * They can be reused by drivers that implement the GEM interface.
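+ *
+ * As an illustration, an exporting driver could wire the exported helpers
+ * into its own &dma_buf_ops table. A minimal sketch (the foo_dmabuf_ops name
+ * is only a placeholder, and a driver may of course override any individual
+ * callback)::
+ *
+ *	static const struct dma_buf_ops foo_dmabuf_ops = {
+ *		.attach = drm_gem_map_attach,
+ *		.detach = drm_gem_map_detach,
+ *		.map_dma_buf = drm_gem_map_dma_buf,
+ *		.unmap_dma_buf = drm_gem_unmap_dma_buf,
+ *		.release = drm_gem_dmabuf_release,
+ *		.map = drm_gem_dmabuf_kmap,
+ *		.map_atomic = drm_gem_dmabuf_kmap_atomic,
+ *		.unmap = drm_gem_dmabuf_kunmap,
+ *		.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+ *		.mmap = drm_gem_dmabuf_mmap,
+ *		.vmap = drm_gem_dmabuf_vmap,
+ *		.vunmap = drm_gem_dmabuf_vunmap,
+ *	};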
*/
struct drm_prime_member {
@@ -180,6 +183,18 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
+/**
+ * drm_gem_map_attach - dma_buf attach implementation for GEM
+ * @dma_buf: buffer to attach device to
+ * @target_dev: not used
+ * @attach: buffer attachment data
+ *
+ * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
+ * device-specific attachment. This can be used as the &dma_buf_ops.attach
+ * callback.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
struct dma_buf_attachment *attach)
{
@@ -201,6 +216,14 @@ int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
}
EXPORT_SYMBOL(drm_gem_map_attach);
+/**
+ * drm_gem_map_detach - dma_buf detach implementation for GEM
+ * @dma_buf: buffer to detach from
+ * @attach: attachment to be detached
+ *
+ * Cleans up &dma_buf_attachment. This can be used as the &dma_buf_ops.detach
+ * callback.
+ */
void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
@@ -255,6 +278,18 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
}
}
+/**
+ * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
+ * @attach: attachment whose scatterlist is to be returned
+ * @dir: direction of DMA transfer
+ *
+ * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This
+ * can be used as the &dma_buf_ops.map_dma_buf callback.
+ *
+ * Returns an sg_table containing the scatterlist of the buffer, or an
+ * ERR_PTR()-encoded negative error code on failure. May return -EINTR if it
+ * is interrupted by a signal.
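+ *
+ * Importers reach this callback through dma_buf_map_attachment(). A purely
+ * illustrative importer-side sketch of the error handling (not code from
+ * this file)::
+ *
+ *	struct sg_table *sgt;
+ *
+ *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ *	if (IS_ERR(sgt))
+ *		return PTR_ERR(sgt);
+ *
+ * where a returned -EINTR only means the operation was interrupted and can be
+ * restarted.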
+ */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
@@ -294,6 +329,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);
+/**
+ * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
+ * @attach: attachment to unmap the buffer from
+ * @sgt: scatterlist info of the buffer to unmap
+ * @dir: direction of DMA transfer
+ *
+ * Not implemented. The unmap is done in drm_gem_map_detach(). This can be
+ * used as the &dma_buf_ops.unmap_dma_buf callback.
+ */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
@@ -351,6 +392,15 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
+/**
+ * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ *
+ * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
+ * callback.
+ *
+ * Returns the kernel virtual address.
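+ *
+ * Importers reach this callback through dma_buf_vmap(). A purely illustrative
+ * importer-side sketch (not code from this file, and the -ENOMEM choice is
+ * just an example)::
+ *
+ *	void *vaddr = dma_buf_vmap(dma_buf);
+ *
+ *	if (!vaddr)
+ *		return -ENOMEM;
+ *
+ *	... access the buffer through vaddr ...
+ *
+ *	dma_buf_vunmap(dma_buf, vaddr);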
+ */
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
@@ -360,6 +410,14 @@ void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
+/**
+ * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @vaddr: the virtual address of the buffer
+ *
+ * Releases a kernel virtual mapping. This can be used as the
+ * &dma_buf_ops.vunmap callback.
+ */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_gem_object *obj = dma_buf->priv;
@@ -369,6 +427,11 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
+/**
+ * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @page_num: page number within the buffer
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
+ */
void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num)
{
@@ -376,6 +439,11 @@ void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
}
EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
+/**
+ * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @page_num: page number within the buffer
+ * @addr: virtual address of the page
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
+ */
void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
@@ -383,12 +451,22 @@ void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
}
EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
+/**
+ * drm_gem_dmabuf_kmap - map implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @page_num: page number within the buffer
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.map callback.
+ */
void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
return NULL;
}
EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
+/**
+ * drm_gem_dmabuf_kunmap - unmap implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @page_num: page number within the buffer
+ * @addr: virtual address of the page
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
+ */
void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
void *addr)
{
@@ -396,6 +474,16 @@ void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
}
EXPORT_SYMBOL(drm_gem_dmabuf_kunmap);
+/**
+ * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @vma: virtual address range to map the buffer into
+ *
+ * Sets up a userspace mapping of the buffer, using &drm_driver.gem_prime_mmap.
+ * This can be used as the &dma_buf_ops.mmap callback.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
struct drm_gem_object *obj = dma_buf->priv;