+
+	} else if (dma_buf_is_dynamic(attach->dmabuf)) {
+		reservation_object_lock(attach->dmabuf->resv, NULL);
+		r = dma_buf_pin(attach);
+		if (r) {
+			reservation_object_unlock(attach->dmabuf->resv);
+			return ERR_PTR(r);
+		}
+	}
+
 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 	if (!sg_table)
 		sg_table = ERR_PTR(-ENOMEM);
+	if (dma_buf_attachment_is_dynamic(attach)) {
+		list_add(&attach->node, &attach->dmabuf->attachments);
+
+	} else if (dma_buf_is_dynamic(attach->dmabuf)) {
+		if (IS_ERR(sg_table))
+			dma_buf_unpin(attach);
+		reservation_object_unlock(attach->dmabuf->resv);
+	}
+
 	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
 		attach->sgt = sg_table;
 		attach->dir = direction;
@@ -802,10 +945,41 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 	if (attach->sgt == sg_table)
 		return;
+	if (dma_buf_attachment_is_dynamic(attach))
+		reservation_object_assert_held(attach->dmabuf->resv);
+	else if (dma_buf_is_dynamic(attach->dmabuf))
+		reservation_object_lock(attach->dmabuf->resv, NULL);
+
 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+
+	if (dma_buf_is_dynamic(attach->dmabuf) &&
+	    !dma_buf_attachment_is_dynamic(attach)) {
+		dma_buf_unpin(attach);
+		reservation_object_unlock(attach->dmabuf->resv);
+	}
 }
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
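
For illustration (not part of the patch): with the branches above, a dynamic
importer — one that provides importer_ops — is expected to take the
reservation lock itself around map/unmap. A minimal sketch, where
my_map_locked() is a hypothetical helper:

#include <linux/dma-buf.h>
#include <linux/reservation.h>

/* Hedged sketch: a dynamic importer maps under the reservation lock,
 * matching the dma_buf_attachment_is_dynamic() paths above.
 */
static struct sg_table *my_map_locked(struct dma_buf_attachment *attach,
				      enum dma_data_direction dir)
{
	struct sg_table *sgt;

	reservation_object_lock(attach->dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, dir);
	reservation_object_unlock(attach->dmabuf->resv);

	return sgt;
}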
+/**
+ * dma_buf_invalidate_mappings - invalidate all mappings of this dma_buf
+ *
+ * @dmabuf:	[in]	buffer whose mappings should be invalidated
+ *
+ * Informs all attachments that they need to destroy and recreate all their
+ * mappings.
+ */
+void dma_buf_invalidate_mappings(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+
+	reservation_object_assert_held(dmabuf->resv);
+
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		if (attach->importer_ops && attach->importer_ops->invalidate)
+			attach->importer_ops->invalidate(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_invalidate_mappings);
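
For context, a hedged sketch of the exporter side (not part of the patch):
since the function asserts the reservation lock, an exporter would call it
with the lock held before moving the backing store. struct my_bo and
my_bo_schedule_move() are hypothetical names:

#include <linux/dma-buf.h>
#include <linux/reservation.h>

struct my_bo {
	struct dma_buf *dmabuf;
};

void my_bo_schedule_move(struct my_bo *bo);	/* hypothetical, fenced move */

/* Hedged sketch: notify importers, then move; the move itself must still
 * be ordered against the fences in the reservation object.
 */
static void my_evict(struct my_bo *bo)
{
	reservation_object_lock(bo->dmabuf->resv, NULL);
	dma_buf_invalidate_mappings(bo->dmabuf);
	my_bo_schedule_move(bo);
	reservation_object_unlock(bo->dmabuf->resv);
}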
+
/**
* DOC: cpu access
*
@@ -1225,10 +1399,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	seq_puts(s, "\tAttached Devices:\n");
 	attach_count = 0;
+	reservation_object_lock(buf_obj->resv, NULL);
 	list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
 		seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
 		attach_count++;
 	}
+	reservation_object_unlock(buf_obj->resv);
 	seq_printf(s, "Total %d devices attached\n\n",
 		   attach_count);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 01ad5b942a6f..f9c96bf56bc8 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -92,14 +92,40 @@ struct dma_buf_ops {
 	 */
 	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
+	/**
+	 * @pin:
+	 *
+	 * This is called by dma_buf_pin() and lets the exporter know that the
+	 * DMA-buf can't be moved any more.
+	 *
+	 * This is called with the dmabuf->resv object locked.
+	 *
+	 * This callback is optional.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success, negative error code on failure.
+	 */
+	int (*pin)(struct dma_buf_attachment *attach);
+
+	/**
+	 * @unpin:
+	 *
+	 * This is called by dma_buf_unpin() and lets the exporter know that
+	 * the DMA-buf can be moved again.
+	 *
+	 * This is called with the dmabuf->resv object locked.
+	 *
+	 * This callback is optional.
+	 */
+	void (*unpin)(struct dma_buf_attachment *attach);
+
 	/**
 	 * @map_dma_buf:
 	 *
 	 * This is called by dma_buf_map_attachment() and is used to map a
 	 * shared &dma_buf into device address space, and it is mandatory. It
-	 * can only be called if @attach has been called successfully. This
-	 * essentially pins the DMA buffer into place, and it cannot be moved
-	 * any more
+	 * can only be called if @attach has been called successfully.
 	 *
 	 * This call may sleep, e.g. when the backing storage first needs to be
 	 * allocated, or moved to a location suitable for all currently attached
@@ -120,6 +146,9 @@ struct dma_buf_ops {
 	 * any other kind of sharing that the exporter might wish to make
 	 * available to buffer-users.
 	 *
+	 * This is always called with the dmabuf->resv object locked when
+	 * the pin/unpin callbacks are implemented.
+	 *
 	 * Returns:
 	 *
 	 * A &sg_table scatter list of the backing storage of the DMA buffer,
@@ -137,9 +166,6 @@ struct dma_buf_ops {
 	 *
 	 * This is called by dma_buf_unmap_attachment() and should unmap and
 	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
-	 * It should also unpin the backing storage if this is the last mapping
-	 * of the DMA buffer, it the exporter supports backing storage
-	 * migration.
 	 */
 	void (*unmap_dma_buf)(struct dma_buf_attachment *,
 			      struct sg_table *,
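
To make the pin/unpin contract above concrete, a hedged sketch of how an
exporter might wire up the new optional hooks (not part of the patch; all
my_* names are hypothetical, and the remaining mandatory ops are elided):

#include <linux/dma-buf.h>

int my_bo_make_unmovable(void *bo);	/* hypothetical helpers */
void my_bo_make_movable(void *bo);

/* Hedged sketch: dma_buf_pin()/dma_buf_unpin() already hold dmabuf->resv
 * when these callbacks run, per the documentation above.
 */
static int my_pin(struct dma_buf_attachment *attach)
{
	return my_bo_make_unmovable(attach->dmabuf->priv);
}

static void my_unpin(struct dma_buf_attachment *attach)
{
	my_bo_make_movable(attach->dmabuf->priv);
}

static const struct dma_buf_ops my_dmabuf_ops = {
	.pin	= my_pin,
	.unpin	= my_unpin,
	/* .map_dma_buf, .unmap_dma_buf, .release, ... as usual */
};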
@@ -330,6 +356,35 @@ struct dma_buf {
 	} cb_excl, cb_shared;
 };
+/**
+ * struct dma_buf_attach_ops - importer operations for an attachment
+ * @invalidate: [optional] invalidate all mappings made using this attachment.
+ *
+ * Attachment operations implemented by the importer.
+ */
+struct dma_buf_attach_ops {
+	/**
+	 * @invalidate:
+	 *
+	 * If the invalidate callback is provided, the framework can avoid
+	 * pinning the backing store while mappings exist.
+	 *
+	 * This callback is called with the lock of the reservation object
+	 * associated with the dma_buf held and the mapping function must be
+	 * called with this lock held as well. This makes sure that no mapping
+	 * is created concurrently with an ongoing invalidation.
+	 *
+	 * After the callback all existing mappings are still valid until all
+	 * fences in the dma_buf's reservation object are signaled. After
+	 * getting an invalidation callback all mappings should be destroyed by
+	 * the importer using the normal dma_buf_unmap_attachment() function as
+	 * soon as possible.
+	 *
+	 * New mappings can be created immediately, but can't be used before
+	 * the exclusive fence in the dma_buf's reservation object is signaled.
+	 */
+	void (*invalidate)(struct dma_buf_attachment *attach);
+};
+
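
On the importer side, the callback is typically small since it runs under the
reservation lock. A hedged sketch (not part of the patch; struct my_import and
its rebind worker are hypothetical), deferring the unmap/remap to a worker
that first waits for the fences to signal:

#include <linux/dma-buf.h>
#include <linux/workqueue.h>

struct my_import {
	struct dma_buf_attachment *attach;
	struct work_struct rebind_work;
};

/* Hedged sketch: called with attach->dmabuf->resv held, so only schedule
 * the heavy lifting here; the worker unmaps and remaps once the fences
 * in the reservation object have signaled.
 */
static void my_invalidate(struct dma_buf_attachment *attach)
{
	struct my_import *imp = attach->importer_priv;

	schedule_work(&imp->rebind_work);
}

static const struct dma_buf_attach_ops my_attach_ops = {
	.invalidate = my_invalidate,
};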
/**
* struct dma_buf_attachment - holds device-buffer attachment data
* @dmabuf: buffer for this attachment.
@@ -338,6 +393,8 @@ struct dma_buf {
* @sgt: cached mapping.
* @dir: direction of cached mapping.
* @priv: exporter specific attachment data.
+ * @importer_ops: importer operations for this attachment.
+ * @importer_priv: importer specific attachment data.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -355,6 +412,9 @@ struct dma_buf_attachment {
 	struct sg_table *sgt;
 	enum dma_data_direction dir;
 	void *priv;
+
+	const struct dma_buf_attach_ops *importer_ops;
+	void *importer_priv;
 };
/**
@@ -405,10 +465,42 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
 	get_file(dmabuf->file);
 }
+/**
+ * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
+ * @dmabuf: the DMA-buf to check
+ *
+ * Returns true if a DMA-buf exporter wants to create dynamic sg table mappings
+ * for each attachment. False if only a single static sg table should be used.
+ */
+static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
+{
+	return !!dmabuf->ops->pin;
+}
+
+/**
+ * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
+ * mappings
+ * @attach: the DMA-buf attachment to check
+ *
+ * Returns true if a DMA-buf importer wants to use dynamic sg table mappings
+ * and calls the map/unmap functions with the reservation object locked.
+ */
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+	return attach->importer_ops && attach->importer_ops->invalidate;
+}
+
+
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
-					  struct device *dev);
+					  struct device *dev);
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+		       const struct dma_buf_attach_ops *importer_ops,
+		       void *importer_priv);
 void dma_buf_detach(struct dma_buf *dmabuf,
-		    struct dma_buf_attachment *dmabuf_attach);
+		    struct dma_buf_attachment *attach);
+int dma_buf_pin(struct dma_buf_attachment *attach);
+void dma_buf_unpin(struct dma_buf_attachment *attach);
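
A short usage sketch of the new entry point (not part of the patch), reusing
the hypothetical my_attach_ops and struct my_import from the invalidate
example above:

#include <linux/dma-buf.h>
#include <linux/err.h>

/* Hedged sketch: attaching as a dynamic importer. */
static int my_setup_import(struct dma_buf *dmabuf, struct device *dev,
			   struct my_import *imp)
{
	struct dma_buf_attachment *attach;

	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	imp->attach = attach;
	return 0;
}

An importer without importer_ops keeps today's behavior: dma_buf_attach()
followed by map/unmap, with the framework pinning on its behalf.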
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
@@ -420,6 +512,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
 					enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 			      enum dma_data_direction);
+void dma_buf_invalidate_mappings(struct dma_buf *dma_buf);
 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
--
2.17.1