Provide begin_cpu_access() and end_cpu_access() dma_buf_ops callbacks
for cache synchronisation on exported buffers.

V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers:
dma-sg allocates memory using the page allocator directly, so there is
no guarantee of memory consistency, and CPU accesses to an exported
buffer must be bracketed by explicit cache synchronisation.

Change-Id: Ia0d9d72a8c2a9fe3264ac148f59201573289ed2c
Signed-off-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
---
 .../media/common/videobuf2/videobuf2-dma-sg.c | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 6db60e9d5183..bfc99a0cb7b9 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -470,6 +470,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
 	vb2_dma_sg_put(dbuf->priv);
 }
 
+static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+						  enum dma_data_direction direction)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	return 0;
+}
+
+static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+						enum dma_data_direction direction)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	return 0;
+}
+
 static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
 {
 	struct vb2_dma_sg_buf *buf = dbuf->priv;
@@ -488,6 +508,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
 	.detach = vb2_dma_sg_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
+	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
 	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
 	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
 	.release = vb2_dma_sg_dmabuf_ops_release,
--
2.25.0.341.g760bfbb309-goog
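
For reference, a minimal userspace sketch of how these callbacks get
exercised (not part of the patch): DMA_BUF_IOCTL_SYNC on the exported
buffer fd ends up in dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(),
which dispatch to the new ops. It assumes the buffer was already
exported (e.g. via VIDIOC_EXPBUF) and mmap()ed; the helper name
with_cpu_read_access() is made up for illustration:

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/*
 * Bracket CPU reads of an exported buffer. The first ioctl reaches
 * vb2_dma_sg_dmabuf_ops_begin_cpu_access(), the second one
 * vb2_dma_sg_dmabuf_ops_end_cpu_access().
 */
static int with_cpu_read_access(int dmabuf_fd)
{
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
	};

	/* Claim the buffer for the CPU: caches are synchronised so
	 * the CPU sees what the device wrote. */
	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
		return -1;

	/* ... read the mmap()ed buffer contents here ... */

	/* Hand the buffer back to the device. */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}

In-kernel importers get the same effect by calling
dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() directly
around their CPU accesses.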