On 2020-06-18 16:39, Marek Szyprowski wrote:
Use the recently introduced common wrappers operating directly on the struct
sg_table objects and scatterlist page iterators to make the code a bit
more compact, robust, easier to follow and copy/paste safe.
No functional change, because the code already properly did all the
scatterlist-related calls.
Signed-off-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
---
.../common/videobuf2/videobuf2-dma-contig.c | 41 ++++++++-----------
.../media/common/videobuf2/videobuf2-dma-sg.c | 32 ++++++---------
.../common/videobuf2/videobuf2-vmalloc.c | 12 ++----
3 files changed, 34 insertions(+), 51 deletions(-)
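For context: as I read the earlier patches in this series, the new wrappers are
roughly thin inlines over the existing sg calls that keep the nents/orig_nents
bookkeeping in one place, which is where the robustness and copy/paste safety
come from. Approximately (from memory, so see include/linux/dma-mapping.h for
the real thing):
	/* Approximate shape of dma_map_sgtable() as added earlier in the series. */
	static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
			enum dma_data_direction dir, unsigned long attrs)
	{
		int nents;

		/* Always map the full original entry count... */
		nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
		if (nents <= 0)
			return -EINVAL;
		/* ...and record the mapped count where the unmap/sync wrappers expect it. */
		sgt->nents = nents;
		return 0;
	}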
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index f4b4a7c135eb..ba01a8692d88 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -48,16 +48,15 @@ struct vb2_dc_buf {
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
- struct scatterlist *s;
dma_addr_t expected = sg_dma_address(sgt->sgl);
- unsigned int i;
+ struct sg_dma_page_iter dma_iter;
unsigned long size = 0;
- for_each_sg(sgt->sgl, s, sgt->nents, i) {
- if (sg_dma_address(s) != expected)
+ for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+ if (sg_page_iter_dma_address(&dma_iter) != expected)
break;
- expected = sg_dma_address(s) + sg_dma_len(s);
- size += sg_dma_len(s);
+ expected += PAGE_SIZE;
+ size += PAGE_SIZE;
Same comment as for the DRM version. In fact, given that it's the same
function with the same purpose, might it be worth hoisting it out as a
generic helper for the sg_table API itself? (Rough sketch after the hunk
below.)
}
return size;
}
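Something like the below is what I'd imagine, purely as a sketch; the helper
name and its home are hypothetical, not an existing API, and the body is just
this patch's code lifted out:
	/* Hypothetical generic helper (e.g. in lib/scatterlist.c): length of the
	 * initial DMA-contiguous run of a DMA-mapped sg_table, counted per page.
	 */
	static unsigned long sg_dma_contiguous_size(struct sg_table *sgt)
	{
		dma_addr_t expected = sg_dma_address(sgt->sgl);
		struct sg_dma_page_iter dma_iter;
		unsigned long size = 0;

		for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
			if (sg_page_iter_dma_address(&dma_iter) != expected)
				break;
			expected += PAGE_SIZE;
			size += PAGE_SIZE;
		}
		return size;
	}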
[...]
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 92072a08af25..6ddf953efa11 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -142,9 +142,8 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
- sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (!sgt->nents)
+ if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
As 0-day's explosion of build errors implies, there's a rogue opening brace
here: the original if statement had no braces, so this new one is never
closed (a fixed-up version is sketched after this hunk)...
goto fail_map;
buf->handler.refcount = &buf->refcount;
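In other words, since the error path is still a single statement, presumably
the conversion wants to look something like this (sketch only, not even
compile-tested):
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;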
@@ -180,8 +179,8 @@ static void vb2_dma_sg_put(void *buf_priv)
if (refcount_dec_and_test(&buf->refcount)) {
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
@@ -202,8 +201,7 @@ static void vb2_dma_sg_prepare(void *buf_priv)
if (buf->db_attach)
return;
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir);
+ dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}
static void vb2_dma_sg_finish(void *buf_priv)
@@ -215,7 +213,7 @@ static void vb2_dma_sg_finish(void *buf_priv)
if (buf->db_attach)
return;
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
@@ -258,9 +256,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
- sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (!sgt->nents)
+ if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
... and here.
Robin.