The Documentation/DMA-API-HOWTO.txt states that dma_map_sg returns the
number of created entries in the DMA address space. However, the
subsequent calls to dma_sync_sg_for_{device,cpu} and dma_unmap_sg must be
called with the original number of entries passed to dma_map_sg. The
sg_table->nents in turn holds the result of the dma_map_sg call, as stated
in include/linux/scatterlist.h. Adapt the code to obey those rules.

Signed-off-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
---
For more information, see '[PATCH v2 00/21] DRM: fix struct sg_table nents
vs. orig_nents misuse' thread: https://lkml.org/lkml/2020/5/4/373
---
 drivers/gpu/drm/msm/msm_gem.c   | 8 ++++----
 drivers/gpu/drm/msm/msm_iommu.c | 3 ++-
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 5a6a79f..54c3bbb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -54,10 +54,10 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
 
 	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
 		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
-			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			msm_obj->sgt->orig_nents, DMA_BIDIRECTIONAL);
 	} else {
 		dma_map_sg(dev, msm_obj->sgt->sgl,
-			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			msm_obj->sgt->orig_nents, DMA_BIDIRECTIONAL);
 	}
 }
 
@@ -67,10 +67,10 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
 
 	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
 		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
-			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			msm_obj->sgt->orig_nents, DMA_BIDIRECTIONAL);
 	} else {
 		dma_unmap_sg(dev, msm_obj->sgt->sgl,
-			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			msm_obj->sgt->orig_nents, DMA_BIDIRECTIONAL);
 	}
 }
 
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index ad58cfe..b0ca084 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -43,7 +43,8 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	size_t ret;
 
-	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->orig_nents,
+			prot);
 	WARN_ON(!ret);
 
 	return (ret == len) ? 0 : -EINVAL;
-- 
1.9.1
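
For reference, below is a minimal sketch (not part of the patch) of the
convention the commit message describes: dma_map_sg() takes the original
entry count and returns the number of DMA segments actually created, while
dma_sync_sg_for_{device,cpu}() and dma_unmap_sg() must again be given the
original entry count, i.e. sg_table->orig_nents. The helper name
example_map_and_sync() and its error handling are illustrative assumptions;
only the nents vs. orig_nents rule is taken from the DMA-API documentation
and the patch above.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_and_sync(struct device *dev, struct sg_table *sgt)
{
	int mapped;

	/* Mapping consumes the original (CPU-side) number of entries ... */
	mapped = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -ENOMEM;

	/*
	 * ... and returns how many DMA segments were created; this is what
	 * sg_table->nents holds and what a device-facing for_each_sg() walk
	 * over the mapped list should use.
	 */
	sgt->nents = mapped;

	/* Syncing and unmapping take the original entry count again. */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);

	return 0;
}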