Make all cache invalidation conditional on sg_has_page() and use sg_phys
to get the physical address directly.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/arc/include/asm/dma-mapping.h | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 2d28ba9..42eb526 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -108,9 +108,13 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int i;
 
-	for_each_sg(sg, s, nents, i)
-		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
-					      s->length, dir);
+	for_each_sg(sg, s, nents, i) {
+		if (sg_has_page(s)) {
+			_dma_cache_sync((unsigned long)sg_virt(s), s->length,
+					dir);
+		}
+		s->dma_address = sg_phys(s);
+	}
 
 	return nents;
 }
@@ -163,8 +167,12 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
 	int i;
 	struct scatterlist *sg;
 
-	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg_has_page(sg)) {
+			_dma_cache_sync((unsigned int)sg_virt(sg), sg->length,
+					dir);
+		}
+	}
 }
 
 static inline void
@@ -174,8 +182,12 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
 	int i;
 	struct scatterlist *sg;
 
-	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg_has_page(sg)) {
+			_dma_cache_sync((unsigned int)sg_virt(sg), sg->length,
+					dir);
+		}
+	}
 }
 
 static inline int dma_supported(struct device *dev, u64 dma_mask)
-- 
1.9.1
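
For readers without the rest of the series at hand, the pattern is the same in
all three hunks: cache maintenance goes through the kernel virtual mapping, so
it is only done when the SG entry is actually backed by a struct page, while
the bus address is taken from sg_phys() in either case. A minimal,
self-contained sketch of that pattern follows; it is illustrative rather than
the exact ARC code, the function name example_map_sg() is made up,
sg_has_page() is the helper introduced earlier in this series, and
_dma_cache_sync() stands in for the architecture's cache maintenance routine.

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/*
	 * Illustrative sketch: map a scatterlist on a cache-incoherent
	 * platform where the bus address equals the physical address.
	 */
	static int example_map_sg(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir)
	{
		struct scatterlist *s;
		int i;

		for_each_sg(sgl, s, nents, i) {
			/* A kernel mapping only exists for page-backed entries. */
			if (sg_has_page(s))
				_dma_cache_sync((unsigned long)sg_virt(s),
						s->length, dir);
			/* sg_phys() is valid whether or not a page is attached. */
			s->dma_address = sg_phys(s);
		}
		return nents;
	}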