Make all cache invalidation conditional on sg_has_page() and use sg_phys
to get the physical address directly, bypassing the noop page_to_bus.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/avr32/include/asm/dma-mapping.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index ae7ac92..a662ce2 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -216,11 +216,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	struct scatterlist *sg;
 
 	for_each_sg(sglist, sg, nents, i) {
-		char *virt;
-
-		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
-		virt = sg_virt(sg);
-		dma_cache_sync(dev, virt, sg->length, direction);
+		sg->dma_address = sg_phys(sg);
+		if (sg_has_page(sg))
+			dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
 	}
 
 	return nents;
@@ -328,8 +326,10 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
 	int i;
 	struct scatterlist *sg;
 
-	for_each_sg(sglist, sg, nents, i)
-		dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
+	for_each_sg(sglist, sg, nents, i) {
+		if (sg_has_page(sg))
+			dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
+	}
 }
 
 /* Now for the API extensions over the pci_ one */
-- 
1.9.1
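
For page-backed entries the new dma_address computation matches the old one;
the sketch below spells that step out.  It is illustrative only and not part
of the patch: example_dma_addr() is a made-up name, and the "no-op" behaviour
of page_to_bus() is taken from the commit message above.

	#include <linux/scatterlist.h>

	/*
	 * Old:  sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
	 * New:  sg->dma_address = sg_phys(sg);
	 *
	 * With page_to_bus() acting as a no-op translation (bus address ==
	 * physical address), both expressions reduce to the physical address
	 * of the segment, so the mapped address is unchanged for page-backed
	 * entries.  sg_phys() also covers entries that carry no struct page,
	 * which is exactly what sg_has_page() guards against above:
	 * sg_virt() dereferences sg_page(), so cache maintenance is skipped
	 * for page-less entries.
	 */
	static inline dma_addr_t example_dma_addr(struct scatterlist *sg)
	{
		return page_to_phys(sg_page(sg)) + sg->offset; /* == sg_phys(sg) */
	}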