Make all cache invalidation conditional on sg_has_page().

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/powerpc/kernel/dma.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 35e4dcc..cece40b 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -135,7 +135,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
 		sg->dma_length = sg->length;
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+		if (sg_has_page(sg)) {
+			__dma_sync_page(sg_page(sg), sg->offset, sg->length,
+					direction);
+		}
 	}
 
 	return nents;
@@ -200,7 +203,10 @@ static inline void dma_direct_sync_sg(struct device *dev,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+		if (sg_has_page(sg)) {
+			__dma_sync_page(sg_page(sg), sg->offset, sg->length,
+					direction);
+		}
 }
 
 static inline void dma_direct_sync_single(struct device *dev,
-- 
1.9.1
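
For readers outside the kernel context, here is a minimal standalone sketch of the pattern this patch applies: skip cache maintenance for scatterlist entries that are not backed by a struct page. This is illustrative userspace-style C, not kernel code; the sg_entry type, the entry_has_page() and sync_range() helpers, and the NULL-page convention are hypothetical stand-ins for struct scatterlist, sg_has_page(), and __dma_sync_page().

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct scatterlist: an entry either
 * references a page-backed buffer or a raw region with no page. */
struct sg_entry {
	void *page;	/* NULL when there is no backing page */
	size_t offset;
	size_t length;
};

/* Stand-in for sg_has_page(): true only for page-backed entries. */
static int entry_has_page(const struct sg_entry *e)
{
	return e->page != NULL;
}

/* Stand-in for __dma_sync_page(): pretend cache maintenance. */
static void sync_range(void *page, size_t offset, size_t length)
{
	printf("sync page %p +%zu, %zu bytes\n", page, offset, length);
}

/* The pattern from the patch: sync only entries that have a page;
 * entries without one (e.g. MMIO or carved-out memory) are skipped,
 * since there is no page to pass to the sync helper. */
static void map_entries(struct sg_entry *ents, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (entry_has_page(&ents[i]))
			sync_range(ents[i].page, ents[i].offset,
				   ents[i].length);
	}
}

int main(void)
{
	char buf[64];
	struct sg_entry ents[] = {
		{ buf,  0, sizeof(buf) },	/* page-backed: synced */
		{ NULL, 0, 4096 },		/* no page: skipped */
	};

	map_entries(ents, 2);
	return 0;
}

The guard mirrors the diff above: the sync call itself is unchanged, it is simply made conditional on the entry actually having a page, so the mapping loop can handle page-less entries without dereferencing a nonexistent page.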