If we got back an allocation that wasn't inside the supported coherent
mask, retry the allocation using GFP_DMA.

Based on the x86 code.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 lib/dma-direct.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index ab81de3ac1d3..f8467cb3d89a 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -28,6 +28,11 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 	return true;
 }
 
+static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+{
+	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
+}
+
 static void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
@@ -35,11 +40,29 @@ static void *dma_direct_alloc(struct device *dev, size_t size,
 	int page_order = get_order(size);
 	struct page *page = NULL;
 
+again:
 	/* CMA can be used only in the context which permits sleeping */
-	if (gfpflags_allow_blocking(gfp))
+	if (gfpflags_allow_blocking(gfp)) {
 		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
+		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
 	if (!page)
 		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
+
+	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+		__free_pages(page, page_order);
+		page = NULL;
+
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+		    !(gfp & GFP_DMA)) {
+			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+			goto again;
+		}
+	}
+
 	if (!page)
 		return NULL;
 
-- 
2.14.2
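
As a side note for readers outside the kernel tree: the dma_coherent_ok()
bounds check is easy to get wrong by one, since the *last* byte of the
buffer is what must be reachable under the mask. Below is a minimal
userspace sketch of the same arithmetic; phys_to_dma() is modelled as an
identity mapping (real platforms may apply an offset), and the mask value
is a made-up example, not kernel API.

/*
 * Standalone sketch (not kernel code) of the dma_coherent_ok() check.
 * Build with: cc -o coherent_ok coherent_ok.c
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
typedef uint64_t phys_addr_t;

/* hypothetical stand-in for dev->coherent_dma_mask: a 32-bit device */
static const dma_addr_t coherent_dma_mask = 0xffffffffULL;

static dma_addr_t phys_to_dma(phys_addr_t phys)
{
	return phys;	/* identity mapping for this sketch */
}

static int dma_coherent_ok(phys_addr_t phys, size_t size)
{
	/* the buffer's last byte must be addressable under the mask */
	return phys_to_dma(phys) + size - 1 <= coherent_dma_mask;
}

int main(void)
{
	/* a 4 KiB buffer whose last byte sits exactly at 2^32 - 1 */
	phys_addr_t phys = 0x100000000ULL - 4096;

	printf("ends exactly at the mask: %s\n",
	       dma_coherent_ok(phys, 4096) ? "ok" : "rejected");
	printf("one byte past the mask:   %s\n",
	       dma_coherent_ok(phys + 1, 4096) ? "ok" : "rejected");
	return 0;
}

Without the "- 1" the first case would be rejected even though the whole
buffer is addressable, which is exactly the off-by-one the check has to
avoid.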
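
The retry flow itself can be modelled the same way. In this sketch
fake_alloc(), LOW_ZONE and coherent_ok() are hypothetical stand-ins for
alloc_pages_node(), GFP_DMA and dma_coherent_ok(); the point is only the
shape of the check-free-retry loop, not the kernel interfaces.

/*
 * Userspace model (not kernel API) of the allocate / check / retry
 * pattern: if the first allocation fails the coherency check, retry
 * once from a more restrictive "zone" before giving up.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define LOW_ZONE	0x1	/* stands in for GFP_DMA */

/* a 24-bit device, i.e. a coherent mask well below 32 bits */
static const uint64_t coherent_dma_mask = 0xffffffULL;

static uint64_t fake_alloc(unsigned int flags)
{
	/* default zone: above 4 GiB; low zone: below 16 MiB */
	return (flags & LOW_ZONE) ? 0x00400000ULL : 0x140000000ULL;
}

static int coherent_ok(uint64_t addr, size_t size)
{
	return addr + size - 1 <= coherent_dma_mask;
}

static uint64_t alloc_coherent(size_t size)
{
	unsigned int flags = 0;
	uint64_t addr;

again:
	addr = fake_alloc(flags);
	if (!coherent_ok(addr, size)) {
		/* the free would go here; retry once from the low zone */
		if (!(flags & LOW_ZONE)) {
			flags |= LOW_ZONE;
			goto again;
		}
		return 0;	/* out of options, as the patch's NULL return */
	}
	return addr;
}

int main(void)
{
	printf("got %#llx\n", (unsigned long long)alloc_coherent(4096));
	return 0;
}

Two details of the actual patch are worth noting: the retry clears
GFP_DMA32 before setting GFP_DMA, since the two zone modifiers must not
be combined, and GFP_DMA is only attempted when the coherent mask is
below DMA_BIT_MASK(32), keeping ZONE_DMA as a last resort for small
masks. A CMA allocation that fails the check is released back to the
contiguous pool so that the plain page allocator path can be tried
instead.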