From: Nicolas Saenz Julienne <nsaenzjulienne@xxxxxxx>

upstream 81e9d894e03f9a279102c7aac62ea7cbf9949f4b commit.

When allocating DMA memory from a pool, the core can only guess which
atomic pool will fit a device's constraints. If it doesn't, get a safer
atomic pool and try again.

Fixes: c84dc6e68a1d ("dma-pool: add additional coherent pools to map to gfp mask")
Reported-by: Jeremy Linton <jeremy.linton@xxxxxxx>
Suggested-by: Robin Murphy <robin.murphy@xxxxxxx>
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@xxxxxxx>
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Peter Gonda <pgonda@xxxxxxxxxx>
---
 kernel/dma/pool.c | 57 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 37 insertions(+), 20 deletions(-)

diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 5b9eaa2b498d..d48d9acb585f 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -240,39 +240,56 @@ static inline struct gen_pool *dma_guess_pool(struct device *dev,
 void *dma_alloc_from_pool(struct device *dev, size_t size,
                           struct page **ret_page, gfp_t flags)
 {
-        struct gen_pool *pool = dma_guess_pool(dev, NULL);
-        unsigned long val;
+        struct gen_pool *pool = NULL;
+        unsigned long val = 0;
         void *ptr = NULL;
-
-        if (!pool) {
-                WARN(1, "%pGg atomic pool not initialised!\n", &flags);
-                return NULL;
+        phys_addr_t phys;
+
+        while (1) {
+                pool = dma_guess_pool(dev, pool);
+                if (!pool) {
+                        WARN(1, "Failed to get suitable pool for %s\n",
+                             dev_name(dev));
+                        break;
+                }
+
+                val = gen_pool_alloc(pool, size);
+                if (!val)
+                        continue;
+
+                phys = gen_pool_virt_to_phys(pool, val);
+                if (dma_coherent_ok(dev, phys, size))
+                        break;
+
+                gen_pool_free(pool, val, size);
+                val = 0;
         }
 
-        val = gen_pool_alloc(pool, size);
-        if (likely(val)) {
-                phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
 
+        if (val) {
                 *ret_page = pfn_to_page(__phys_to_pfn(phys));
                 ptr = (void *)val;
                 memset(ptr, 0, size);
-        } else {
-                WARN_ONCE(1, "DMA coherent pool depleted, increase size "
-                          "(recommended min coherent_pool=%zuK)\n",
-                          gen_pool_size(pool) >> 9);
+
+                if (gen_pool_avail(pool) < atomic_pool_size)
+                        schedule_work(&atomic_pool_work);
         }
 
-        if (gen_pool_avail(pool) < atomic_pool_size)
-                schedule_work(&atomic_pool_work);
         return ptr;
 }
 
 bool dma_free_from_pool(struct device *dev, void *start, size_t size)
 {
-        struct gen_pool *pool = dma_guess_pool(dev, NULL);
+        struct gen_pool *pool = NULL;
 
-        if (!pool || !gen_pool_has_addr(pool, (unsigned long)start, size))
-                return false;
-        gen_pool_free(pool, (unsigned long)start, size);
-        return true;
+        while (1) {
+                pool = dma_guess_pool(dev, pool);
+                if (!pool)
+                        return false;
+
+                if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
+                        gen_pool_free(pool, (unsigned long)start, size);
+                        return true;
+                }
+        }
 }
-- 
2.28.0.618.gf4bc123cb7-goog
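
For readers skimming the backport, the change boils down to a guess-verify-fall-back
loop: try the pool the core guessed, keep the allocation only if it satisfies the
device's constraints, otherwise retry with a safer pool. Below is a minimal
standalone C sketch of that pattern; the helper names (guess_pool, pool_alloc,
addr_ok) and the toy pool layout are hypothetical stand-ins, not the kernel's
dma_guess_pool()/gen_pool_alloc()/dma_coherent_ok() API.

/*
 * Standalone sketch of the fallback pattern; hypothetical names, not the
 * kernel API.
 */
#include <stddef.h>
#include <stdio.h>

struct pool {
        const char *name;
        size_t base;            /* lowest address this toy pool hands out */
};

static struct pool pools[] = {
        { "wide pool", (size_t)1 << 28 },       /* guessed first; addresses too high */
        { "safe pool", (size_t)1 << 12 },       /* safer fallback; low addresses */
};

/* Like dma_guess_pool(): return the next candidate after @prev, NULL when out. */
static struct pool *guess_pool(struct pool *prev)
{
        if (!prev)
                return &pools[0];
        if (prev == &pools[0])
                return &pools[1];
        return NULL;
}

/* Like gen_pool_alloc() + gen_pool_virt_to_phys(): hand back a toy address. */
static size_t pool_alloc(struct pool *p, size_t size)
{
        (void)size;
        return p->base;
}

/* Like dma_coherent_ok(): can a device limited to @mask reach @addr? */
static int addr_ok(size_t addr, size_t mask)
{
        return addr <= mask;
}

int main(void)
{
        size_t dev_mask = ((size_t)1 << 24) - 1;        /* device only reaches low memory */
        struct pool *pool = NULL;
        size_t addr = 0;

        while ((pool = guess_pool(pool))) {
                addr = pool_alloc(pool, 4096);
                if (addr && addr_ok(addr, dev_mask))
                        break;          /* allocation satisfies the device, keep it */
                addr = 0;               /* otherwise drop it and try a safer pool */
        }

        if (addr)
                printf("allocated 0x%zx from the %s\n", addr, pool->name);
        else
                printf("no suitable pool for this device\n");
        return 0;
}

Built with any C compiler, the sketch reports which pool finally satisfied the
deliberately restrictive device mask, mirroring how the patched
dma_alloc_from_pool() only keeps an allocation once dma_coherent_ok() accepts it.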