Prevent a possible endless loop with DMAPOOL_DEBUG enabled if a buggy
driver corrupts DMA pool memory.

Signed-off-by: Tony Battersby <tonyb@xxxxxxxxxxxxxxx>
---
--- linux/mm/dmapool.c.orig	2018-08-02 10:14:25.000000000 -0400
+++ linux/mm/dmapool.c	2018-08-02 10:16:17.000000000 -0400
@@ -449,16 +449,35 @@ void dma_pool_free(struct dma_pool *pool
 	{
 		void *page_vaddr = vaddr - offset;
 		unsigned int chain = page->dma_free_o;
+		size_t total_free = 0;
+
 		while (chain < pool->allocation) {
-			if (chain != offset) {
-				chain = *(int *)(page_vaddr + chain);
-				continue;
+			if (unlikely(chain == offset)) {
+				spin_unlock_irqrestore(&pool->lock, flags);
+				dev_err(pool->dev,
+					"dma_pool_free %s, dma %pad already free\n",
+					pool->name, &dma);
+				return;
+			}
+
+			/*
+			 * The calculation of the number of blocks per
+			 * allocation is actually more complicated than this
+			 * because of the boundary value.  But this comparison
+			 * does not need to be exact; it just needs to prevent
+			 * an endless loop in case a buggy driver causes a
+			 * circular loop in the freelist.
+			 */
+			total_free += pool->size;
+			if (unlikely(total_free >= pool->allocation)) {
+				spin_unlock_irqrestore(&pool->lock, flags);
+				dev_err(pool->dev,
+					"dma_pool_free %s, freelist corrupted\n",
+					pool->name);
+				return;
 			}
-			spin_unlock_irqrestore(&pool->lock, flags);
-			dev_err(pool->dev,
-				"dma_pool_free %s, dma %pad already free\n",
-				pool->name, &dma);
-			return;
+
+			chain = *(int *)(page_vaddr + chain);
 		}
 	}
 	memset(vaddr, POOL_POISON_FREED, pool->size);
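
For reference, below is a minimal user-space sketch of the same idea; it is
not kernel code, and the pool geometry, the freelist_check() helper, and the
corruption setup in main() are made up for illustration.  It shows how
capping the freelist walk with a worst-case byte count turns a circular
chain into a detectable error instead of an endless loop:

#include <stdio.h>
#include <string.h>

#define ALLOCATION	4096	/* bytes per pool page (illustrative) */
#define BLOCK_SIZE	256	/* bytes per block (illustrative) */

/*
 * Walk an in-band freelist rooted at free_o inside page_vaddr.
 * Returns 1 if offset is already on the freelist, 0 if it is not,
 * and -1 if the walk exceeds the worst-case block count, which can
 * only happen if the freelist has been corrupted into a cycle.
 */
static int freelist_check(const char *page_vaddr, unsigned int free_o,
			  unsigned int offset)
{
	unsigned int chain = free_o;
	size_t total_free = 0;

	while (chain < ALLOCATION) {
		if (chain == offset)
			return 1;	/* double free */

		/*
		 * As in the patch, this bound does not need to be exact;
		 * it only needs to be finite.
		 */
		total_free += BLOCK_SIZE;
		if (total_free >= ALLOCATION)
			return -1;	/* circular freelist */

		memcpy(&chain, page_vaddr + chain, sizeof(chain));
	}
	return 0;
}

int main(void)
{
	static char page[ALLOCATION];
	unsigned int b0 = 0, b1 = BLOCK_SIZE;

	/* Simulate corruption: block 0 -> block 1 -> block 0 -> ... */
	memcpy(page + b0, &b1, sizeof(b1));
	memcpy(page + b1, &b0, sizeof(b0));

	/* Prints -1: the bounded walk reports corruption and stops. */
	printf("%d\n", freelist_check(page, b0, 3 * BLOCK_SIZE));
	return 0;
}

The over-count that can occur when the boundary value splits a page is
harmless for this purpose, since the check only has to terminate the walk,
not account for free space exactly.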