Since tx_submit can be called from a hard IRQ, xfers_list must be
protected with a lock to avoid concurrent access to the list's elements.
Since at_xdmac_handle_cyclic() is called from a tasklet, spin_lock_irq()
is enough to protect against a hard IRQ.

Fixes: e1f7c9eee707 ("dmaengine: at_xdmac: creation of the atmel eXtended DMA Controller driver")
Signed-off-by: Tudor Ambarus <tudor.ambarus@xxxxxxxxxxxxx>
---
 drivers/dma/at_xdmac.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b6547f1b5645..eeb03065d484 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1608,14 +1608,17 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
 	struct at_xdmac_desc		*desc;
 	struct dma_async_tx_descriptor	*txd;
 
-	if (!list_empty(&atchan->xfers_list)) {
-		desc = list_first_entry(&atchan->xfers_list,
-					struct at_xdmac_desc, xfer_node);
-		txd = &desc->tx_dma_desc;
-
-		if (txd->flags & DMA_PREP_INTERRUPT)
-			dmaengine_desc_get_callback_invoke(txd, NULL);
+	spin_lock_irq(&atchan->lock);
+	if (list_empty(&atchan->xfers_list)) {
+		spin_unlock_irq(&atchan->lock);
+		return;
 	}
+	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+				xfer_node);
+	spin_unlock_irq(&atchan->lock);
+	txd = &desc->tx_dma_desc;
+	if (txd->flags & DMA_PREP_INTERRUPT)
+		dmaengine_desc_get_callback_invoke(txd, NULL);
 }
 
 static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
-- 
2.25.1
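
Note (not part of the patch): a minimal sketch of the locking pattern the
commit message reasons about, using hypothetical my_chan / my_desc structures
in place of the driver's at_xdmac_chan / at_xdmac_desc. The producer path,
which may run in hard IRQ (a tx_submit-like path), saves and restores the IRQ
flags; the tasklet consumer runs in softirq context with hard IRQs enabled, so
the cheaper spin_lock_irq()/spin_unlock_irq() pair is sufficient.

	/* Illustrative sketch only; names are hypothetical stand-ins. */
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct my_desc {
		struct list_head	xfer_node;
	};

	struct my_chan {
		spinlock_t		lock;
		struct list_head	xfers_list;
	};

	static void my_chan_init(struct my_chan *chan)
	{
		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->xfers_list);
	}

	/*
	 * May be called from hard IRQ: the caller's IRQ state is unknown,
	 * so save/restore flags around the critical section.
	 */
	static void my_queue_desc(struct my_chan *chan, struct my_desc *desc)
	{
		unsigned long flags;

		spin_lock_irqsave(&chan->lock, flags);
		list_add_tail(&desc->xfer_node, &chan->xfers_list);
		spin_unlock_irqrestore(&chan->lock, flags);
	}

	/*
	 * Runs from a tasklet (softirq, hard IRQs enabled): spin_lock_irq()
	 * is enough to keep a hard-IRQ producer off the list while we pick
	 * the first entry.
	 */
	static struct my_desc *my_peek_first(struct my_chan *chan)
	{
		struct my_desc *desc = NULL;

		spin_lock_irq(&chan->lock);
		if (!list_empty(&chan->xfers_list))
			desc = list_first_entry(&chan->xfers_list,
						struct my_desc, xfer_node);
		spin_unlock_irq(&chan->lock);

		return desc;
	}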