From: Neel Gandhi <neel.gandhi@xxxxxxxxxx>

The vchan_next_desc() function, called from
xilinx_dpdma_chan_queue_transfer(), must be called with
virt_dma_chan.lock held. This isn't correctly handled in all code
paths, resulting in a race condition between the
.device_issue_pending() handler and the IRQ handler which causes DMA
to randomly stop. Fix it by taking the lock around
xilinx_dpdma_chan_queue_transfer() calls that are missing it.

Signed-off-by: Neel Gandhi <neel.gandhi@xxxxxxx>
Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@xxxxxxx>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@xxxxxxxxxxxxxxxx>
Signed-off-by: Vishal Sagar <vishal.sagar@xxxxxxx>
Link: https://lore.kernel.org/all/20220122121407.11467-1-neel.gandhi@xxxxxxxxxx
---
 drivers/dma/xilinx/xilinx_dpdma.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b82815e64d24..28d9af8f00f0 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -1097,12 +1097,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
 	 * Complete the active descriptor, if any, promote the pending
 	 * descriptor to active, and queue the next transfer, if any.
 	 */
+	spin_lock(&chan->vchan.lock);
 	if (chan->desc.active)
 		vchan_cookie_complete(&chan->desc.active->vdesc);
 	chan->desc.active = pending;
 	chan->desc.pending = NULL;
 	xilinx_dpdma_chan_queue_transfer(chan);
+	spin_unlock(&chan->vchan.lock);

 out:
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1266,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
 	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
 	unsigned long flags;

-	spin_lock_irqsave(&chan->vchan.lock, flags);
+	spin_lock_irqsave(&chan->lock, flags);
+	spin_lock(&chan->vchan.lock);
 	if (vchan_issue_pending(&chan->vchan))
 		xilinx_dpdma_chan_queue_transfer(chan);
-	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+	spin_unlock(&chan->vchan.lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 }

 static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1499,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
 			  XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);

 	spin_lock_irqsave(&chan->lock, flags);
+	spin_lock(&chan->vchan.lock);
 	xilinx_dpdma_chan_queue_transfer(chan);
+	spin_unlock(&chan->vchan.lock);
 	spin_unlock_irqrestore(&chan->lock, flags);
 }

--
2.25.1
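
For reviewers unfamiliar with the locking scheme: the invariant the patch
establishes is that chan->lock is always taken first (with interrupts
disabled), then chan->vchan.lock, and xilinx_dpdma_chan_queue_transfer()
only ever runs with the inner lock held. Below is a minimal userspace
sketch of that ordering, not driver code: pthread mutexes stand in for the
spinlocks, a counter stands in for the vchan descriptor list, and all
names (queue_transfer, issue_pending, irq_handler, etc.) are illustrative.

/* Build with: cc -pthread lock_order_sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;  /* models chan->lock */
static pthread_mutex_t vchan_lock = PTHREAD_MUTEX_INITIALIZER; /* models chan->vchan.lock */

static int pending_descs; /* models the vchan descriptor list */

/* Models xilinx_dpdma_chan_queue_transfer(); caller must hold vchan_lock. */
static void queue_transfer(void)
{
	if (pending_descs > 0)
		pending_descs--; /* "hardware" consumes one descriptor */
}

/* Models the .device_issue_pending() path after the patch. */
static void *issue_pending(void *arg)
{
	pthread_mutex_lock(&chan_lock);  /* outer lock first */
	pthread_mutex_lock(&vchan_lock); /* then the inner vchan lock */
	pending_descs++;                 /* models vchan_issue_pending() */
	queue_transfer();
	pthread_mutex_unlock(&vchan_lock);
	pthread_mutex_unlock(&chan_lock);
	return NULL;
}

/* Models the IRQ-handler path; same lock order, so no race or deadlock. */
static void *irq_handler(void *arg)
{
	pthread_mutex_lock(&chan_lock);
	pthread_mutex_lock(&vchan_lock);
	queue_transfer();
	pthread_mutex_unlock(&vchan_lock);
	pthread_mutex_unlock(&chan_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, issue_pending, NULL);
	pthread_create(&b, NULL, irq_handler, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("pending descriptors: %d\n", pending_descs);
	return 0;
}

Because both paths acquire the two locks in the same order, the shared
descriptor state is never touched with only one of them held, which is
the race the original code had in xilinx_dpdma_issue_pending() and
xilinx_dpdma_chan_err_task().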