Apart from making the code easier to read, this patch is a prerequisite
for a functional change: tasklets run with interrupts enabled, so we
need to protect atchan->irq_status with spin_lock_irq(), otherwise the
tasklet can be interrupted by the IRQ that modifies irq_status.
atchan->irq_status will be protected in a further patch.

Signed-off-by: Tudor Ambarus <tudor.ambarus@xxxxxxxxxxxxx>
---
 drivers/dma/at_xdmac.c | 70 ++++++++++++++++++++----------------------
 1 file changed, 34 insertions(+), 36 deletions(-)

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index abe8c4615e65..ba727751a9f6 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1667,53 +1667,51 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
 {
 	struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
 	struct at_xdmac_desc *desc;
+	struct dma_async_tx_descriptor *txd;
 	u32 error_mask;
 
 	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
 		__func__, atchan->irq_status);
 
-	error_mask = AT_XDMAC_CIS_RBEIS
-		     | AT_XDMAC_CIS_WBEIS
-		     | AT_XDMAC_CIS_ROIS;
-
-	if (at_xdmac_chan_is_cyclic(atchan)) {
-		at_xdmac_handle_cyclic(atchan);
-	} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
-		   || (atchan->irq_status & error_mask)) {
-		struct dma_async_tx_descriptor *txd;
-
-		if (atchan->irq_status & error_mask)
-			at_xdmac_handle_error(atchan);
-
-		spin_lock_irq(&atchan->lock);
-		desc = list_first_entry(&atchan->xfers_list,
-					struct at_xdmac_desc,
-					xfer_node);
-		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-		if (!desc->active_xfer) {
-			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
-			spin_unlock_irq(&atchan->lock);
-			return;
-		}
+	if (at_xdmac_chan_is_cyclic(atchan))
+		return at_xdmac_handle_cyclic(atchan);
 
-		txd = &desc->tx_dma_desc;
-		dma_cookie_complete(txd);
-		/* Remove the transfer from the transfer list. */
-		list_del(&desc->xfer_node);
-		spin_unlock_irq(&atchan->lock);
+	error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
+		     AT_XDMAC_CIS_ROIS;
 
-		if (txd->flags & DMA_PREP_INTERRUPT)
-			dmaengine_desc_get_callback_invoke(txd, NULL);
+	if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
+	    !(atchan->irq_status & error_mask))
+		return;
 
-		dma_run_dependencies(txd);
+	if (atchan->irq_status & error_mask)
+		at_xdmac_handle_error(atchan);
 
-		spin_lock_irq(&atchan->lock);
-		/* Move the xfer descriptors into the free descriptors list. */
-		list_splice_tail_init(&desc->descs_list,
-				      &atchan->free_descs_list);
-		at_xdmac_advance_work(atchan);
+	spin_lock_irq(&atchan->lock);
+	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+				xfer_node);
+	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+	if (!desc->active_xfer) {
+		dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
 		spin_unlock_irq(&atchan->lock);
+		return;
 	}
+
+	txd = &desc->tx_dma_desc;
+	dma_cookie_complete(txd);
+	/* Remove the transfer from the transfer list. */
+	list_del(&desc->xfer_node);
+	spin_unlock_irq(&atchan->lock);
+
+	if (txd->flags & DMA_PREP_INTERRUPT)
+		dmaengine_desc_get_callback_invoke(txd, NULL);
+
+	dma_run_dependencies(txd);
+
+	spin_lock_irq(&atchan->lock);
+	/* Move the xfer descriptors into the free descriptors list. */
+	list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
+	at_xdmac_advance_work(atchan);
+	spin_unlock_irq(&atchan->lock);
 }
 
 static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
-- 
2.25.1
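
For readers following the locking rationale in the commit message, here is a
minimal, self-contained sketch of the IRQ-handler/tasklet race it describes.
This is not the driver's actual code: demo_chan, demo_interrupt() and
demo_tasklet() are hypothetical stand-ins whose fields only mirror the
at_xdmac ones (lock, irq_status).

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_chan {
	spinlock_t lock;		/* protects irq_status */
	u32 irq_status;			/* written in hard IRQ, read in tasklet */
	struct tasklet_struct tasklet;
};

/* Hard IRQ context: local interrupts are already disabled here, so a
 * plain spin_lock() is enough on the handler side. */
static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	struct demo_chan *chan = dev_id;

	spin_lock(&chan->lock);
	chan->irq_status |= 0x1;	/* record the new event */
	spin_unlock(&chan->lock);

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/* Tasklet context: runs with interrupts enabled. The lock must also
 * disable interrupts (spin_lock_irq()); otherwise demo_interrupt() can
 * fire mid-critical-section on the same CPU and deadlock on the lock,
 * or update irq_status while the tasklet is consuming it. */
static void demo_tasklet(struct tasklet_struct *t)
{
	struct demo_chan *chan = from_tasklet(chan, t, tasklet);
	u32 status;

	spin_lock_irq(&chan->lock);
	status = chan->irq_status;	/* take a stable snapshot */
	chan->irq_status = 0;
	spin_unlock_irq(&chan->lock);

	/* ... act on the snapshot in 'status' ... */
}

The key point mirrors the commit message: at_xdmac_tasklet() already takes
spin_lock_irq(&atchan->lock) around the descriptor lists, and a further
patch extends the same protection to the atchan->irq_status reads.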