From: Mans Rullgard <mans@xxxxxxxxx>

This patch has been added to the 4.1 stable tree. If you have any
objections, please let us know.

===============

[ Upstream commit 2895b2cad6e7a95104cf396e5330054453382ae1 ]

Cyclic transfer callbacks rely on block completion interrupts which were
disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
interrupts"). This re-enables block interrupts so the cyclic callbacks
can work. Other transfer types are not affected as they set the INT_EN
bit only on the last block.

Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block interrupts")
Signed-off-by: Mans Rullgard <mans@xxxxxxxxx>
Reviewed-by: Viresh Kumar <viresh.kumar@xxxxxxxxxx>
Signed-off-by: Vinod Koul <vinod.koul@xxxxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx>
Signed-off-by: Sasha Levin <sasha.levin@xxxxxxxxxx>
---
 drivers/dma/dw/core.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 115e5cf..303d937 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,6 +156,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 
 	/* Enable interrupts */
 	channel_set_bit(dw, MASK.XFER, dwc->mask);
+	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
 	dwc->initialized = true;
@@ -536,16 +537,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
 
 /* Called with dwc->lock held and all DMAC interrupts disabled */
 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
-		u32 status_err, u32 status_xfer)
+		u32 status_block, u32 status_err, u32 status_xfer)
 {
 	unsigned long flags;
 
-	if (dwc->mask) {
+	if (status_block & dwc->mask) {
 		void (*callback)(void *param);
 		void *callback_param;
 
 		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
 				channel_readl(dwc, LLP));
+		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 
 		callback = dwc->cdesc->period_callback;
 		callback_param = dwc->cdesc->period_callback_param;
@@ -577,6 +579,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		channel_writel(dwc, CTL_LO, 0);
 		channel_writel(dwc, CTL_HI, 0);
 
+		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 		dma_writel(dw, CLEAR.ERROR, dwc->mask);
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -593,10 +596,12 @@ static void dw_dma_tasklet(unsigned long data)
 {
 	struct dw_dma *dw = (struct dw_dma *)data;
 	struct dw_dma_chan *dwc;
+	u32 status_block;
 	u32 status_xfer;
 	u32 status_err;
 	int i;
 
+	status_block = dma_readl(dw, RAW.BLOCK);
 	status_xfer = dma_readl(dw, RAW.XFER);
 	status_err = dma_readl(dw, RAW.ERROR);
 
@@ -605,7 +610,8 @@ static void dw_dma_tasklet(unsigned long data)
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
-			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
+			dwc_handle_cyclic(dw, dwc, status_block, status_err,
+					status_xfer);
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
 		else if (status_xfer & (1 << i))
@@ -616,6 +622,7 @@
 	 * Re-enable interrupts.
 	 */
 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 }
 
@@ -635,6 +642,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 	 * softirq handler.
 	 */
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
 
 	status = dma_readl(dw, STATUS_INT);
@@ -645,6 +653,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 
 		/* Try to recover */
 		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
 		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
 		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
 		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -1111,6 +1120,7 @@ static void dw_dma_off(struct dw_dma *dw)
 	dma_writel(dw, CFG, 0);
 
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1216,6 +1226,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 
 	/* Disable interrupts */
 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
+	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1458,6 +1469,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 
 	dwc_chan_disable(dw, dwc);
 
+	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -1548,9 +1560,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	/* Force dma off, just in case */
 	dw_dma_off(dw);
 
-	/* Disable BLOCK interrupts as well */
-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
-
 	/* Create a pool of consistent memory blocks for hardware descriptors */
 	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
 					 sizeof(struct dw_desc), 4, 0);
-- 
2.5.0
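
A note for readers who are not familiar with the DW DMAC register interface
touched by this patch: the controller's per-interrupt mask registers
(MaskTfr, MaskBlock, MaskErr, ...) hold the mask value in bits [7:0] and a
per-bit write-enable in bits [15:8], which is why the driver's
channel_set_bit() helper writes ((mask) << 8) | (mask) and
channel_clear_bit() writes (mask) << 8. The user-space C sketch below only
models that write-enable behaviour so the MASK.BLOCK enables and disables
added above are easier to follow; the helper names here (mask_reg_write,
block_irq_enable, block_irq_disable) are illustrative and are not part of
the driver.

/*
 * Standalone sketch of the DW DMAC interrupt mask register semantics:
 * bits [7:0] hold the per-channel mask value, bits [15:8] are per-bit
 * write enables.  Only bits whose write-enable is set are updated.
 */
#include <stdint.h>
#include <stdio.h>

/* Simulated MASK.BLOCK register (one bit per channel). */
static uint8_t mask_block;

/* Apply a 16-bit write: the high byte selects which low-byte bits change. */
static void mask_reg_write(uint8_t *reg, uint32_t val)
{
	uint8_t we = (val >> 8) & 0xff;

	*reg = (*reg & ~we) | (val & we);
}

/* Same register write as channel_set_bit(dw, MASK.BLOCK, chan_mask). */
static void block_irq_enable(uint8_t chan_mask)
{
	mask_reg_write(&mask_block, ((uint32_t)chan_mask << 8) | chan_mask);
}

/* Same register write as channel_clear_bit(dw, MASK.BLOCK, chan_mask). */
static void block_irq_disable(uint8_t chan_mask)
{
	mask_reg_write(&mask_block, (uint32_t)chan_mask << 8);
}

int main(void)
{
	block_irq_enable(1 << 0);	/* like dwc_initialize() on channel 0 */
	block_irq_enable(1 << 3);	/* ... and on channel 3 */
	printf("MASK.BLOCK after enable:  0x%02x\n", (unsigned)mask_block);

	block_irq_disable(1 << 0);	/* like dwc_free_chan_resources() */
	printf("MASK.BLOCK after disable: 0x%02x\n", (unsigned)mask_block);

	return 0;
}

Built with a plain cc invocation this prints 0x09 after the two enables and
0x08 after masking channel 0 again, mirroring the MASK.BLOCK handling that
dwc_initialize() and dwc_free_chan_resources() gain with this patch, while
other channels' mask bits are left untouched by each write.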