<gregkh@xxxxxxxxxxxxxxxxxxx> writes: > This is a note to let you know that I've just added the patch titled > > dma: pl330: Fix cyclic transfers > > to the 3.4-stable tree which can be found at: > http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary > > The filename of the patch is: > dma-pl330-fix-cyclic-transfers.patch > and it can be found in the queue-3.4 subdirectory. > > If you, or anyone else, feels it should not be added to the stable tree, > please let <stable@xxxxxxxxxxxxxxx> know about it. > While working on this patch for the 3.5 kernel I found that it breaks the kernel build for two reasons: * is_slave_direction() is not defined (although you could cherry-pick commit 61cc13a51bcff737ce02d2047834171c0365b00d) * the pl330_prep_dma_cyclic() function signature doesn't contain the 'flags' parameter introduced by commit ec8b5e48c03790a68cb875fe5064007a9cbdfdd0 I checked the 3.4 kernel and it looks like you'll have the same issues. Cheers, -- Luis > > From fc51446021f42aca8906e701fc2292965aafcb15 Mon Sep 17 00:00:00 >2001 > From: Lars-Peter Clausen <lars@xxxxxxxxxx> > Date: Tue, 23 Jul 2013 10:24:50 +0200 > Subject: dma: pl330: Fix cyclic transfers > > From: Lars-Peter Clausen <lars@xxxxxxxxxx> > > commit fc51446021f42aca8906e701fc2292965aafcb15 upstream. > > Allocate a descriptor for each period of a cyclic transfer, not just the first. > Also, since the callback needs to be called for each finished period, make sure to > initialize the callback and callback_param fields of each descriptor in a cyclic > transfer. 
> > Signed-off-by: Lars-Peter Clausen <lars@xxxxxxxxxx> > Signed-off-by: Vinod Koul <vinod.koul@xxxxxxxxx> > Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx> > > --- > drivers/dma/pl330.c | 93 +++++++++++++++++++++++++++++++++++++--------------- > 1 file changed, 67 insertions(+), 26 deletions(-) > > --- a/drivers/dma/pl330.c > +++ b/drivers/dma/pl330.c > @@ -2511,6 +2511,10 @@ static dma_cookie_t pl330_tx_submit(stru > /* Assign cookies to all nodes */ > while (!list_empty(&last->node)) { > desc = list_entry(last->node.next, struct dma_pl330_desc, node); > + if (pch->cyclic) { > + desc->txd.callback = last->txd.callback; > + desc->txd.callback_param = last->txd.callback_param; > + } > > dma_cookie_assign(&desc->txd); > > @@ -2694,45 +2698,82 @@ static struct dma_async_tx_descriptor *p > size_t period_len, enum dma_transfer_direction direction, > void *context) > { > - struct dma_pl330_desc *desc; > + struct dma_pl330_desc *desc = NULL, *first = NULL; > struct dma_pl330_chan *pch = to_pchan(chan); > + struct dma_pl330_dmac *pdmac = pch->dmac; > + unsigned int i; > dma_addr_t dst; > dma_addr_t src; > > - desc = pl330_get_desc(pch); > - if (!desc) { > - dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", > - __func__, __LINE__); > + if (len % period_len != 0) > return NULL; > - } > > - switch (direction) { > - case DMA_MEM_TO_DEV: > - desc->rqcfg.src_inc = 1; > - desc->rqcfg.dst_inc = 0; > - desc->req.rqtype = MEMTODEV; > - src = dma_addr; > - dst = pch->fifo_addr; > - break; > - case DMA_DEV_TO_MEM: > - desc->rqcfg.src_inc = 0; > - desc->rqcfg.dst_inc = 1; > - desc->req.rqtype = DEVTOMEM; > - src = pch->fifo_addr; > - dst = dma_addr; > - break; > - default: > + if (!is_slave_direction(direction)) { > dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", > __func__, __LINE__); > return NULL; > } > > - desc->rqcfg.brst_size = pch->burst_sz; > - desc->rqcfg.brst_len = 1; > + for (i = 0; i < len / period_len; i++) { > + desc = 
pl330_get_desc(pch); > + if (!desc) { > + dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", > + __func__, __LINE__); > > - pch->cyclic = true; > + if (!first) > + return NULL; > + > + spin_lock_irqsave(&pdmac->pool_lock, flags); > + > + while (!list_empty(&first->node)) { > + desc = list_entry(first->node.next, > + struct dma_pl330_desc, node); > + list_move_tail(&desc->node, &pdmac->desc_pool); > + } > + > + list_move_tail(&first->node, &pdmac->desc_pool); > + > + spin_unlock_irqrestore(&pdmac->pool_lock, flags); > + > + return NULL; > + } > + > + switch (direction) { > + case DMA_MEM_TO_DEV: > + desc->rqcfg.src_inc = 1; > + desc->rqcfg.dst_inc = 0; > + desc->req.rqtype = MEMTODEV; > + src = dma_addr; > + dst = pch->fifo_addr; > + break; > + case DMA_DEV_TO_MEM: > + desc->rqcfg.src_inc = 0; > + desc->rqcfg.dst_inc = 1; > + desc->req.rqtype = DEVTOMEM; > + src = pch->fifo_addr; > + dst = dma_addr; > + break; > + default: > + break; > + } > > - fill_px(&desc->px, dst, src, period_len); > + desc->rqcfg.brst_size = pch->burst_sz; > + desc->rqcfg.brst_len = 1; > + fill_px(&desc->px, dst, src, period_len); > + > + if (!first) > + first = desc; > + else > + list_add_tail(&desc->node, &first->node); > + > + dma_addr += period_len; > + } > + > + if (!desc) > + return NULL; > + > + pch->cyclic = true; > + desc->txd.flags = flags; > > return &desc->txd; > } > > > Patches currently in stable-queue which might be from lars@xxxxxxxxxx are > > queue-3.4/dma-pl330-fix-cyclic-transfers.patch > -- > To unsubscribe from this list: send the line "unsubscribe stable" in > the body of a message to majordomo@xxxxxxxxxxxxxxx > More majordomo info at http://vger.kernel.org/majordomo-info.html -- To unsubscribe from this list: send the line "unsubscribe stable" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html