Hi Green,

On 20-09-19, 17:01, Green Wan wrote:

Please make sure threading is *not* broken in your patch series. Atm
they are all over the place in my mailbox!

> Link: https://www.kernel.org/doc/html/v4.17/driver-api/dmaengine/
> Link: https://static.dev.sifive.com/FU540-C000-v1.0.pdf

The Link tag is meant to point to the discussion of the patch, so please
drop the first one and add the second one as documentation for the
hardware.

> diff --git a/MAINTAINERS b/MAINTAINERS
> index d0caa09a479e..c5f0662c9106 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -14594,6 +14594,7 @@ F:	drivers/media/mmc/siano/
>  SIFIVE PDMA DRIVER
>  M:	Green Wan <green.wan@xxxxxxxxxx>
>  S:	Maintained
> +F:	drivers/dma/sf-pdma/
>  F:	Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml

What is this generated against, only one line?

> +static void sf_pdma_fill_desc(struct sf_pdma_chan *chan,
> +		u64 dst,
> +		u64 src,
> +		u64 size)

Please align these to the preceding line's open brace!

> +{
> +	struct pdma_regs *regs = &chan->regs;
> +
> +	writel(PDMA_FULL_SPEED, regs->xfer_type);
> +	writeq(size, regs->xfer_size);
> +	writeq(dst, regs->dst_addr);
> +	writeq(src, regs->src_addr);
> +}
> +
> +void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
> +{
> +	struct pdma_regs *regs = &chan->regs;
> +
> +	writel(PDMA_CLEAR_CTRL, regs->ctrl);
> +}
> +
> +struct dma_async_tx_descriptor *
> +	sf_pdma_prep_dma_memcpy(struct dma_chan *dchan,
> +		dma_addr_t dest,
> +		dma_addr_t src,
> +		size_t len,
> +		unsigned long flags)
> +{
> +	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
> +	struct sf_pdma_desc *desc;
> +
> +	if (!chan || !len || !dest || !src) {
> +		pr_debug("%s: Please check dma len, dest, src!\n", __func__);
> +		return NULL;
> +	}
> +
> +	desc = sf_pdma_alloc_desc(chan);
> +	if (!desc)
> +		return NULL;
> +
> +	desc->in_use = true;
> +	desc->dirn = DMA_MEM_TO_MEM;
> +	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

No error checking?

> +
> +	spin_lock_irqsave(&chan->lock, flags);
> +	chan->desc = desc;
> +	sf_pdma_fill_desc(desc->chan, dest, src, len);
> +	spin_unlock_irqrestore(&chan->lock, flags);
> +
> +	return desc->async_tx;
> +}
> +
> +static void sf_pdma_unprep_slave_dma(struct sf_pdma_chan *chan)
> +{
> +	if (chan->dma_dir != DMA_NONE)
> +		dma_unmap_resource(chan->vchan.chan.device->dev,

This is slave dma, right? Why are you unmapping? Also, where is the
mapping call?

> +			chan->dma_dev_addr,
> +			chan->dma_dev_size,
> +			chan->dma_dir, 0);
> +	chan->dma_dir = DMA_NONE;
> +}
> +
> +static int sf_pdma_slave_config(struct dma_chan *dchan,
> +		struct dma_slave_config *cfg)
> +{
> +	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
> +
> +	memcpy(&chan->cfg, cfg, sizeof(*cfg));
> +	sf_pdma_unprep_slave_dma(chan);

Why unprep?

> +static enum dma_status
> +sf_pdma_tx_status(struct dma_chan *dchan,
> +		dma_cookie_t cookie,
> +		struct dma_tx_state *txstate)
> +{
> +	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
> +	enum dma_status status;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&chan->lock, flags);
> +	if (chan->xfer_err) {
> +		chan->status = DMA_ERROR;
> +		spin_unlock_irqrestore(&chan->lock, flags);
> +		return chan->status;
> +	}
> +
> +	spin_unlock_irqrestore(&chan->lock, flags);
> +
> +	status = dma_cookie_status(dchan, cookie, txstate);
> +
> +	if (status == DMA_COMPLETE)
> +		return status;
> +
> +	if (!txstate)
> +		return chan->status;

Why not return status here? Is chan->status expected to be different from
status?
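FWIW what I would expect here is something along these lines. This is only
a sketch that keeps your xfer_err flag; the point is that the cookie
status is what gets reported back, with or without a txstate:

static enum dma_status
sf_pdma_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	bool xfer_err;

	/* sample the error flag under the lock, nothing else needs it */
	spin_lock_irqsave(&chan->lock, flags);
	xfer_err = chan->xfer_err;
	spin_unlock_irqrestore(&chan->lock, flags);

	if (xfer_err)
		return DMA_ERROR;

	/* report the cookie status directly, no driver-private copy */
	return dma_cookie_status(dchan, cookie, txstate);
}

That way a caller with a NULL txstate and one with a real txstate always
see the same answer for the same cookie.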
> +static int sf_pdma_remove(struct platform_device *pdev)
> +{
> +	struct sf_pdma *pdma = platform_get_drvdata(pdev);
> +
> +	dma_async_device_unregister(&pdma->dma_dev);

What about the irqs and tasklets? They are still enabled and can trigger!
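Something along these lines is what I would expect before the unregister.
This is only a sketch; I am guessing at PDMA_NR_CH, chans[], the
per-channel txirq/errirq and the done/err tasklet names, so adjust them
to whatever you actually have:

static int sf_pdma_remove(struct platform_device *pdev)
{
	struct sf_pdma *pdma = platform_get_drvdata(pdev);
	struct sf_pdma_chan *ch;
	int i;

	for (i = 0; i < PDMA_NR_CH; i++) {
		ch = &pdma->chans[i];

		/* stop the interrupt sources first */
		devm_free_irq(&pdev->dev, ch->txirq, ch);
		devm_free_irq(&pdev->dev, ch->errirq, ch);

		/* then make sure nothing is still scheduled to run */
		tasklet_kill(&ch->vchan.task);
		tasklet_kill(&ch->done_tasklet);
		tasklet_kill(&ch->err_tasklet);
	}

	dma_async_device_unregister(&pdma->dma_dev);

	return 0;
}

Freeing the irqs first guarantees the handlers cannot schedule the
tasklets again while you are killing them.

-- 
~Vinod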