On Fri, Jun 23, 2017 at 10:58:16AM +0200, Stefan Roese wrote:
> +/* mSGDMA descriptor control field bit definitions */
> +#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff)
> +#define MSGDMA_DESC_CTL_GEN_SOP BIT(8)
> +#define MSGDMA_DESC_CTL_GEN_EOP BIT(9)
> +#define MSGDMA_DESC_CTL_PARK_READS BIT(10)
> +#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11)
> +#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12)
> +#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13)
> +#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14)
> +#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15)
> +#define MSGDMA_DESC_CTL_TR_ERR_IRQ (0xff << 16)

Why can't this be expressed with GENMASK? GENMASK(23, 16)

> +/* mSGDMA response register bit definitions */
> +#define MSGDMA_RESP_EARLY_TERM BIT(8)
> +#define MSGDMA_RESP_ERR_MASK 0xFF

TABS or spaces please, not both.

> +static struct dma_async_tx_descriptor *
> +msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
> +		     unsigned int sg_len, enum dma_transfer_direction dir,
> +		     unsigned long flags, void *context)
> +
> +{
> +	struct msgdma_device *mdev = to_mdev(dchan);
> +	struct dma_slave_config *cfg = &mdev->slave_cfg;
> +	struct msgdma_sw_desc *new, *first = NULL;
> +	void *desc = NULL;
> +	size_t len, avail;
> +	dma_addr_t dma_dst, dma_src;
> +	u32 desc_cnt = 0, i;
> +	struct scatterlist *sg;
> +	u32 stride;
> +
> +	for_each_sg(sgl, sg, sg_len, i)
> +		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
> +
> +	spin_lock_bh(&mdev->lock);
> +	if (desc_cnt > mdev->desc_free_cnt) {
> +		spin_unlock_bh(&mdev->lock);
> +		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
> +		return NULL;
> +	}
> +	mdev->desc_free_cnt -= desc_cnt;
> +	spin_unlock_bh(&mdev->lock);
> +
> +	avail = sg_dma_len(sgl);
> +
> +	/* Run until we are out of scatterlist entries */
> +	while (true) {
> +		/* Allocate and populate the descriptor */
> +		new = msgdma_get_descriptor(mdev);
> +
> +		desc = &new->hw_desc;
> +		len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);
> +		if (len == 0)
> +			goto fetch;

Can you explain this part? I typically don't like goto other than for error
handling.

> +static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
> +{
> +	struct msgdma_device *mdev = to_mdev(dchan);
> +	struct msgdma_sw_desc *desc;
> +	int i;
> +
> +	mdev->sw_desq = kzalloc(sizeof(*desc) * MSGDMA_DESC_NUM, GFP_NOWAIT);

kcalloc perhaps?

> +static irqreturn_t msgdma_irq_handler(int irq, void *data)
> +{
> +	struct msgdma_device *mdev = data;
> +	u32 status;
> +
> +	status = ioread32(&mdev->csr->status);
> +	if ((status & MSGDMA_CSR_STAT_BUSY) == 0)
> +		mdev->idle = true;
> +
> +	tasklet_schedule(&mdev->irq_tasklet);
> +
> +	/* Clear interrupt in mSGDMA controller */
> +	iowrite32(MSGDMA_CSR_STAT_IRQ, &mdev->csr->status);
> +
> +	return IRQ_HANDLED;

Are we not going to submit here? :(

-- 
~Vinod
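
For reference, a minimal sketch of the two mechanical changes suggested in the
review above (GENMASK for the open-coded error-IRQ mask, and kcalloc for the
descriptor array). It only illustrates the suggestions against the quoted
patch context; the names MSGDMA_DESC_CTL_TR_ERR_IRQ, MSGDMA_DESC_NUM,
mdev->sw_desq and desc are taken from the patch, and the NULL check is an
assumed error path, not necessarily how the final driver handles it:

#include <linux/bitops.h>	/* GENMASK(), BIT() */
#include <linux/slab.h>		/* kcalloc() */

/*
 * Bits 23..16 of the descriptor control word are the per-error IRQ
 * enables, so (0xff << 16) can be written as a generated mask:
 */
#define MSGDMA_DESC_CTL_TR_ERR_IRQ	GENMASK(23, 16)

/*
 * In msgdma_alloc_chan_resources(): kcalloc() takes the element count
 * explicitly, zeroes the memory like kzalloc(), and additionally checks
 * the count * size multiplication for overflow:
 */
mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
if (!mdev->sw_desq)
	return -ENOMEM;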