Hi Sia,

> Subject: [PATCH v4 13/15] dmaengine: dw-axi-dmac: Add Intel KeemBay AxiDMA handshake
>
> Add support for Intel KeemBay AxiDMA device handshake programming.
> The device handshake number passed in to the AxiDMA shall be written to
> the Intel KeemBay AxiDMA hardware handshake registers before DMA
> operations are started.
>
> Reviewed-by: Andy Shevchenko <andriy.shevchenko@xxxxxxxxxxxxxxx>
> Signed-off-by: Sia Jee Heng <jee.heng.sia@xxxxxxxxx>
> ---
>  .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 52 +++++++++++++++++++
>  1 file changed, 52 insertions(+)
>
> diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> index c2ffc5d44b6e..d44a5c9eb9c1 100644
> --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> @@ -445,6 +445,48 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
>          pm_runtime_put(chan->chip->dev);
>  }
>
> +static int dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip, u32 hs_number,
> +                                     bool set)
> +{
> +        unsigned long start = 0;
> +        unsigned long reg_value;
> +        unsigned long reg_mask;
> +        unsigned long reg_set;
> +        unsigned long mask;
> +        unsigned long val;
> +
> +        if (!chip->apb_regs)
> +                return -ENODEV;

In some places you check for the existence of this region using

        if (IS_ERR(chip->regs))

and in other places you use

        if (!chip->apb_regs)

I guess this isn't correct. NOTE that this comment is valid for the other
patches as well. (A small sketch of what I mean follows after the quoted
patch.)

> +
> +        /*
> +         * An unused DMA channel has a default value of 0x3F.
> +         * Lock the DMA channel by assigning a handshake number to the channel.
> +         * Unlock the DMA channel by assigning 0x3F to the channel.
> +         */
> +        if (set) {
> +                reg_set = UNUSED_CHANNEL;
> +                val = hs_number;
> +        } else {
> +                reg_set = hs_number;
> +                val = UNUSED_CHANNEL;
> +        }
> +
> +        reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
> +
> +        for_each_set_clump8(start, reg_mask, &reg_value, 64) {
> +                if (reg_mask == reg_set) {
> +                        mask = GENMASK_ULL(start + 7, start);
> +                        reg_value &= ~mask;
> +                        reg_value |= rol64(val, start);
> +                        lo_hi_writeq(reg_value,
> +                                     chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
> +                        break;
> +                }
> +        }
> +
> +        return 0;
> +}
> +
>  /*
>   * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
>   * as 1, it understands that the current block is the final block in the
> @@ -626,6 +668,9 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
>                  llp = hw_desc->llp;
>          } while (num_periods);
>
> +        if (dw_axi_dma_set_hw_channel(chan->chip, chan->hw_hs_num, true))
> +                goto err_desc_get;
> +
>          return vchan_tx_prep(&chan->vc, &desc->vd, flags);
>
>  err_desc_get:
> @@ -684,6 +729,9 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
>                  llp = hw_desc->llp;
>          } while (sg_len);
>
> +        if (dw_axi_dma_set_hw_channel(chan->chip, chan->hw_hs_num, true))
> +                goto err_desc_get;
> +
>          return vchan_tx_prep(&chan->vc, &desc->vd, flags);
>
>  err_desc_get:
> @@ -959,6 +1007,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
>                  dev_warn(dchan2dev(dchan),
>                           "%s failed to stop\n", axi_chan_name(chan));
>
> +        if (chan->direction != DMA_MEM_TO_MEM)
> +                dw_axi_dma_set_hw_channel(chan->chip,
> +                                          chan->hw_hs_num, false);
> +
>          spin_lock_irqsave(&chan->vc.lock, flags);
>
>          vchan_get_all_descriptors(&chan->vc, &head);
> --
> 2.18.0
>
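
To illustrate the IS_ERR() vs NULL point above, here is a minimal sketch of
one way to keep the checks consistent. The helper names kmb_map_apb_regs()
and kmb_has_apb_regs() are made up for illustration and are not part of the
patch; I'm also assuming the APB region is mapped once at probe time with
devm_platform_ioremap_resource(), which on failure returns an ERR_PTR(),
never NULL. If probe bails out on that error pointer, chip->apb_regs is
afterwards either a valid mapping or NULL, so the plain "!chip->apb_regs"
test becomes the one correct check everywhere else, and IS_ERR() is only
meaningful at the call site of the mapping helper.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#include "dw-axi-dmac.h"        /* driver-internal: struct axi_dma_chip */

/* Hypothetical probe-time helper: map the optional APB region once. */
static int kmb_map_apb_regs(struct platform_device *pdev,
                            struct axi_dma_chip *chip)
{
        void __iomem *apb;

        /* Mapping helpers of this family return ERR_PTR() on failure. */
        apb = devm_platform_ioremap_resource(pdev, 1);
        if (IS_ERR(apb))
                return PTR_ERR(apb);    /* fail probe here... */

        chip->apb_regs = apb;           /* ...so apb_regs is valid or NULL */
        return 0;
}

/* Hypothetical runtime check: a plain NULL test is now sufficient. */
static bool kmb_has_apb_regs(struct axi_dma_chip *chip)
{
        return chip->apb_regs != NULL;
}

Either convention is fine; the point is only to pick one and have probe
guarantee it, so the runtime paths don't mix IS_ERR() and NULL checks for
the same pointer.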