On 4/9/21 7:56 PM, Radhey Shyam Pandey wrote:
AXIDMA IP in SG mode sets the completion bit to 1 when a transfer is
completed. Read this bit to move the descriptor from the active list to
the done list. This check is needed when the interrupt delay timeout
and IRQThreshold are enabled, i.e. Dly_IrqEn is triggered without the
interrupt threshold being met.
Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@xxxxxxxxxx>
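A side note on the mechanism before the diff: the threshold and the
delay live side by side in DMACR, and a non-zero delay is what lets
Dly_IrqEn fire before IRQThreshold is met. A minimal sketch of
programming them, assuming the PG021 field layout (IRQThreshold at bits
23:16, IRQDelay at bits 31:24); XILINX_DMA_CR_DELAY_SHIFT and the
helper are illustrative names, not something in the driver today:

#define XILINX_DMA_CR_DELAY_SHIFT	24

static void xilinx_dma_set_irq_coalesce(struct xilinx_dma_chan *chan,
					u32 threshold, u32 delay)
{
	u32 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	/*
	 * Both fields are 8 bits wide. With delay != 0, the delay
	 * interrupt can fire before 'threshold' BDs have completed,
	 * which is why the patch checks the completion bit per BD.
	 */
	dmacr &= ~(0xffU << XILINX_DMA_CR_COALESCE_SHIFT);
	dmacr &= ~(0xffU << XILINX_DMA_CR_DELAY_SHIFT);
	dmacr |= (threshold & 0xff) << XILINX_DMA_CR_COALESCE_SHIFT;
	dmacr |= (delay & 0xff) << XILINX_DMA_CR_DELAY_SHIFT;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
}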
---
- Check BD completion bit only for SG mode.
- Modify the logic to have early return path.
---
drivers/dma/xilinx/xilinx_dma.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 890bf46b36e5..f2305a73cb91 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -177,6 +177,7 @@
#define XILINX_DMA_CR_COALESCE_SHIFT 16
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
+#define XILINX_DMA_BD_COMP_MASK BIT(31)
#define XILINX_DMA_COALESCE_MAX 255
#define XILINX_DMA_NUM_DESCS 512
#define XILINX_DMA_NUM_APP_WORDS 5
@@ -1683,12 +1684,18 @@ static void xilinx_dma_issue_pending(struct dma_chan *dchan)
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
struct xilinx_dma_tx_descriptor *desc, *next;
+ struct xilinx_axidma_tx_segment *seg;
/* This function was invoked with lock held */
if (list_empty(&chan->active_list))
return;
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ /* TODO: remove hardcoding for axidma_tx_segment */
+ seg = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
This needs to be fixed before the patch can be merged, as it will
currently break the non-AXIDMA variants: VDMA and CDMA channels do not
carry struct xilinx_axidma_tx_segment entries on their segment lists,
so this lookup dereferences the wrong type for them. One possible shape
of the fix is sketched after this hunk.
+ if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg)
+ break;
if (chan->has_sg && chan->xdev->dma_config->dmatype !=
XDMA_TYPE_VDMA)
desc->residue = xilinx_dma_get_residue(chan, desc);
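Something along these lines might work (untested sketch, written
against this hunk): gate the completion-bit check on the channel
actually being an AXIDMA one, so the VDMA and CDMA paths never touch
struct xilinx_axidma_tx_segment:

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		if (chan->has_sg && chan->xdev->dma_config->dmatype ==
		    XDMA_TYPE_AXIDMA) {
			seg = list_last_entry(&desc->segments,
					      struct xilinx_axidma_tx_segment,
					      node);
			/* Stop at the first BD hardware has not completed. */
			if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK))
				break;
		}

		if (chan->has_sg && chan->xdev->dma_config->dmatype !=
		    XDMA_TYPE_VDMA)
			desc->residue = xilinx_dma_get_residue(chan, desc);
		...
	}

That would also make the TODO comment unnecessary.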