[PATCH 3/4] dmaengine: xilinx_dma: Add CDMA SG transfer support

CDMA devices have built-in SG capability, but the driver did not leverage
it. Add a device_prep_slave_sg implementation for CDMA channels that
treats the memory address supplied in a previous call to
dmaengine_slave_config as the beginning of a contiguous chunk of memory,
used as either the source or the destination of the transfer.

The DMA_PRIVATE capability also needs to be set for CDMA channels;
otherwise the DMA Engine core takes a reference upon registration and
the channel is no longer available to callers of dma_request_chan.
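
For reference, a client could drive the new path roughly as follows (a
minimal sketch: the channel name, dst_phys and the error handling are
hypothetical, and sgl/sg_len are assumed to have been mapped with
dma_map_sg() beforehand):

	struct dma_chan *chan;
	struct dma_slave_config cfg = { };
	struct dma_async_tx_descriptor *txd;

	/* DMA_PRIVATE lets dma_request_chan() hand out the channel */
	chan = dma_request_chan(dev, "cdma");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Contiguous destination region for a MEM_TO_DEV transfer */
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = dst_phys;
	dmaengine_slave_config(chan, &cfg);

	/* The scatterlist supplies the source side of the transfer */
	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT);
	if (!txd)
		goto err;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);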

Signed-off-by: Adrian Larumbe <adrian.martinezlarumbe@xxxxxxxxxx>
---
 drivers/dma/xilinx/xilinx_dma.c | 131 +++++++++++++++++++++++++++++++-
 1 file changed, 127 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 49c7093e2487..40c6cf8bf0e6 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2433,6 +2433,130 @@ xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 	return NULL;
 }
 
+/**
+ * xilinx_cdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor (unused by CDMA)
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_cdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+			  unsigned int sg_len,
+			  enum dma_transfer_direction direction,
+			  unsigned long flags, void *context)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_cdma_tx_segment *segment, *prev = NULL;
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_cdma_desc_hw *hw;
+	dma_addr_t dma_dst, dma_src, dma_addr;
+	size_t len, avail;
+
+	if (!is_slave_direction(direction)) {
+		dev_err(chan->xdev->dev, "%s: invalid transfer direction\n",
+			__func__);
+		return NULL;
+	}
+
+	if (unlikely(sg_len == 0 || !sgl))
+		return NULL;
+
+	dma_src = 0;
+	dma_dst = 0;
+
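+	/*
+	 * For CDMA both ends of the transfer are memory: the address set
+	 * by a previous dmaengine_slave_config call marks the start of a
+	 * contiguous region on the non-scatterlist side.
+	 */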
+	if (direction == DMA_DEV_TO_MEM)
+		dma_src = chan->cfg.src_addr;
+	else
+		dma_dst = chan->cfg.dst_addr;
+
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	avail = sg_dma_len(sgl);
+	/*
+	 * Loop until there is either no more source or no more
+	 * destination scatterlist entries
+	 */
+	while (true) {
+		len = min_t(size_t, avail, chan->xdev->max_buffer_len);
+		if (len == 0)
+			goto fetch;
+
+		/* Allocate the link descriptor from DMA pool */
+		segment = xilinx_cdma_alloc_tx_segment(chan);
+		if (!segment)
+			goto error;
+
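+		/* Resume at the first untransferred byte of this entry */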
+		dma_addr = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
+
+		if (direction == DMA_DEV_TO_MEM)
+			dma_dst = dma_addr;
+		else
+			dma_src = dma_addr;
+
+		hw = &segment->hw;
+		hw->control = len;
+		hw->src_addr = dma_src;
+		hw->dest_addr = dma_dst;
+		if (chan->ext_addr) {
+			hw->src_addr_msb = upper_32_bits(dma_src);
+			hw->dest_addr_msb = upper_32_bits(dma_dst);
+		}
+
+		if (prev) {
+			prev->hw.next_desc = segment->phys;
+			if (chan->ext_addr)
+				prev->hw.next_desc_msb = upper_32_bits(segment->phys);
+		}
+
+		prev = segment;
+		avail -= len;
+		list_add_tail(&segment->node, &desc->segments);
+
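+		/* Advance the contiguous (slave-config) side in lockstep */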
+		if (direction == DMA_DEV_TO_MEM)
+			dma_src += len;
+		else
+			dma_dst += len;
+fetch:
+		/* Fetch the next scatterlist entry */
+		if (avail == 0) {
+			if (sg_len == 0)
+				break;
+			sgl = sg_next(sgl);
+			if (!sgl)
+				break;
+			sg_len--;
+			avail = sg_dma_len(sgl);
+		}
+	}
+
+	/* Link the last hardware descriptor with the first. */
+	segment = list_first_entry(&desc->segments,
+				   struct xilinx_cdma_tx_segment, node);
+	desc->async_tx.phys = segment->phys;
+	prev->hw.next_desc = segment->phys;
+	if (chan->ext_addr)
+		prev->hw.next_desc_msb = upper_32_bits(segment->phys);
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
 /**
  * xilinx_dma_terminate_all - Halt the channel and free descriptors
  * @dchan: Driver specific DMA Channel pointer
@@ -3100,10 +3224,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	xdev->common.dev = &pdev->dev;
 
 	INIT_LIST_HEAD(&xdev->common.channels);
-	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
-		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
-		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
-	}
+	dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+	dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
 
 	xdev->common.device_alloc_chan_resources =
 				xilinx_dma_alloc_chan_resources;
@@ -3124,6 +3246,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
 		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+		xdev->common.device_prep_slave_sg = xilinx_cdma_prep_slave_sg;
 		/* Residue calculation is supported by only AXI DMA and CDMA */
 		xdev->common.residue_granularity =
 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
-- 
2.17.1