[PATCH] dmaengine: mcf-edma: add ColdFire mcf5441x eDMA support

This patch adds DMA support for the NXP mcf5441x (ColdFire) family.

The ColdFire mcf5441x implements an eDMA hardware module similar to
the one found in Vybrid VFxxx controllers, but with a slightly
different register set, more DMA channels (64 instead of 32), a
different interrupt mechanism and some other minor differences.

For the above reasons, modifying fsl-edma.c would have been too
complex and likely too ugly, hence the decision to create a separate
driver, using fsl-edma as a starting point.

The driver has been tested on the mcf5441x (stmark2 board) with the
dspi driver; it works fine and seems reliable as an initial version.

Signed-off-by: Angelo Dureghello <angelo@xxxxxxxx>
---
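Note for reviewers: below is a minimal sketch of how a client driver
is expected to request a channel through the exported filter function.
The helper name, channel number and setup here are illustrative
assumptions, not part of this patch:

	#include <linux/dmaengine.h>
	#include <linux/platform_data/dma-mcf-edma.h>

	/* hypothetical helper; channel 12 is a made-up example number */
	static struct dma_chan *stmark2_request_dspi_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* returns NULL if no matching channel is available */
		return dma_request_channel(mask, mcf_edma_filter_fn,
					   MCF_EDMA_FILTER_PARAM(12));
	}
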
 arch/m68k/configs/stmark2_defconfig        |   2 +
 drivers/dma/Kconfig                        |  11 +
 drivers/dma/Makefile                       |   1 +
 drivers/dma/mcf-edma.c                     | 847 +++++++++++++++++++++
 include/linux/platform_data/dma-mcf-edma.h |  38 +
 5 files changed, 899 insertions(+)
 create mode 100644 drivers/dma/mcf-edma.c
 create mode 100644 include/linux/platform_data/dma-mcf-edma.h
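
Usage note: the m5441x platform code (not part of this patch) is
expected to provide channel data through mcf_edma_platform_data,
roughly as in this sketch; the slave names and channel numbers below
are assumptions for illustration:

	#include <linux/dmaengine.h>
	#include <linux/platform_data/dma-mcf-edma.h>

	/* hypothetical request mapping: peripheral -> eDMA channel */
	static const struct dma_slave_map mcf_edma_map[] = {
		{ "fsl-dspi.0", "tx", MCF_EDMA_FILTER_PARAM(13) },
		{ "fsl-dspi.0", "rx", MCF_EDMA_FILTER_PARAM(12) },
	};

	static struct mcf_edma_platform_data mcf_edma_data = {
		.dma_channels	= 64,
		.slave_map	= mcf_edma_map,
		.slavecnt	= ARRAY_SIZE(mcf_edma_map),
	};

The board code then registers an "mcf-edma" platform device with the
eDMA MMIO/IRQ resources and this structure as its platform data.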

diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig
index bf2bfd4ebd2a..2d111e0aeb48 100644
--- a/arch/m68k/configs/stmark2_defconfig
+++ b/arch/m68k/configs/stmark2_defconfig
@@ -71,6 +71,8 @@ CONFIG_GPIO_GENERIC_PLATFORM=y
 # CONFIG_HWMON is not set
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_DMADEVICES=y
+CONFIG_MCF_EDMA=y
 # CONFIG_FILE_LOCKING is not set
 # CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY_USER is not set
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6d61cd023633..139626c01ba4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -225,6 +225,17 @@ config FSL_EDMA
 	  multiplexing capability for DMA request sources(slot).
 	  This module can be found on Freescale Vybrid and LS-1 SoCs.
 
+config MCF_EDMA
+	tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
+	depends on M5441x
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
	  Support the Freescale ColdFire eDMA engine, a 64-channel
+	  implementation that performs complex data transfers with
+	  minimal intervention from a host processor.
+	  This module can be found on Freescale ColdFire mcf5441x SoCs.
+
 config FSL_RAID
         tristate "Freescale RAID engine Support"
         depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0f62a4d49aab..db93824441aa 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_MCF_EDMA) += mcf-edma.o
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
new file mode 100644
index 000000000000..8fb6282e922a
--- /dev/null
+++ b/drivers/dma/mcf-edma.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/dma/mcf-edma.c
+ *
+ * Driver for the Freescale ColdFire 64-channel eDMA implementation,
+ * derived from drivers/dma/fsl-edma.c.
+ *
+ * Copyright 2013-2014 Freescale Semiconductor, Inc
+ * Copyright 2017 Sysam, Angelo Dureghello  <angelo@xxxxxxxx>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/dma-mcf-edma.h>
+
+#include "virt-dma.h"
+
+#define EDMA_CHANNELS		64
+#define EDMA_MASK_CH(x)		((x) & 0x3F)
+#define EDMA_MASK_ITER(x)	((x) & 0x7FFF)
+#define EDMA_TCD_MEM_ALIGN	32	/* TCDs must be 32-byte aligned */
+
+#define EDMA_TCD_ATTR_DSZ_8b	(0x0000)
+#define EDMA_TCD_ATTR_DSZ_16b	(0x0001)
+#define EDMA_TCD_ATTR_DSZ_32b	(0x0002)
+#define EDMA_TCD_ATTR_DSZ_16B	(0x0004)
+#define EDMA_TCD_ATTR_SSZ_8b	(EDMA_TCD_ATTR_DSZ_8b << 8)
+#define EDMA_TCD_ATTR_SSZ_16b	(EDMA_TCD_ATTR_DSZ_16b << 8)
+#define EDMA_TCD_ATTR_SSZ_32b	(EDMA_TCD_ATTR_DSZ_32b << 8)
+#define EDMA_TCD_ATTR_SSZ_16B	(EDMA_TCD_ATTR_DSZ_16B << 8)
+
+#define MCF_EDMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+#define EDMA_CR_ERCA		BIT(2)
+#define EDMA_CR_ERGA		BIT(3)
+
+#define EDMA_TCD_CSR_INT_MAJOR	BIT(1)
+#define EDMA_TCD_CSR_D_REQ	BIT(3)
+#define EDMA_TCD_CSR_E_SG	BIT(4)
+
+#define EDMA_CERR_CERR(x)	((x) & 0x3F)	/* 6-bit field, 64 channels */
+
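+/*
+ * In-memory transfer control descriptor (TCD): field order matches the
+ * big-endian hardware TCD layout, so it can be written 1:1 to the
+ * per-channel TCD registers (see mcf_edma_set_tcd_regs()).
+ */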
+struct edma_tcd {
+	u32	saddr;
+	u16	attr;
+	u16	soff;
+	u32	nbytes;
+	u32	slast;
+	u32	daddr;
+	u16	citer;
+	u16	doff;
+	u32	dlast_sga;
+	u16	biter;
+	u16	csr;
+};
+
+struct edma_regs {
+	u32	cr;
+	u32	es;
+	u32	erqh;
+	u32	erql;
+	u32	eeih;
+	u32	eeil;
+	u8	serq;
+	u8	cerq;
+	u8	seei;
+	u8	ceei;
+	u8	cint;
+	u8	cerr;
+	u8	ssrt;
+	u8	cdne;
+	u32	inth;
+	u32	intl;
+	u32	errh;
+	u32	errl;
+	u32	rsh;
+	u32	rsl;
+	u8	res[200];		/* pad to offset 0x100 */
+	u8	dch_pri[EDMA_CHANNELS];	/* per-channel priority, at 0x100 */
+	u8	res2[3776];		/* pad to offset 0x1000 */
+	struct	edma_tcd tcd[EDMA_CHANNELS];	/* TCD regs, at 0x1000 */
+};
+
+struct mcf_edma_sw_tcd {
+	dma_addr_t ptcd;
+	struct edma_tcd *vtcd;
+};
+
+struct mcf_edma_desc {
+	struct virt_dma_desc vdesc;
+	struct mcf_edma_chan *echan;
+	bool iscyclic;
+	unsigned int n_tcds;
+	struct mcf_edma_sw_tcd tcd[];
+};
+
+struct mcf_edma_slave_config {
+	enum dma_transfer_direction dir;
+	enum dma_slave_buswidth addr_width;
+	u32 dev_addr;
+	u32 burst;
+	u32 attr;
+};
+
+struct mcf_edma_chan {
+	struct virt_dma_chan vchan;
+	enum dma_status	status;
+	bool idle;
+	struct mcf_edma_engine *edma;
+	struct dma_pool *tcd_pool;
+	struct mcf_edma_desc *edesc;
+	struct mcf_edma_slave_config esc;
+	u32 slave_id;
+};
+
+struct mcf_edma_engine {
+	struct dma_device dma_dev;
+	int n_chans;
+	void __iomem *membase;
+	struct mutex mcf_edma_mutex;
+	struct mcf_edma_chan chans[];
+};
+
+static struct mcf_edma_chan *to_mcf_edma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct mcf_edma_chan, vchan.chan);
+}
+
+static struct mcf_edma_desc *to_mcf_edma_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct mcf_edma_desc, vdesc);
+}
+
+static unsigned int mcf_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+{
+	switch (addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		return EDMA_TCD_ATTR_SSZ_8b | EDMA_TCD_ATTR_DSZ_8b;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		return EDMA_TCD_ATTR_SSZ_16b | EDMA_TCD_ATTR_DSZ_16b;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	default:
+		return EDMA_TCD_ATTR_SSZ_32b | EDMA_TCD_ATTR_DSZ_32b;
+	}
+}
+
+static void mcf_edma_enable_request(struct mcf_edma_chan *mcf_chan)
+{
+	struct edma_regs *regs = mcf_chan->edma->membase;
+	u32 ch = mcf_chan->vchan.chan.chan_id;
+
+	iowrite8(EDMA_MASK_CH(ch), &regs->seei);
+	iowrite8(EDMA_MASK_CH(ch), &regs->serq);
+}
+
+static void mcf_edma_disable_request(struct mcf_edma_chan *mcf_chan)
+{
+	struct edma_regs *regs = mcf_chan->edma->membase;
+	u32 ch = mcf_chan->vchan.chan.chan_id;
+
+	iowrite8(EDMA_MASK_CH(ch), &regs->cerq);
+	iowrite8(EDMA_MASK_CH(ch), &regs->ceei);
+}
+
+static void mcf_edma_set_tcd_regs(struct mcf_edma_chan *mcf_chan,
+				  struct edma_tcd *tcd)
+{
+	struct edma_regs *regs = mcf_chan->edma->membase;
+	u32 ch = mcf_chan->vchan.chan.chan_id;
+
+	iowrite16(0, &regs->tcd[ch].csr);
+	iowrite32(tcd->saddr, &regs->tcd[ch].saddr);
+	iowrite16(tcd->attr, &regs->tcd[ch].attr);
+	iowrite16(tcd->soff, &regs->tcd[ch].soff);
+	iowrite32(tcd->nbytes, &regs->tcd[ch].nbytes);
+	iowrite32(tcd->slast, &regs->tcd[ch].slast);
+	iowrite32(tcd->daddr, &regs->tcd[ch].daddr);
+	iowrite16(tcd->citer, &regs->tcd[ch].citer);
+	iowrite16(tcd->doff, &regs->tcd[ch].doff);
+	iowrite32(tcd->dlast_sga,  &regs->tcd[ch].dlast_sga);
+	iowrite16(tcd->biter, &regs->tcd[ch].biter);
+	iowrite16(tcd->csr, &regs->tcd[ch].csr);
+}
+
+static void mcf_edma_xfer_desc(struct mcf_edma_chan *mcf_chan)
+{
+	struct virt_dma_desc *vdesc;
+
+	vdesc = vchan_next_desc(&mcf_chan->vchan);
+	if (!vdesc)
+		return;
+
+	mcf_chan->edesc = to_mcf_edma_desc(vdesc);
+
+	mcf_edma_set_tcd_regs(mcf_chan, mcf_chan->edesc->tcd[0].vtcd);
+	mcf_edma_enable_request(mcf_chan);
+
+	mcf_chan->status = DMA_IN_PROGRESS;
+	mcf_chan->idle = false;
+}
+
+static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
+{
+	struct mcf_edma_engine *mcf_edma = dev_id;
+	struct edma_regs *regs = mcf_edma->membase;
+	unsigned int ch;
+	struct mcf_edma_chan *mcf_chan;
+	u64 intmap;
+
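+	/* build a 64-bit mask of pending channel interrupts (INTH:INTL) */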
+	intmap = ioread32(&regs->inth);
+	intmap <<= 32;
+	intmap |= ioread32(&regs->intl);
+	if (!intmap)
+		return IRQ_NONE;
+
+	for (ch = 0; ch < mcf_edma->n_chans; ch++) {
+		if (intmap & BIT_ULL(ch)) {
+			iowrite8(EDMA_MASK_CH(ch), &regs->cint);
+
+			mcf_chan = &mcf_edma->chans[ch];
+
+			spin_lock(&mcf_chan->vchan.lock);
+			if (!mcf_chan->edesc->iscyclic) {
+				list_del(&mcf_chan->edesc->vdesc.node);
+				vchan_cookie_complete(&mcf_chan->edesc->vdesc);
+				mcf_chan->edesc = NULL;
+				mcf_chan->status = DMA_COMPLETE;
+				mcf_chan->idle = true;
+			} else {
+				vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
+			}
+
+			if (!mcf_chan->edesc)
+				mcf_edma_xfer_desc(mcf_chan);
+
+			spin_unlock(&mcf_chan->vchan.lock);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
+{
+	struct mcf_edma_engine *mcf_edma = dev_id;
+	struct edma_regs *regs = mcf_edma->membase;
+	unsigned int ch;
+	u32 errl, errh;
+
+	errl = ioread32(&regs->errl);
+	errh = ioread32(&regs->errh);
+	if (!errl && !errh)
+		return IRQ_NONE;
+
+	for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
+		if (errl & BIT(ch)) {
+			mcf_edma_disable_request(&mcf_edma->chans[ch]);
+			iowrite8(EDMA_CERR_CERR(ch), &regs->cerr);
+			mcf_edma->chans[ch].status = DMA_ERROR;
+			mcf_edma->chans[ch].idle = true;
+		}
+	}
+
+	for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
+		if (errh & BIT(ch - (EDMA_CHANNELS / 2))) {
+			mcf_edma_disable_request(&mcf_edma->chans[ch]);
+			iowrite8(EDMA_CERR_CERR(ch), &regs->cerr);
+			mcf_edma->chans[ch].status = DMA_ERROR;
+			mcf_edma->chans[ch].idle = true;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static inline void mcf_edma_fill_tcd(struct edma_tcd *tcd,
+			u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
+			u32 slast, u16 citer, u16 biter, u16 doff,
+			u32 dlast_sga, bool major_int,
+			bool disable_req, bool enable_sg)
+{
+	u16 csr = 0;
+
+	tcd->saddr = src;
+	tcd->daddr = dst;
+	tcd->attr = attr;
+	tcd->soff = soff;
+	tcd->nbytes = nbytes;
+	tcd->slast = slast;
+	tcd->citer = EDMA_MASK_ITER(citer);
+	tcd->doff = doff;
+	tcd->dlast_sga = dlast_sga;
+	tcd->biter = EDMA_MASK_ITER(biter);
+
+	if (major_int)
+		csr |= EDMA_TCD_CSR_INT_MAJOR;
+
+	if (disable_req)
+		csr |= EDMA_TCD_CSR_D_REQ;
+
+	if (enable_sg)
+		csr |= EDMA_TCD_CSR_E_SG;
+
+	tcd->csr = csr;
+}
+
+static int mcf_edma_slave_config(struct dma_chan *chan,
+				 struct dma_slave_config *cfg)
+{
+	struct mcf_edma_chan *mcf_chan = to_mcf_edma_chan(chan);
+
+	mcf_chan->esc.dir = cfg->direction;
+	if (cfg->direction == DMA_DEV_TO_MEM) {
+		mcf_chan->esc.dev_addr = cfg->src_addr;
+		mcf_chan->esc.addr_width = cfg->src_addr_width;
+		mcf_chan->esc.burst = cfg->src_maxburst;
+		mcf_chan->esc.attr = mcf_edma_get_tcd_attr(cfg->src_addr_width);
+	} else if (cfg->direction == DMA_MEM_TO_DEV) {
+		mcf_chan->esc.dev_addr = cfg->dst_addr;
+		mcf_chan->esc.addr_width = cfg->dst_addr_width;
+		mcf_chan->esc.burst = cfg->dst_maxburst;
+		mcf_chan->esc.attr = mcf_edma_get_tcd_attr(cfg->dst_addr_width);
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct mcf_edma_desc *mcf_edma_alloc_desc(
+		struct mcf_edma_chan *mcf_chan, int sg_len)
+{
+	struct mcf_edma_desc *mcf_desc;
+	int i;
+
+	mcf_desc = kzalloc(sizeof(*mcf_desc) + sizeof(struct mcf_edma_sw_tcd)
+			* sg_len, GFP_NOWAIT);
+	if (!mcf_desc)
+		return NULL;
+
+	mcf_desc->echan = mcf_chan;
+	mcf_desc->n_tcds = sg_len;
+	for (i = 0; i < sg_len; i++) {
+		mcf_desc->tcd[i].vtcd = dma_pool_alloc(mcf_chan->tcd_pool,
+					GFP_NOWAIT, &mcf_desc->tcd[i].ptcd);
+		if (!mcf_desc->tcd[i].vtcd)
+			goto err;
+	}
+
+	return mcf_desc;
+
+err:
+	while (--i >= 0)
+		dma_pool_free(mcf_chan->tcd_pool, mcf_desc->tcd[i].vtcd,
+				mcf_desc->tcd[i].ptcd);
+	kfree(mcf_desc);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *mcf_edma_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct mcf_edma_chan *mcf_chan = to_mcf_edma_chan(chan);
+	struct mcf_edma_desc *mcf_desc;
+	struct scatterlist *sg;
+	u32 src_addr, dst_addr, last_sg, nbytes;
+	u16 soff, doff, iter;
+	int i;
+
+	if (!is_slave_direction(mcf_chan->esc.dir))
+		return NULL;
+
+	mcf_desc = mcf_edma_alloc_desc(mcf_chan, sg_len);
+	if (!mcf_desc)
+		return NULL;
+
+	mcf_desc->iscyclic = false;
+
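+	/* minor loop byte count: one full burst at the configured width */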
+	nbytes = mcf_chan->esc.addr_width * mcf_chan->esc.burst;
+	for_each_sg(sgl, sg, sg_len, i) {
+		if (mcf_chan->esc.dir == DMA_MEM_TO_DEV) {
+			src_addr = sg_dma_address(sg);
+			dst_addr = mcf_chan->esc.dev_addr;
+			soff = mcf_chan->esc.addr_width;
+			doff = 0;
+		} else {
+			src_addr = mcf_chan->esc.dev_addr;
+			dst_addr = sg_dma_address(sg);
+			soff = 0;
+			doff = mcf_chan->esc.addr_width;
+		}
+
+		iter = sg_dma_len(sg) / nbytes;
+		if (i < sg_len - 1) {
+			last_sg = mcf_desc->tcd[(i + 1)].ptcd;
+			mcf_edma_fill_tcd(mcf_desc->tcd[i].vtcd, src_addr,
+					  dst_addr, mcf_chan->esc.attr, soff,
+					  nbytes, 0, iter, iter, doff, last_sg,
+					  false, false, true);
+		} else {
+			last_sg = 0;
+			mcf_edma_fill_tcd(mcf_desc->tcd[i].vtcd, src_addr,
+					  dst_addr, mcf_chan->esc.attr, soff,
+					  nbytes, 0, iter, iter, doff, last_sg,
+					  true, true, false);
+		}
+	}
+
+	return vchan_tx_prep(&mcf_chan->vchan, &mcf_desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *mcf_edma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction direction,
+		unsigned long flags)
+{
+	struct mcf_edma_chan *mcf_chan = to_mcf_edma_chan(chan);
+	struct mcf_edma_desc *mcf_desc;
+	dma_addr_t dma_buf_next;
+	int sg_len, i;
+	u32 src_addr, dst_addr, last_sg, nbytes;
+	u16 soff, doff, iter;
+
+	if (!is_slave_direction(mcf_chan->esc.dir))
+		return NULL;
+
+	sg_len = buf_len / period_len;
+	mcf_desc = mcf_edma_alloc_desc(mcf_chan, sg_len);
+	if (!mcf_desc)
+		return NULL;
+	mcf_desc->iscyclic = true;
+
+	dma_buf_next = dma_addr;
+	nbytes = mcf_chan->esc.addr_width * mcf_chan->esc.burst;
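+	/* major iteration count: number of minor loops (bursts) per period */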
+	iter = period_len / nbytes;
+
+	for (i = 0; i < sg_len; i++) {
+		if (dma_buf_next >= dma_addr + buf_len)
+			dma_buf_next = dma_addr;
+
+		/* get next sg's physical address */
+		last_sg = mcf_desc->tcd[(i + 1) % sg_len].ptcd;
+
+		if (mcf_chan->esc.dir == DMA_MEM_TO_DEV) {
+			src_addr = dma_buf_next;
+			dst_addr = mcf_chan->esc.dev_addr;
+			soff = mcf_chan->esc.addr_width;
+			doff = 0;
+		} else {
+			src_addr = mcf_chan->esc.dev_addr;
+			dst_addr = dma_buf_next;
+			soff = 0;
+			doff = mcf_chan->esc.addr_width;
+		}
+
+		mcf_edma_fill_tcd(mcf_desc->tcd[i].vtcd, src_addr, dst_addr,
+				  mcf_chan->esc.attr, soff, nbytes, 0, iter,
+				  iter, doff, last_sg, true, false, true);
+		dma_buf_next += period_len;
+	}
+
+	return vchan_tx_prep(&mcf_chan->vchan, &mcf_desc->vdesc, flags);
+}
+
+static int mcf_edma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct mcf_edma_chan *mcf_chan = to_mcf_edma_chan(chan);
+
+	mcf_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
+			sizeof(struct edma_tcd), EDMA_TCD_MEM_ALIGN, 0);
+	if (!mcf_chan->tcd_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void mcf_edma_free_chan_resources(struct dma_chan *chan)
+{
+	struct mcf_edma_chan *mcf_chan = to_mcf_edma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&mcf_chan->vchan.lock, flags);
+	mcf_edma_disable_request(mcf_chan);
+	mcf_chan->edesc = NULL;
+	vchan_get_all_descriptors(&mcf_chan->vchan, &head);
+	spin_unlock_irqrestore(&mcf_chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&mcf_chan->vchan, &head);
+	dma_pool_destroy(mcf_chan->tcd_pool);
+	mcf_chan->tcd_pool = NULL;
+}
+
+static size_t mcf_edma_desc_residue(struct mcf_edma_chan *mcf_chan,
+		struct virt_dma_desc *vdesc, bool in_progress)
+{
+	struct mcf_edma_desc *edesc = mcf_chan->edesc;
+	struct edma_regs *regs = mcf_chan->edma->membase;
+	u32 ch = mcf_chan->vchan.chan.chan_id;
+	enum dma_transfer_direction dir = mcf_chan->esc.dir;
+	dma_addr_t cur_addr, dma_addr;
+	size_t len, size;
+	int i;
+
+	/* calculate the total size in this desc */
+	for (len = i = 0; i < mcf_chan->edesc->n_tcds; i++)
+		len += edesc->tcd[i].vtcd->nbytes * edesc->tcd[i].vtcd->biter;
+
+	if (!in_progress)
+		return len;
+
+	cur_addr = (dir == DMA_MEM_TO_DEV) ?
+		ioread32(&regs->tcd[ch].saddr) :
+		ioread32(&regs->tcd[ch].daddr);
+
+	/* figure out the finished and calculate the residue */
+	for (i = 0; i < mcf_chan->edesc->n_tcds; i++) {
+		size = edesc->tcd[i].vtcd->nbytes * edesc->tcd[i].vtcd->biter;
+		if (dir == DMA_MEM_TO_DEV)
+			dma_addr = edesc->tcd[i].vtcd->saddr;
+		else
+			dma_addr = edesc->tcd[i].vtcd->daddr;
+
+		len -= size;
+		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
+			len += dma_addr + size - cur_addr;
+			break;
+		}
+	}
+
+	return len;
+}
+
+static enum dma_status mcf_edma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct mcf_edma_chan *mcf_chan = to_mcf_edma_chan(chan);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	unsigned long flags;
+
+	status = dma_cookie_status(chan, cookie, txstate);
+	if (status == DMA_COMPLETE)
+		return status;
+
+	if (!txstate)
+		return mcf_chan->status;
+
+	spin_lock_irqsave(&mcf_chan->vchan.lock, flags);
+	vdesc = vchan_find_desc(&mcf_chan->vchan, cookie);
+	if (mcf_chan->edesc && cookie == mcf_chan->edesc->vdesc.tx.cookie)
+		txstate->residue =
+				mcf_edma_desc_residue(mcf_chan, vdesc, true);
+	else if (vdesc)
+		txstate->residue =
+				mcf_edma_desc_residue(mcf_chan, vdesc, false);
+	else
+		txstate->residue = 0;
+
+	spin_unlock_irqrestore(&mcf_chan->vchan.lock, flags);
+
+	return mcf_chan->status;
+}
+
+static void mcf_edma_issue_pending(struct dma_chan *chan)
+{
+	struct mcf_edma_chan *mcf_chan = to_mcf_edma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mcf_chan->vchan.lock, flags);
+
+	if (vchan_issue_pending(&mcf_chan->vchan) && !mcf_chan->edesc)
+		mcf_edma_xfer_desc(mcf_chan);
+
+	spin_unlock_irqrestore(&mcf_chan->vchan.lock, flags);
+}
+
+static void mcf_edma_free_desc(struct virt_dma_desc *vdesc)
+{
+	struct mcf_edma_desc *mcf_desc = to_mcf_edma_desc(vdesc);
+	int i;
+
+	for (i = 0; i < mcf_desc->n_tcds; i++)
+		dma_pool_free(mcf_desc->echan->tcd_pool, mcf_desc->tcd[i].vtcd,
+				mcf_desc->tcd[i].ptcd);
+	kfree(mcf_desc);
+}
+
+static int mcf_edma_irq_init(struct platform_device *pdev,
+				struct mcf_edma_engine *mcf_edma)
+{
+	int ret = 0, i;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev,
+				IORESOURCE_IRQ, "edma-tx-00-15");
+	if (!res)
+		return -ENXIO;
+
+	for (i = res->start; i <= res->end; ++i) {
+		ret = devm_request_irq(&pdev->dev, i,
+			mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+		if (ret)
+			return ret;
+	}
+
+	res = platform_get_resource_byname(pdev,
+			IORESOURCE_IRQ, "edma-tx-16-55");
+	if (!res)
+		return -ENXIO;
+
+	for (i = res->start; i <= res->end; ++i) {
+		ret = devm_request_irq(&pdev->dev, i,
+			mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+		if (ret)
+			return ret;
+	}
+
+	ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
+	if (ret != -ENXIO) {
+		ret = devm_request_irq(&pdev->dev, ret,
+			mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+		if (ret)
+			return ret;
+	}
+
+	ret = platform_get_irq_byname(pdev, "edma-err");
+	if (ret != -ENXIO) {
+		ret = devm_request_irq(&pdev->dev, ret,
+			mcf_edma_err_handler, 0, "eDMA", mcf_edma);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void mcf_edma_irq_free(struct platform_device *pdev,
+				struct mcf_edma_engine *mcf_edma)
+{
+	int irq;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev,
+			IORESOURCE_IRQ, "edma-tx-00-15");
+	if (res) {
+		for (irq = res->start; irq <= res->end; irq++)
+			devm_free_irq(&pdev->dev, irq, mcf_edma);
+	}
+
+	res = platform_get_resource_byname(pdev,
+			IORESOURCE_IRQ, "edma-tx-16-55");
+	if (res) {
+		for (irq = res->start; irq <= res->end; irq++)
+			devm_free_irq(&pdev->dev, irq, mcf_edma);
+	}
+
+	irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
+	if (irq != -ENXIO)
+		devm_free_irq(&pdev->dev, irq, mcf_edma);
+
+	irq = platform_get_irq_byname(pdev, "edma-err");
+	if (irq != -ENXIO)
+		devm_free_irq(&pdev->dev, irq, mcf_edma);
+}
+
+static int mcf_edma_probe(struct platform_device *pdev)
+{
+	struct mcf_edma_platform_data *pdata;
+	struct mcf_edma_engine *mcf_edma;
+	struct mcf_edma_chan *mcf_chan;
+	struct edma_regs *regs;
+	struct resource *res;
+	int ret, i, len, chans;
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data supplied\n");
+		return -EINVAL;
+	}
+
+	chans = pdata->dma_channels;
+	if (!chans) {
+		dev_info(&pdev->dev, "setting default channel number to %d\n",
+			 EDMA_CHANNELS);
+		chans = EDMA_CHANNELS;
+	}
+
+	len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
+	mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!mcf_edma)
+		return -ENOMEM;
+
+	mcf_edma->n_chans = chans;
+
+	mutex_init(&mcf_edma->mcf_edma_mutex);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mcf_edma->membase))
+		return PTR_ERR(mcf_edma->membase);
+
+	regs = mcf_edma->membase;
+
+	INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
+	for (i = 0; i < mcf_edma->n_chans; i++) {
+		struct mcf_edma_chan *mcf_chan = &mcf_edma->chans[i];
+
+		mcf_chan->edma = mcf_edma;
+		mcf_chan->slave_id = i;
+		mcf_chan->idle = true;
+		mcf_chan->vchan.desc_free = mcf_edma_free_desc;
+		vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
+		iowrite16(0, &regs->tcd[i].csr);
+	}
+
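+	/* clear any pending channel interrupts */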
+	iowrite32(~0, &regs->inth);
+	iowrite32(~0, &regs->intl);
+
+	ret = mcf_edma_irq_init(pdev, mcf_edma);
+	if (ret)
+		return ret;
+
+	dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
+	dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);
+
+	mcf_edma->dma_dev.dev = &pdev->dev;
+	mcf_edma->dma_dev.device_alloc_chan_resources =
+			mcf_edma_alloc_chan_resources;
+	mcf_edma->dma_dev.device_free_chan_resources =
+			mcf_edma_free_chan_resources;
+	mcf_edma->dma_dev.device_config = mcf_edma_slave_config;
+	mcf_edma->dma_dev.device_prep_dma_cyclic =
+			mcf_edma_prep_dma_cyclic;
+	mcf_edma->dma_dev.device_prep_slave_sg = mcf_edma_prep_slave_sg;
+	mcf_edma->dma_dev.device_tx_status = mcf_edma_tx_status;
+	mcf_edma->dma_dev.device_issue_pending = mcf_edma_issue_pending;
+
+	mcf_edma->dma_dev.src_addr_widths = MCF_EDMA_BUSWIDTHS;
+	mcf_edma->dma_dev.dst_addr_widths = MCF_EDMA_BUSWIDTHS;
+	mcf_edma->dma_dev.directions =
+			BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+	mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
+	mcf_edma->dma_dev.filter.map = pdata->slave_map;
+	mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;
+
+	platform_set_drvdata(pdev, mcf_edma);
+
+	ret = dma_async_device_register(&mcf_edma->dma_dev);
+	if (ret) {
+		pr_err("Can't register Freescale eDMA engine. (%d)\n", ret);
+		return ret;
+	}
+
+	/* Enable round robin arbitration */
+	iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, &regs->cr);
+
+	return 0;
+}
+
+static void mcf_edma_cleanup_vchan(struct dma_device *dmadev)
+{
+	struct mcf_edma_chan *chan, *_chan;
+
+	list_for_each_entry_safe(chan, _chan,
+				&dmadev->channels, vchan.chan.device_node) {
+		list_del(&chan->vchan.chan.device_node);
+		tasklet_kill(&chan->vchan.task);
+	}
+}
+
+static int mcf_edma_remove(struct platform_device *pdev)
+{
+	struct mcf_edma_engine *mcf_edma = platform_get_drvdata(pdev);
+
+	mcf_edma_irq_free(pdev, mcf_edma);
+	mcf_edma_cleanup_vchan(&mcf_edma->dma_dev);
+	dma_async_device_unregister(&mcf_edma->dma_dev);
+
+	return 0;
+}
+
+static struct platform_driver mcf_edma_driver = {
+	.driver		= {
+		.name	= "mcf-edma",
+	},
+	.probe		= mcf_edma_probe,
+	.remove		= mcf_edma_remove,
+};
+
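+/*
+ * Filter function for dma_request_channel(): matches a channel by its
+ * slave id, i.e. the eDMA channel number passed through
+ * MCF_EDMA_FILTER_PARAM().
+ */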
+bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &mcf_edma_driver.driver) {
+		struct mcf_edma_chan *mcf_chan = to_mcf_edma_chan(chan);
+
+		return (mcf_chan->slave_id == (unsigned long)param);
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(mcf_edma_filter_fn);
+
+static int __init mcf_edma_init(void)
+{
+	return platform_driver_register(&mcf_edma_driver);
+}
+subsys_initcall(mcf_edma_init);
+
+static void __exit mcf_edma_exit(void)
+{
+	platform_driver_unregister(&mcf_edma_driver);
+}
+module_exit(mcf_edma_exit);
+
+MODULE_ALIAS("platform:mcf-edma");
+MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
+MODULE_LICENSE("GPL v2");
diff --git a/include/linux/platform_data/dma-mcf-edma.h b/include/linux/platform_data/dma-mcf-edma.h
new file mode 100644
index 000000000000..9a1819acb28f
--- /dev/null
+++ b/include/linux/platform_data/dma-mcf-edma.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale eDMA platform data, ColdFire SoC family.
+ *
+ * Copyright (c) 2017 Angelo Dureghello <angelo@xxxxxxxx>
+ */
+
+#ifndef __MACH_MCF_EDMA_H__
+#define __MACH_MCF_EDMA_H__
+
+struct dma_chan;
+struct dma_slave_map;
+
+bool mcf_edma_filter_fn(struct dma_chan *chan, void *param);
+
+#define MCF_EDMA_FILTER_PARAM(ch)	((void *)(unsigned long)(ch))
+
+/**
+ * struct mcf_edma_platform_data - platform-specific data for the eDMA engine
+ *
+ * @dma_channels:	number of eDMA channels
+ * @slave_map:		channel-to-peripheral request mapping table
+ * @slavecnt:		number of entries in @slave_map
+ */
+struct mcf_edma_platform_data {
+	int dma_channels;
+	const struct dma_slave_map *slave_map;
+	int slavecnt;
+};
+
+#endif /* __MACH_MCF_EDMA_H__ */
-- 
2.17.0
