Re: [PATCH v6] dma: sun4i: Add support for the DMA engine on sun[457]i SoCs

On Mon, May 11, 2015 at 03:27:32PM +0200, Maxime Ripard wrote:
> +
> +/** Normal DMA register values **/
> +
> +/* Normal DMA source/destination data request type values */
> +#define NDMA_DRQ_TYPE_SDRAM			0x16
> +#define NDMA_DRQ_TYPE_LIMIT			(0x1F + 1)
> +
> +/** Normal DMA register layout **/
> +
> +/* Normal DMA configuration register layout */
> +#define NDMA_CFG_LOADING			BIT(31)
> +#define NDMA_CFG_CONT_MODE			BIT(30)
> +#define NDMA_CFG_WAIT_STATE(n)			((n) << 27)
> +#define NDMA_CFG_DEST_DATA_WIDTH(width)		((width) << 25)
> +#define NDMA_CFG_DEST_BURST_LENGTH(len)		((len) << 23)
> +#define NDMA_CFG_DEST_NON_SECURE		BIT(22)
> +#define NDMA_CFG_DEST_FIXED_ADDR		BIT(21)
> +#define NDMA_CFG_DEST_DRQ_TYPE(type)		((type) << 16)
> +#define NDMA_CFG_BYTE_COUNT_MODE_REMAIN		BIT(15)
> +#define NDMA_CFG_SRC_DATA_WIDTH(width)		((width) << 9)
> +#define NDMA_CFG_SRC_BURST_LENGTH(len)		((len) << 7)
> +#define NDMA_CFG_SRC_NON_SECURE			BIT(6)
> +#define NDMA_CFG_SRC_FIXED_ADDR			BIT(5)
> +#define NDMA_CFG_SRC_DRQ_TYPE(type)		((type) << 0)
> +
> +/** Dedicated DMA register values **/
> +
> +/* Dedicated DMA source/destination address mode values */
> +#define DDMA_ADDR_MODE_LINEAR			0
> +#define DDMA_ADDR_MODE_IO			1
> +#define DDMA_ADDR_MODE_HORIZONTAL_PAGE		2
> +#define DDMA_ADDR_MODE_VERTICAL_PAGE		3
> +
> +/* Dedicated DMA source/destination data request type values */
> +#define DDMA_DRQ_TYPE_SDRAM			0x1
> +#define DDMA_DRQ_TYPE_LIMIT			(0x1F + 1)
> +
> +/** Dedicated DMA register layout **/
> +
> +/* Dedicated DMA configuration register layout */
> +#define DDMA_CFG_LOADING			BIT(31)
> +#define DDMA_CFG_BUSY				BIT(30)
> +#define DDMA_CFG_CONT_MODE			BIT(29)
> +#define DDMA_CFG_DEST_NON_SECURE		BIT(28)
> +#define DDMA_CFG_DEST_DATA_WIDTH(width)		((width) << 25)
> +#define DDMA_CFG_DEST_BURST_LENGTH(len)		((len) << 23)
> +#define DDMA_CFG_DEST_ADDR_MODE(mode)		((mode) << 21)
> +#define DDMA_CFG_DEST_DRQ_TYPE(type)		((type) << 16)
> +#define DDMA_CFG_BYTE_COUNT_MODE_REMAIN		BIT(15)
> +#define DDMA_CFG_SRC_NON_SECURE			BIT(12)
> +#define DDMA_CFG_SRC_DATA_WIDTH(width)		((width) << 9)
> +#define DDMA_CFG_SRC_BURST_LENGTH(len)		((len) << 7)
> +#define DDMA_CFG_SRC_ADDR_MODE(mode)		((mode) << 5)
> +#define DDMA_CFG_SRC_DRQ_TYPE(type)		((type) << 0)
> +
> +/* Dedicated DMA parameter register layout */
> +#define DDMA_PARA_DEST_DATA_BLK_SIZE(n)		(((n) - 1) << 24)
> +#define DDMA_PARA_DEST_WAIT_CYCLES(n)		(((n) - 1) << 16)
> +#define DDMA_PARA_SRC_DATA_BLK_SIZE(n)		(((n) - 1) << 8)
> +#define DDMA_PARA_SRC_WAIT_CYCLES(n)		(((n) - 1) << 0)
> +
> +/** DMA register offsets **/
> +
> +/* General register offsets */
> +#define DMA_IRQ_ENABLE_REG			0x0
> +#define DMA_IRQ_PENDING_STATUS_REG		0x4
> +
> +/* Normal DMA register offsets */
> +#define NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
> +#define NDMA_CFG_REG				0x0
> +#define NDMA_SRC_ADDR_REG			0x4
> +#define NDMA_DEST_ADDR_REG			0x8
> +#define NDMA_BYTE_COUNT_REG			0xC
> +
> +/* Dedicated DMA register offsets */
> +#define DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
> +#define DDMA_CFG_REG				0x0
> +#define DDMA_SRC_ADDR_REG			0x4
> +#define DDMA_DEST_ADDR_REG			0x8
> +#define DDMA_BYTE_COUNT_REG			0xC
> +#define DDMA_PARA_REG				0x18
The NDMA_, DDMA_ and DMA_ defines here are too generic and not specific
to this driver; they can collide with other headers in the future, so
please namespace them to make them future-proof.
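
Something like this, say (a sketch only; the SUN4I_ prefix is just my
suggestion, pick whatever fits):

        #define SUN4I_NDMA_CFG_LOADING                  BIT(31)
        #define SUN4I_DDMA_CFG_LOADING                  BIT(31)
        #define SUN4I_DMA_IRQ_ENABLE_REG                0x0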

> +static int choose_optimal_buswidth(dma_addr_t addr)
> +{
> +	/* On 32 bit aligned addresses, we can use a 32 bit bus width */
> +	if (addr % 4 == 0)
> +		return DMA_SLAVE_BUSWIDTH_4_BYTES;
> +	/* On 16 bit aligned addresses, we can use a 16 bit bus width */
> +	else if (addr % 2 == 0)
> +		return DMA_SLAVE_BUSWIDTH_2_BYTES;
> +
> +	/* Worst-case scenario, we need to do byte aligned reads */
> +	return DMA_SLAVE_BUSWIDTH_1_BYTE;
I am not sure that is the right way to do it. dmaengine drivers
shouldn't assume a parameter and use it behind the client's back; that
will conflict with the client's FIFO settings and turn the whole data
into garbage.
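
Rough sketch of what I mean (the helper name is made up): validate
what the client configured and fail, instead of guessing:

        static int sun4i_dma_check_buswidth(enum dma_slave_buswidth width,
                                            dma_addr_t addr)
        {
                /* the client must have set an explicit width... */
                if (width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
                        return -EINVAL;

                /*
                 * ...and the address must actually be aligned to it;
                 * widths are powers of two, so masking avoids a 64-bit
                 * division on dma_addr_t
                 */
                if (addr & (width - 1))
                        return -EINVAL;

                return 0;
        }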

> +static struct sun4i_dma_promise *
> +generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
> +		      size_t len, struct dma_slave_config *sconfig)
> +{
> +	struct sun4i_dma_promise *promise;
> +	int ret;
> +
> +	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
> +	if (!promise)
> +		return NULL;
> +
> +	promise->src = src;
> +	promise->dst = dest;
> +	promise->len = len;
> +	promise->cfg = NDMA_CFG_LOADING | NDMA_CFG_BYTE_COUNT_MODE_REMAIN;
> +
> +	/* Use sensible default values if client is using undefined ones */
> +	if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
> +		sconfig->src_addr_width = sconfig->dst_addr_width;
> +	if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
> +		sconfig->dst_addr_width = sconfig->src_addr_width;
> +	if (sconfig->src_maxburst == 0)
> +		sconfig->src_maxburst = sconfig->dst_maxburst;
> +	if (sconfig->dst_maxburst == 0)
> +		sconfig->dst_maxburst = sconfig->src_maxburst;
oh no, we certainly don't want to do that. Failing explicitly here and
fixing the clients so their settings match is the right approach.
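
i.e. something along these lines instead (sketch only):

        if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED ||
            sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED ||
            !sconfig->src_maxburst || !sconfig->dst_maxburst) {
                dev_err(chan2dev(chan),
                        "incomplete dma_slave_config, fix the client\n");
                return NULL;
        }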

> +static struct dma_async_tx_descriptor *
> +sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
> +			  size_t period_len, enum dma_transfer_direction dir,
> +			  unsigned long flags)
> +{
> +	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
> +	struct dma_slave_config *sconfig = &vchan->cfg;
> +	struct sun4i_dma_promise *promise;
> +	struct sun4i_dma_contract *contract;
> +	dma_addr_t src, dest;
> +	u32 endpoints;
> +	int nr_periods, offset, plength, i;
> +
> +	if (!is_slave_direction(dir)) {
> +		dev_err(chan2dev(chan), "Invalid DMA direction\n");
> +		return NULL;
> +	}
> +
> +	if (vchan->is_dedicated) {
> +		/*
> +		 * As we are using this just for audio data, we need to use
> +		 * normal DMA. There is nothing stopping us from supporting
> +		 * dedicated DMA here as well, so if a client comes up and
> +		 * requires it, it will be simple to implement it.
> +		 */
> +		dev_err(chan2dev(chan),
> +			"Cyclic transfers are only supported on Normal DMA\n");
> +		return NULL;
> +	}
> +
> +	contract = generate_dma_contract();
> +	if (!contract)
> +		return NULL;
> +
> +	contract->is_cyclic = 1;
> +
> +	/* Figure out the endpoints and the address we need */
> +	if (dir == DMA_MEM_TO_DEV) {
> +		src = buf;
> +		dest = sconfig->dst_addr;
> +		endpoints = NDMA_CFG_SRC_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM) |
> +			    NDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint) |
> +			    NDMA_CFG_DEST_FIXED_ADDR;
> +	} else {
> +		src = sconfig->src_addr;
> +		dest = buf;
> +		endpoints = NDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
> +			    NDMA_CFG_SRC_FIXED_ADDR |
> +			    NDMA_CFG_DEST_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM);
> +	}
> +
> +	/*
> +	 * We will be using half done interrupts to make two periods
> +	 * out of a promise, so we need to program the DMA engine less
> +	 * often
> +	 */
> +	nr_periods = DIV_ROUND_UP(len / period_len, 2);
and why is that? Why don't you use the actual period count here?
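
If I read the comment right, every promise fires two interrupts (half
done + done) and therefore covers two periods, so with hypothetical
numbers:

        /* e.g. len = 8 * period_len: 8 periods, but only 4 promises */
        nr_periods = DIV_ROUND_UP(8, 2);

If that is the intent, please spell it out in the comment; as written
it looks like half the periods simply disappear.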

> +static struct dma_async_tx_descriptor *
> +sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
> +			unsigned int sg_len, enum dma_transfer_direction dir,
> +			unsigned long flags, void *context)
> +{
> +	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
> +	struct dma_slave_config *sconfig = &vchan->cfg;
> +	struct sun4i_dma_promise *promise;
> +	struct sun4i_dma_contract *contract;
> +	struct scatterlist *sg;
> +	dma_addr_t srcaddr, dstaddr;
> +	u32 endpoints, para;
> +	int i;
> +
> +	if (!sgl)
> +		return NULL;
> +
> +	if (!is_slave_direction(dir)) {
> +		dev_err(chan2dev(chan), "Invalid DMA direction\n");
> +		return NULL;
> +	}
> +
> +	contract = generate_dma_contract();
> +	if (!contract)
> +		return NULL;
> +
> +	/* Figure out endpoints */
> +	if (vchan->is_dedicated && dir == DMA_MEM_TO_DEV) {
> +		endpoints = DDMA_CFG_SRC_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
> +			    DDMA_CFG_SRC_ADDR_MODE(DDMA_ADDR_MODE_LINEAR) |
> +			    DDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint) |
> +			    DDMA_CFG_DEST_ADDR_MODE(DDMA_ADDR_MODE_IO);
> +	} else if (!vchan->is_dedicated && dir == DMA_MEM_TO_DEV) {
> +		endpoints = NDMA_CFG_SRC_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM) |
> +			    NDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint) |
> +			    NDMA_CFG_DEST_FIXED_ADDR;
> +	} else if (vchan->is_dedicated) {
> +		endpoints = DDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
> +			    DDMA_CFG_SRC_ADDR_MODE(DDMA_ADDR_MODE_IO) |
> +			    DDMA_CFG_DEST_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
> +			    DDMA_CFG_DEST_ADDR_MODE(DDMA_ADDR_MODE_LINEAR);
> +	} else {
> +		endpoints = NDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
> +			    NDMA_CFG_SRC_FIXED_ADDR |
> +			    NDMA_CFG_DEST_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM);
> +	}
I would like this to be reworked a bit, with each (dedicated, dir)
combination mapped to its endpoint configuration. That way, supporting
a future value is a matter of adding one more entry rather than growing
this dense branching; see the sketch below.
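
Sketch of what I have in mind, with the static bits taken from your
branches above (the NDMA_/DDMA_ DRQ type shifts are identical per your
defines, which is what makes the final OR work for both channel types):

        /* indexed by [is_dedicated][dir == DMA_MEM_TO_DEV] */
        static const u32 sun4i_dma_endpoint_cfg[2][2] = {
                [0][0] = NDMA_CFG_SRC_FIXED_ADDR |
                         NDMA_CFG_DEST_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM),
                [0][1] = NDMA_CFG_SRC_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM) |
                         NDMA_CFG_DEST_FIXED_ADDR,
                [1][0] = DDMA_CFG_SRC_ADDR_MODE(DDMA_ADDR_MODE_IO) |
                         DDMA_CFG_DEST_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
                         DDMA_CFG_DEST_ADDR_MODE(DDMA_ADDR_MODE_LINEAR),
                [1][1] = DDMA_CFG_SRC_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
                         DDMA_CFG_SRC_ADDR_MODE(DDMA_ADDR_MODE_LINEAR) |
                         DDMA_CFG_DEST_ADDR_MODE(DDMA_ADDR_MODE_IO),
        };

        endpoints = sun4i_dma_endpoint_cfg[vchan->is_dedicated]
                                          [dir == DMA_MEM_TO_DEV];
        /* OR in the runtime DRQ endpoint on the device side */
        if (dir == DMA_MEM_TO_DEV)
                endpoints |= NDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint);
        else
                endpoints |= NDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint);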

> +
> +	for_each_sg(sgl, sg, sg_len, i) {
> +		/* Figure out addresses */
> +		if (dir == DMA_MEM_TO_DEV) {
> +			srcaddr = sg_dma_address(sg);
> +			dstaddr = sconfig->dst_addr;
> +		} else {
> +			srcaddr = sconfig->src_addr;
> +			dstaddr = sg_dma_address(sg);
> +		}
> +
> +		/*
> +		 * These are the magic DMA engine timings that keep SPI going.
> +		 * I haven't seen any interface on DMAEngine to configure
> +		 * timings, and so far they seem to work for everything we
> +		 * support, so I've kept them here. I don't know if other
> +		 * devices need different timings because, as usual, we only
> +		 * have the "para" bitfield meanings, but no comment on what
> +		 * the values should be when doing a certain operation :|
> +		 */
> +		para = DDMA_MAGIC_SPI_PARAMETERS;
Is there no documentation available from the vendor on what these
values mean?


> +static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
> +					   dma_cookie_t cookie,
> +					   struct dma_tx_state *state)
> +{
> +	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
> +	struct sun4i_dma_pchan *pchan = vchan->pchan;
> +	struct sun4i_dma_contract *contract;
> +	struct sun4i_dma_promise *promise;
> +	struct virt_dma_desc *vd;
> +	unsigned long flags;
> +	enum dma_status ret;
> +	size_t bytes = 0;
> +
> +	ret = dma_cookie_status(chan, cookie, state);
> +	if (ret == DMA_COMPLETE)
> +		return ret;
Please check that state is valid before progressing; callers are
allowed to pass NULL here.
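
i.e.:

        /* callers may pass a NULL state when they only want the status */
        if (!state)
                return ret;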

> +
> +	spin_lock_irqsave(&vchan->vc.lock, flags);
> +	vd = vchan_find_desc(&vchan->vc, cookie);
> +	if (!vd)
> +		goto exit;
> +	contract = to_sun4i_dma_contract(vd);
> +
> +	list_for_each_entry(promise, &contract->demands, list)
> +		bytes += promise->len;
one thing which might help here is to keep this value in the vd, or
perhaps pre-compute it during prepare, so the status query doesn't
have to walk the whole list under the lock every time.
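
Something like a (hypothetical) contract->bytes field, summed up once
at prepare time:

        /* in the prep callback, while building the contract */
        contract->bytes += promise->len;

        /* in tx_status, no list walk needed anymore */
        bytes = contract->bytes;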

> +static int sun4i_dma_remove(struct platform_device *pdev)
> +{
> +	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);
> +
> +	/* Disable IRQ so no more work is scheduled */
> +	disable_irq(priv->irq);
> +
> +	of_dma_controller_free(pdev->dev.of_node);
> +	dma_async_device_unregister(&priv->slave);
> +
> +	clk_disable_unprepare(priv->clk);
you still have the IRQ requested here; an explicit call to free_irq()
fixes that, as well as ensuring all the tasklets are executed before
you remove the device.
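
i.e.:

        /* also waits for a running handler to finish */
        free_irq(priv->irq, priv);

(assuming priv is the dev_id you passed to request_irq).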

-- 
~Vinod
