Re: [PATCH v3 3/3] dma: omap-dma: add support for pause of non-cyclic transfers

On 08/07/2015 11:00 PM, Sebastian Andrzej Siewior wrote:
> -static void omap_dma_stop(struct omap_chan *c)
> +static void omap_dma_drain_chan(struct omap_chan *c)
> +{
> +	int i;
> +	uint32_t val;
> +
> +	/* Wait for sDMA FIFO to drain */
> +	for (i = 0; ; i++) {
> +		val = omap_dma_chan_read(c, CCR);
> +		if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
> +			break;
> +
> +		if (i > 100)
> +			break;
> +
> +		udelay(5);
> +	}
> +
> +	if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
> +		dev_err(c->vc.chan.device->dev,
> +			"DMA drain did not complete on lch %d\n",
> +			c->dma_ch);
> +}
> +
> +static int omap_dma_stop(struct omap_chan *c)
>  {
>  	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
>  	uint32_t val;
> @@ -312,7 +335,6 @@ static void omap_dma_stop(struct omap_chan *c)
>  	val = omap_dma_chan_read(c, CCR);
>  	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
>  		uint32_t sysconfig;
> -		unsigned i;
>  
>  		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
>  		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
> @@ -323,27 +345,18 @@ static void omap_dma_stop(struct omap_chan *c)
>  		val &= ~CCR_ENABLE;
>  		omap_dma_chan_write(c, CCR, val);
>  
> -		/* Wait for sDMA FIFO to drain */
> -		for (i = 0; ; i++) {
> -			val = omap_dma_chan_read(c, CCR);
> -			if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
> -				break;
> -
> -			if (i > 100)
> -				break;
> -
> -			udelay(5);
> -		}
> -
> -		if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
> -			dev_err(c->vc.chan.device->dev,
> -				"DMA drain did not complete on lch %d\n",
> -			        c->dma_ch);
> +		omap_dma_drain_chan(c);
>  
>  		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
>  	} else {
> +
> +		if (!(val & CCR_ENABLE))
> +			return -EINVAL;
> +
>  		val &= ~CCR_ENABLE;
>  		omap_dma_chan_write(c, CCR, val);
> +
> +		omap_dma_drain_chan(c);

Note that the FIFO drain only applies to source-synchronized transfers, and
only when BUFFERING is _not_ disabled for the channel - which is true in most
cases.
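So the drain in the non-errata path could probably be gated on that. Something
along these lines, completely untested; it assumes the existing CCR_TRIGGER_SRC
define plus a CCR_BUFFERING_DISABLE define for DMA4_CCR bit 25 (I think we have
one already, but please double-check against the TRM), and the helper name is
just for illustration:

/*
 * Untested sketch: only poll RD_ACTIVE/WR_ACTIVE when the drain can
 * actually complete, i.e. the channel is source-synchronized and
 * buffering has not been disabled for it.
 */
static void omap_dma_drain_chan_if_possible(struct omap_chan *c)
{
	uint32_t val = omap_dma_chan_read(c, CCR);

	if ((val & CCR_TRIGGER_SRC) && !(val & CCR_BUFFERING_DISABLE))
		omap_dma_drain_chan(c);
}

and then call that instead of omap_dma_drain_chan() in the else branch above.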

> +	/*
> +	 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
> +	 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
> +	 * "When a channel is disabled during a transfer, the channel undergoes
> +	 * an abort, unless it is hardware-source-synchronized …".
> +	 * A source-synchronised channel is one where the fetching of data is
> +	 * under control of the device. In other words, a device-to-memory
> +	 * transfer. So, a destination-synchronised channel (which would be a
> +	 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
> +	 * bit is cleared.
> +	 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
> +	 * aborts immediately after completion of current read/write
> +	 * transactions and then the FIFO is cleaned up." The term "cleaned up"
> +	 * is not defined. TI recommends checking that RD_ACTIVE and WR_ACTIVE
> +	 * are both clear _before_ disabling the channel, otherwise data loss
> +	 * will occur.
> +	 * The problem is that if the channel is active, then device activity
> +	 * can result in DMA activity starting between reading those as both
> +	 * clear and the write to DMA_CCR to clear the enable bit hitting the
> +	 * hardware. If the DMA hardware can't drain the data in its FIFO to the
> +	 * destination, then data loss "might" occur (say if we write to a UART
> +	 * and the UART is not accepting any further data).

I don't know if you have checked it, but pause could probably also be allowed
for the TX DMA when PREFETCH is disabled for the channel? Just a guess.
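Roughly something like this on top of the DMA_DEV_TO_MEM branch below -
completely untested, and CCR_PREFETCH is hypothetical here (the TRM documents
DMA4_CCR bit 23 as PREFETCH, but the driver has no define for it today):

	/*
	 * Guess: with PREFETCH disabled there should be no prefetched data
	 * sitting in the FIFO, so clearing CCR_ENABLE might be safe for
	 * MEM_TO_DEV as well.
	 */
	else if (c->desc->dir == DMA_MEM_TO_DEV &&
		 !(omap_dma_chan_read(c, CCR) & CCR_PREFETCH))
		can_pause = true;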

> +	 */
> +	else if (c->desc->dir == DMA_DEV_TO_MEM)
> +		can_pause = true;
> +
> +	if (can_pause && !c->paused) {
> +		ret = omap_dma_stop(c);
> +		if (!ret)
> +			c->paused = true;
>  	}
> +out:
> +	spin_unlock_irqrestore(&od->irq_lock, flags);
>  
> -	return 0;
> +	return ret;
>  }
>  
>  static int omap_dma_resume(struct dma_chan *chan)
>  {
>  	struct omap_chan *c = to_omap_dma_chan(chan);
> +	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
> +	unsigned long flags;
> +	int ret = -EINVAL;
>  
> -	/* Pause/Resume only allowed with cyclic mode */
> -	if (!c->cyclic)
> -		return -EINVAL;
> +	spin_lock_irqsave(&od->irq_lock, flags);
>  
> -	if (c->paused) {
> +	if (c->paused && c->desc) {
>  		mb();
>  
>  		/* Restore channel link register */
> @@ -1082,9 +1134,11 @@ static int omap_dma_resume(struct dma_chan *chan)
>  
>  		omap_dma_start(c, c->desc);
>  		c->paused = false;
> +		ret = 0;
>  	}
> +	spin_unlock_irqrestore(&od->irq_lock, flags);
>  
> -	return 0;
> +	return ret;
>  }
>  
>  static int omap_dma_chan_init(struct omap_dmadev *od)
> 


-- 
Péter