Hi Maxime, Vinod,
On 20/05/15 at 18:17, Maxime Ripard wrote:
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct sun4i_dma_promise *promise;
+ struct sun4i_dma_contract *contract;
+ dma_addr_t src, dest;
+ u32 endpoints;
+ int nr_periods, offset, plength, i;
+
+ if (!is_slave_direction(dir)) {
+ dev_err(chan2dev(chan), "Invalid DMA direction\n");
+ return NULL;
+ }
+
+ if (vchan->is_dedicated) {
+ /*
+ * As we are using this just for audio data, we need to use
+ * normal DMA. There is nothing stopping us from supporting
+ * dedicated DMA here as well, so if a client comes up and
+ * requires it, it will be simple to implement it.
+ */
+ dev_err(chan2dev(chan),
+ "Cyclic transfers are only supported on Normal DMA\n");
+ return NULL;
+ }
+
+ contract = generate_dma_contract();
+ if (!contract)
+ return NULL;
+
+ contract->is_cyclic = 1;
+
+ /* Figure out the endpoints and the address we need */
+ if (dir == DMA_MEM_TO_DEV) {
+ src = buf;
+ dest = sconfig->dst_addr;
+ endpoints = NDMA_CFG_SRC_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM) |
+ NDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint) |
+ NDMA_CFG_DEST_FIXED_ADDR;
+ } else {
+ src = sconfig->src_addr;
+ dest = buf;
+ endpoints = NDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+ NDMA_CFG_SRC_FIXED_ADDR |
+ NDMA_CFG_DEST_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM);
+ }
+
+ /*
+ * We will be using half done interrupts to make two periods
+ * out of a promise, so we need to program the DMA engine less
+ * often
+ */
+ nr_periods = DIV_ROUND_UP(len / period_len, 2);
And why is that? Why don't you use the actual period count here?
I must admit I don't really know on this one.
Emilio?
You mean, why did I choose to divide "len / period_len" (is there some
other way to get the period count that I'm missing?) by 2 and use
half-done interrupts? The engine can interrupt on half-transfer, so we
can use that feature to program the engine half as often as we would
otherwise (keep in mind the hardware doesn't support linked lists).
Say you have a set of periods (| marks a start/end, I an interrupt, P
programming the engine to do a new transfer); the easy but slow way
would be to do:
|---|---|---|---| (periods / promises)
P I,P I,P I,P I
Using half transfer interrupts you can do
|-------|-------| (promises as configured on hw)
|---|---|---|---| (periods)
P I I,P I I
Which requires half the engine programming for the same functionality.
Feel free to include these drawings in the comment if you think they'll
help.
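If it helps, this is roughly how the interrupt path ends up looking with
that scheme (just a sketch; sun4i_dma_report_period() and
sun4i_dma_start_next_promise() are made-up names, not functions from the
patch):

static void sun4i_dma_handle_cyclic_irq(struct sun4i_dma_vchan *vchan,
					bool half_done)
{
	/* Every interrupt, half-done or full-done, completes one period */
	sun4i_dma_report_period(vchan);			/* made-up helper */

	/*
	 * Only the full-done interrupt needs to reprogram the engine
	 * with the next promise; the half-done one just marks the
	 * middle of the current promise, so the hardware keeps going
	 * on its own.
	 */
	if (!half_done)
		sun4i_dma_start_next_promise(vchan);	/* made-up helper */
}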
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct sun4i_dma_promise *promise;
+ struct sun4i_dma_contract *contract;
+ struct scatterlist *sg;
+ dma_addr_t srcaddr, dstaddr;
+ u32 endpoints, para;
+ int i;
+
+ if (!sgl)
+ return NULL;
+
+ if (!is_slave_direction(dir)) {
+ dev_err(chan2dev(chan), "Invalid DMA direction\n");
+ return NULL;
+ }
+
+ contract = generate_dma_contract();
+ if (!contract)
+ return NULL;
+
+ /* Figure out endpoints */
+ if (vchan->is_dedicated && dir == DMA_MEM_TO_DEV) {
+ endpoints = DDMA_CFG_SRC_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
+ DDMA_CFG_SRC_ADDR_MODE(DDMA_ADDR_MODE_LINEAR) |
+ DDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint) |
+ DDMA_CFG_DEST_ADDR_MODE(DDMA_ADDR_MODE_IO);
+ } else if (!vchan->is_dedicated && dir == DMA_MEM_TO_DEV) {
+ endpoints = NDMA_CFG_SRC_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM) |
+ NDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint) |
+ NDMA_CFG_DEST_FIXED_ADDR;
+ } else if (vchan->is_dedicated) {
+ endpoints = DDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+ DDMA_CFG_SRC_ADDR_MODE(DDMA_ADDR_MODE_IO) |
+ DDMA_CFG_DEST_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
+ DDMA_CFG_DEST_ADDR_MODE(DDMA_ADDR_MODE_LINEAR);
+ } else {
+ endpoints = NDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+ NDMA_CFG_SRC_FIXED_ADDR |
+ NDMA_CFG_DEST_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM);
+ }
I would like this to be reworked a bit, with a mapping from each
setting (dedicated, dir) to its endpoint configuration; that way,
adding a future value gets simpler by adding one more case rather than
growing the dense branching here.
I don't really follow you on this one. You mean adding a
macro/function to generate the endpoints value?
FWIW, these 4 cases cover every possible combination.
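If what you mean is something table-driven, I guess it could look
roughly like this (just a sketch, not part of the patch; the entries
simply mirror the four branches above, and vchan->endpoint still has to
be OR'ed in at run time because it isn't a compile-time constant):

struct sun4i_dma_endpoint_map {
	bool dedicated;
	enum dma_transfer_direction dir;
	u32 static_bits;	/* SDRAM DRQ, address modes, fixed-addr bits */
	bool dev_is_dest;	/* whether vchan->endpoint is the destination */
};

static const struct sun4i_dma_endpoint_map endpoint_maps[] = {
	{ true, DMA_MEM_TO_DEV,
	  DDMA_CFG_SRC_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
	  DDMA_CFG_SRC_ADDR_MODE(DDMA_ADDR_MODE_LINEAR) |
	  DDMA_CFG_DEST_ADDR_MODE(DDMA_ADDR_MODE_IO), true },
	{ false, DMA_MEM_TO_DEV,
	  NDMA_CFG_SRC_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM) |
	  NDMA_CFG_DEST_FIXED_ADDR, true },
	{ true, DMA_DEV_TO_MEM,
	  DDMA_CFG_SRC_ADDR_MODE(DDMA_ADDR_MODE_IO) |
	  DDMA_CFG_DEST_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
	  DDMA_CFG_DEST_ADDR_MODE(DDMA_ADDR_MODE_LINEAR), false },
	{ false, DMA_DEV_TO_MEM,
	  NDMA_CFG_SRC_FIXED_ADDR |
	  NDMA_CFG_DEST_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM), false },
};

static u32 sun4i_dma_get_endpoints(struct sun4i_dma_vchan *vchan,
				   enum dma_transfer_direction dir)
{
	const struct sun4i_dma_endpoint_map *m;
	int i;

	for (i = 0; i < ARRAY_SIZE(endpoint_maps); i++) {
		m = &endpoint_maps[i];
		if (m->dedicated != vchan->is_dedicated || m->dir != dir)
			continue;

		/* The per-channel DRQ can only be added at run time */
		if (m->dev_is_dest)
			return m->static_bits | (m->dedicated ?
				DDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint) :
				NDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint));

		return m->static_bits | (m->dedicated ?
			DDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) :
			NDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint));
	}

	return 0;
}

Not sure it buys us much with only four combinations, though.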
+static int sun4i_dma_remove(struct platform_device *pdev)
+{
+ struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);
+
+ /* Disable IRQ so no more work is scheduled */
+ disable_irq(priv->irq);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&priv->slave);
+
+ clk_disable_unprepare(priv->clk);
You still have the irq enabled here; an explicit call to free_irq
helps with that, as well as ensuring all the tasklets have executed
before you remove the device.
Why is that? Doesn't disable_irq disable the irq and wait for all of the
handlers to finish executing? That's what I get from the comment on
http://lxr.free-electrons.com/source/kernel/irq/manage.c#L424
Which tasklets? We don't use any in our driver.
That's true as well.
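For completeness, this is roughly what that ordering would look like
with an explicit free_irq() (again just a sketch; how the IRQ is
actually requested isn't visible in this hunk, and the dev_id passed to
free_irq has to match whatever request_irq was given):

static int sun4i_dma_remove(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);

	/*
	 * free_irq() waits for any running handler to finish and then
	 * removes it completely, so nothing can fire while we tear
	 * down the rest.
	 */
	free_irq(priv->irq, priv);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&priv->slave);

	clk_disable_unprepare(priv->clk);

	return 0;
}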
Cheers!
Emilio