To support multi-channel functionality with the AE4DMA engine, extend the
PTDMA code with reusable components.

Reviewed-by: Raju Rangoju <Raju.Rangoju@xxxxxxx>
Signed-off-by: Basavaraj Natikar <Basavaraj.Natikar@xxxxxxx>
---
 drivers/dma/amd/ae4dma/ae4dma.h         |   1 +
 drivers/dma/amd/common/amd_dma.h        |   1 +
 drivers/dma/amd/ptdma/ptdma-dmaengine.c | 105 +++++++++++++++++++-----
 drivers/dma/amd/ptdma/ptdma.h           |   2 +
 4 files changed, 89 insertions(+), 20 deletions(-)
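A note on the design: the shared code can serve both engines because
struct ae4_device embeds the common struct pt_device, so the new
pt_get_cmd_queue() helper (added below) recovers the AE4 context with
container_of() and picks the per-channel queue, falling back to the
single PTDMA queue otherwise. The following standalone sketch is not
part of the patch; the structs are trimmed to the minimum and
get_cmd_queue() is a reduced stand-in for the real helper:

/*
 * Minimal sketch of the queue-selection dispatch. Compiles and runs
 * in userspace; container_of() is simplified from the kernel macro.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define AE4_DMA_VERSION	4
#define MAX_CHANNELS	16

struct pt_cmd_queue { int qidx; };

struct pt_device {
	int ver;			/* AE4_DMA_VERSION on AE4 hardware */
	struct pt_cmd_queue cmd_q;	/* single queue on legacy PTDMA */
};

struct ae4_cmd_queue { struct pt_cmd_queue cmd_q; };

struct ae4_device {
	struct pt_device pt;		/* common part, embedded */
	struct ae4_cmd_queue ae4cmd_q[MAX_CHANNELS];
};

/* Per-channel queue on AE4, the single shared queue otherwise. */
static struct pt_cmd_queue *get_cmd_queue(struct pt_device *pt, int chan_id)
{
	if (pt->ver == AE4_DMA_VERSION) {
		struct ae4_device *ae4 = container_of(pt, struct ae4_device, pt);

		return &ae4->ae4cmd_q[chan_id].cmd_q;
	}
	return &pt->cmd_q;
}

int main(void)
{
	struct ae4_device ae4 = { .pt = { .ver = AE4_DMA_VERSION } };

	ae4.ae4cmd_q[3].cmd_q.qidx = 3;
	printf("queue %d\n", get_cmd_queue(&ae4.pt, 3)->qidx); /* prints 3 */
	return 0;
}

Keeping the version check in this one helper lets the dmaengine
callbacks below stay engine-agnostic.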
diff --git a/drivers/dma/amd/ae4dma/ae4dma.h b/drivers/dma/amd/ae4dma/ae4dma.h
index 59a0ea3293cb..850ad1e49b51 100644
--- a/drivers/dma/amd/ae4dma/ae4dma.h
+++ b/drivers/dma/amd/ae4dma/ae4dma.h
@@ -15,6 +15,7 @@
 #define MAX_AE4_HW_QUEUES	16
 
 #define AE4_DESC_COMPLETED	0x3
+#define AE4_DMA_VERSION		4
 
 struct ae4_msix {
 	int msix_count;
diff --git a/drivers/dma/amd/common/amd_dma.h b/drivers/dma/amd/common/amd_dma.h
index 44251918f157..40f18fc912ae 100644
--- a/drivers/dma/amd/common/amd_dma.h
+++ b/drivers/dma/amd/common/amd_dma.h
@@ -21,6 +21,7 @@
 #include <linux/wait.h>
 
 #include "../ptdma/ptdma.h"
+#include "../ae4dma/ae4dma.h"
 #include "../../virt-dma.h"
 
 void pt_start_queue(struct pt_cmd_queue *cmd_q);
diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
index 66ea10499643..90ca02fd5f8f 100644
--- a/drivers/dma/amd/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -43,7 +43,24 @@ static void pt_do_cleanup(struct virt_dma_desc *vd)
 	kmem_cache_free(pt->dma_desc_cache, desc);
 }
 
-static int pt_dma_start_desc(struct pt_dma_desc *desc)
+static struct pt_cmd_queue *pt_get_cmd_queue(struct pt_device *pt, struct pt_dma_chan *chan)
+{
+	struct ae4_cmd_queue *ae4cmd_q;
+	struct pt_cmd_queue *cmd_q;
+	struct ae4_device *ae4;
+
+	if (pt->ver == AE4_DMA_VERSION) {
+		ae4 = container_of(pt, struct ae4_device, pt);
+		ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+		cmd_q = &ae4cmd_q->cmd_q;
+	} else {
+		cmd_q = &pt->cmd_q;
+	}
+
+	return cmd_q;
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc, struct pt_dma_chan *chan)
 {
 	struct pt_passthru_engine *pt_engine;
 	struct pt_device *pt;
@@ -54,7 +71,9 @@
 
 	pt_cmd = &desc->pt_cmd;
 	pt = pt_cmd->pt;
-	cmd_q = &pt->cmd_q;
+
+	cmd_q = pt_get_cmd_queue(pt, chan);
+
 	pt_engine = &pt_cmd->passthru;
 
 	pt->tdata.cmd = pt_cmd;
@@ -149,7 +168,7 @@
 		if (!desc)
 			break;
 
-		ret = pt_dma_start_desc(desc);
+		ret = pt_dma_start_desc(desc, chan);
 
 		if (!ret)
 			break;
@@ -184,7 +203,10 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
 	struct pt_passthru_engine *pt_engine;
+	struct pt_device *pt = chan->pt;
+	struct ae4_cmd_queue *ae4cmd_q;
 	struct pt_dma_desc *desc;
+	struct ae4_device *ae4;
 	struct pt_cmd *pt_cmd;
 
 	desc = pt_alloc_dma_desc(chan, flags);
@@ -192,7 +214,7 @@
 		return NULL;
 
 	pt_cmd = &desc->pt_cmd;
-	pt_cmd->pt = chan->pt;
+	pt_cmd->pt = pt;
 	pt_engine = &pt_cmd->passthru;
 	pt_cmd->engine = PT_ENGINE_PASSTHRU;
 	pt_engine->src_dma = src;
@@ -203,6 +225,14 @@
 
 	desc->len = len;
 
+	if (pt->ver == AE4_DMA_VERSION) {
+		ae4 = container_of(pt, struct ae4_device, pt);
+		ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+		mutex_lock(&ae4cmd_q->cmd_lock);
+		list_add_tail(&pt_cmd->entry, &ae4cmd_q->cmd);
+		mutex_unlock(&ae4cmd_q->cmd_lock);
+	}
+
 	return desc;
 }
 
@@ -260,8 +290,11 @@ static enum dma_status
 pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 		struct dma_tx_state *txstate)
 {
-	struct pt_device *pt = to_pt_chan(c)->pt;
-	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+	struct pt_dma_chan *chan = to_pt_chan(c);
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
+
+	cmd_q = pt_get_cmd_queue(pt, chan);
 
 	pt_check_status_trans(pt, cmd_q);
 	return dma_cookie_status(c, cookie, txstate);
@@ -270,10 +303,13 @@ static int pt_pause(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->vc.lock, flags);
-	pt_stop_queue(&chan->pt->cmd_q);
+	cmd_q = pt_get_cmd_queue(pt, chan);
+	pt_stop_queue(cmd_q);
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
 
 	return 0;
 }
@@ -283,10 +319,13 @@ static int pt_resume(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
 	struct pt_dma_desc *desc = NULL;
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->vc.lock, flags);
-	pt_start_queue(&chan->pt->cmd_q);
+	cmd_q = pt_get_cmd_queue(pt, chan);
+	pt_start_queue(cmd_q);
 	desc = pt_next_dma_desc(chan);
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
 
@@ -300,11 +339,17 @@ static int pt_resume(struct dma_chan *dma_chan)
 static int pt_terminate_all(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
 	unsigned long flags;
-	struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
 	LIST_HEAD(head);
 
-	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+	cmd_q = pt_get_cmd_queue(pt, chan);
+	if (pt->ver == AE4_DMA_VERSION)
+		pt_stop_queue(cmd_q);
+	else
+		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
 	spin_lock_irqsave(&chan->vc.lock, flags);
 	vchan_get_all_descriptors(&chan->vc, &head);
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -317,14 +362,24 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
 
 int pt_dmaengine_register(struct pt_device *pt)
 {
-	struct pt_dma_chan *chan;
 	struct dma_device *dma_dev = &pt->dma_dev;
-	char *cmd_cache_name;
+	struct ae4_cmd_queue *ae4cmd_q = NULL;
+	struct ae4_device *ae4 = NULL;
+	struct pt_dma_chan *chan;
 	char *desc_cache_name;
-	int ret;
+	char *cmd_cache_name;
+	int ret, i;
+
+	if (pt->ver == AE4_DMA_VERSION)
+		ae4 = container_of(pt, struct ae4_device, pt);
+
+	if (ae4)
+		pt->pt_dma_chan = devm_kcalloc(pt->dev, ae4->cmd_q_count,
+					       sizeof(*pt->pt_dma_chan), GFP_KERNEL);
+	else
+		pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+					       GFP_KERNEL);
 
-	pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
-				       GFP_KERNEL);
 	if (!pt->pt_dma_chan)
 		return -ENOMEM;
 
@@ -366,9 +421,6 @@ int pt_dmaengine_register(struct pt_device *pt)
 
 	INIT_LIST_HEAD(&dma_dev->channels);
 
-	chan = pt->pt_dma_chan;
-	chan->pt = pt;
-
 	/* Set base and prep routines */
 	dma_dev->device_free_chan_resources = pt_free_chan_resources;
 	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
@@ -380,8 +432,21 @@
 	dma_dev->device_terminate_all = pt_terminate_all;
 	dma_dev->device_synchronize = pt_synchronize;
 
-	chan->vc.desc_free = pt_do_cleanup;
-	vchan_init(&chan->vc, dma_dev);
+	if (ae4) {
+		for (i = 0; i < ae4->cmd_q_count; i++) {
+			chan = pt->pt_dma_chan + i;
+			ae4cmd_q = &ae4->ae4cmd_q[i];
+			chan->id = ae4cmd_q->id;
+			chan->pt = pt;
+			chan->vc.desc_free = pt_do_cleanup;
+			vchan_init(&chan->vc, dma_dev);
+		}
+	} else {
+		chan = pt->pt_dma_chan;
+		chan->pt = pt;
+		chan->vc.desc_free = pt_do_cleanup;
+		vchan_init(&chan->vc, dma_dev);
+	}
 
 	ret = dma_async_device_register(dma_dev);
 	if (ret)
diff --git a/drivers/dma/amd/ptdma/ptdma.h b/drivers/dma/amd/ptdma/ptdma.h
index b4f9ee83b074..d9e4ff942901 100644
--- a/drivers/dma/amd/ptdma/ptdma.h
+++ b/drivers/dma/amd/ptdma/ptdma.h
@@ -184,6 +184,7 @@ struct pt_dma_desc {
 struct pt_dma_chan {
	struct virt_dma_chan vc;
 	struct pt_device *pt;
+	u32 id;
 };
 
 struct pt_cmd_queue {
@@ -262,6 +263,7 @@ struct pt_device {
 	unsigned long total_interrupts;
 
 	struct pt_tasklet_data tdata;
+	int ver;
 };
 
 /*
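For reference, pt_dmaengine_register() now sets channels up in two
shapes: on AE4, one pt_dma_chan per hardware queue, each recording the
queue id it drives; on legacy PTDMA, the single channel as before. A
reduced userspace model of that set-up loop (all names here are
illustrative, not driver symbols):

/*
 * Reduced model of the channel set-up: AE4 allocates one channel per
 * hardware queue and maps each channel to its queue id; legacy PTDMA
 * keeps a single channel.
 */
#include <stdio.h>
#include <stdlib.h>

struct chan_model { unsigned int id; };

static struct chan_model *setup_channels(int is_ae4, unsigned int q_count,
					 unsigned int *n_out)
{
	unsigned int i, n = is_ae4 ? q_count : 1;
	struct chan_model *chans = calloc(n, sizeof(*chans));

	if (!chans)
		return NULL;
	for (i = 0; i < n; i++)
		chans[i].id = i;	/* mirrors chan->id = ae4cmd_q->id */
	*n_out = n;
	return chans;
}

int main(void)
{
	unsigned int n;
	struct chan_model *chans = setup_channels(1, 16, &n);

	if (!chans)
		return 1;
	printf("registered %u channel(s), last id %u\n", n, chans[n - 1].id);
	free(chans);
	return 0;
}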
-- 
2.25.1
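For completeness, consumers reach the additional channels through the
unchanged generic dmaengine API. A minimal kernel-side sketch, with
error paths trimmed and the "ptdma" lookup name purely illustrative
(not something this patch defines):

#include <linux/dmaengine.h>
#include <linux/err.h>

static int demo_memcpy(struct device *dev, dma_addr_t dst, dma_addr_t src,
		       size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret = 0;

	/* Grab any suitable channel; on AE4 this can now be one of 16. */
	chan = dma_request_chan(dev, "ptdma");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EIO;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		ret = -EIO;
		goto out;
	}

	/* Kick the queue and busy-wait for completion (demo only). */
	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}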