This is a note to let you know that I've just added the patch titled

    dmaengine: dw-edma: Fix to change for continuous transfer

to the 5.15-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     dmaengine-dw-edma-fix-to-change-for-continuous-trans.patch
and it can be found in the queue-5.15 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit f424ab31e479a8f17ad7a2ec985d254d5d943d53
Author: Shunsuke Mie <mie@xxxxxxxxxx>
Date:   Tue Apr 11 19:17:57 2023 +0900

    dmaengine: dw-edma: Fix to change for continuous transfer

    [ Upstream commit a251994a441ee0a69ba7062c8cd2d08ead3db379 ]

    The dw-edma driver stops after processing a DMA request even if a
    request remains in the issued queue, which is not the expected
    behavior. The DMA engine API requires continuous processing. Add a
    trigger that starts the next transfer once the current one finishes,
    as long as requests remain.

    Fixes: e63d79d1ffcd ("dmaengine: Add Synopsys eDMA IP core driver")
    Signed-off-by: Shunsuke Mie <mie@xxxxxxxxxx>
    Link: https://lore.kernel.org/r/20230411101758.438472-1-mie@xxxxxxxxxx
    Signed-off-by: Vinod Koul <vkoul@xxxxxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 97f5e4e93cfc6..2a434c255b4a2 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -171,7 +171,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
 	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
 }
 
-static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+static int dw_edma_start_transfer(struct dw_edma_chan *chan)
 {
 	struct dw_edma_chunk *child;
 	struct dw_edma_desc *desc;
@@ -179,16 +179,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
 
 	vd = vchan_next_desc(&chan->vc);
 	if (!vd)
-		return;
+		return 0;
 
 	desc = vd2dw_edma_desc(vd);
 	if (!desc)
-		return;
+		return 0;
 
 	child = list_first_entry_or_null(&desc->chunk->list,
 					 struct dw_edma_chunk, list);
 	if (!child)
-		return;
+		return 0;
 
 	dw_edma_v0_core_start(child, !desc->xfer_sz);
 	desc->xfer_sz += child->ll_region.sz;
@@ -196,6 +196,8 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
 	list_del(&child->list);
 	kfree(child);
 	desc->chunks_alloc--;
+
+	return 1;
 }
 
 static int dw_edma_device_config(struct dma_chan *dchan,
@@ -555,14 +557,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
 		switch (chan->request) {
 		case EDMA_REQ_NONE:
 			desc = vd2dw_edma_desc(vd);
-			if (desc->chunks_alloc) {
-				chan->status = EDMA_ST_BUSY;
-				dw_edma_start_transfer(chan);
-			} else {
+			if (!desc->chunks_alloc) {
 				list_del(&vd->node);
 				vchan_cookie_complete(vd);
-				chan->status = EDMA_ST_IDLE;
 			}
+
+			/* Continue transferring if there are remaining chunks or issued requests.
+			 */
+			chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
 			break;
 
 		case EDMA_REQ_STOP:
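
For context on why the change matters, here is a minimal sketch of a generic
dmaengine client that submits two descriptors and issues them with a single
dma_async_issue_pending() call. Such a client relies on the driver draining the
whole issued queue from its completion path, which is the behavior this patch
restores for dw-edma. This is a hypothetical illustration, not code from the
patch or from the dw-edma driver: the function name, buffer parameters, and the
use of memcpy descriptors are made up for the example.

/* Hypothetical dmaengine client, for illustration only. */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_two_copies(struct dma_chan *chan,
			      dma_addr_t dst1, dma_addr_t src1,
			      dma_addr_t dst2, dma_addr_t src2,
			      size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* First transfer: prepared and submitted, but not started yet. */
	tx = dmaengine_prep_dma_memcpy(chan, dst1, src1, len, DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Second transfer, queued behind the first one. */
	tx = dmaengine_prep_dma_memcpy(chan, dst2, src2, len, DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/*
	 * A single issue_pending() covers both descriptors; the driver is
	 * expected to start the second transfer from its completion handling,
	 * without any further call from the client.
	 */
	dma_async_issue_pending(chan);
	return 0;
}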