This is a note to let you know that I've just added the patch titled

    dmaengine: mv_xor_v2: do not use descriptors not acked by async_tx

to the 4.9-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     dmaengine-mv_xor_v2-do-not-use-descriptors-not-acked-by-async_tx.patch
and it can be found in the queue-4.9 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


From bc473da1ed726c975ad47f8d7d27631de11356d8 Mon Sep 17 00:00:00 2001
From: Thomas Petazzoni <thomas.petazzoni@xxxxxxxxxxxxxxxxxx>
Date: Fri, 5 May 2017 11:57:46 +0200
Subject: dmaengine: mv_xor_v2: do not use descriptors not acked by async_tx

From: Thomas Petazzoni <thomas.petazzoni@xxxxxxxxxxxxxxxxxx>

commit bc473da1ed726c975ad47f8d7d27631de11356d8 upstream.

Descriptors that have not been acknowledged by the async_tx layer
should not be re-used, so this commit adjusts the implementation of
mv_xor_v2_prep_sw_desc() to skip descriptors for which
async_tx_test_ack() is false.

Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Vinod Koul <vinod.koul@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 drivers/dma/mv_xor_v2.c |   32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -344,6 +344,7 @@ static struct mv_xor_v2_sw_desc *
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
 	struct mv_xor_v2_sw_desc *sw_desc;
+	bool found = false;

 	/* Lock the channel */
 	spin_lock_bh(&xor_dev->lock);
@@ -355,19 +356,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_
 		return NULL;
 	}

-	/* get a free SW descriptor from the SW DESQ */
-	sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-				   struct mv_xor_v2_sw_desc, free_list);
+	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+		if (async_tx_test_ack(&sw_desc->async_tx)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		spin_unlock_bh(&xor_dev->lock);
+		return NULL;
+	}
+
 	list_del(&sw_desc->free_list);

 	/* Release the channel */
 	spin_unlock_bh(&xor_dev->lock);

-	/* set the async tx descriptor */
-	dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-	sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-	async_tx_ack(&sw_desc->async_tx);
-
 	return sw_desc;
 }

@@ -785,8 +790,15 @@ static int mv_xor_v2_probe(struct platfo

 	/* add all SW descriptors to the free list */
 	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-		xor_dev->sw_desq[i].idx = i;
-		list_add(&xor_dev->sw_desq[i].free_list,
+		struct mv_xor_v2_sw_desc *sw_desc =
+			xor_dev->sw_desq + i;
+		sw_desc->idx = i;
+		dma_async_tx_descriptor_init(&sw_desc->async_tx,
+					     &xor_dev->dmachan);
+		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+		async_tx_ack(&sw_desc->async_tx);
+
+		list_add(&sw_desc->free_list,
 			 &xor_dev->free_sw_desc);
 	}


Patches currently in stable-queue which might be from thomas.petazzoni@xxxxxxxxxxxxxxxxxx are

queue-4.9/dmaengine-mv_xor_v2-properly-handle-wrapping-in-the-array-of-hw-descriptors.patch
queue-4.9/dmaengine-mv_xor_v2-enable-xor-engine-after-its-configuration.patch
queue-4.9/dmaengine-mv_xor_v2-set-dma-mask-to-40-bits.patch
queue-4.9/dmaengine-mv_xor_v2-do-not-use-descriptors-not-acked-by-async_tx.patch
queue-4.9/dmaengine-mv_xor_v2-handle-mv_xor_v2_prep_sw_desc-error-properly.patch
queue-4.9/dmaengine-mv_xor_v2-remove-interrupt-coalescing.patch
queue-4.9/dmaengine-mv_xor_v2-fix-tx_submit-implementation.patch
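
For readers skimming the queue, here is a small stand-alone C sketch of the
allocation pattern the patch introduces: instead of blindly taking the head of
the free list (which the async_tx layer may still be referencing), walk the
list and only hand out a descriptor whose consumer has acknowledged it. This
is not driver code; names like fake_desc and take_first_acked are invented for
illustration, and the "acked" flag stands in for async_tx_test_ack() on the
real descriptor.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_desc {
	int idx;
	bool acked;             /* stands in for async_tx_test_ack() */
	struct fake_desc *next; /* stands in for the free_list linkage */
};

/* Old behaviour: always take the head of the free list, even if the
 * async_tx layer still holds an un-acked reference to it. */
static struct fake_desc *take_head(struct fake_desc *head)
{
	return head;
}

/* New behaviour: skip descriptors that have not been acked and return
 * NULL when nothing is safe to reuse, so the caller can back off. */
static struct fake_desc *take_first_acked(struct fake_desc *head)
{
	struct fake_desc *d;

	for (d = head; d; d = d->next)
		if (d->acked)
			return d;
	return NULL;
}

int main(void)
{
	struct fake_desc d1 = { .idx = 1, .acked = true,  .next = NULL };
	struct fake_desc d0 = { .idx = 0, .acked = false, .next = &d1 };

	/* The head of the list is still in flight (not acked). */
	printf("old code hands out desc %d (acked=%d) -- unsafe reuse\n",
	       take_head(&d0)->idx, take_head(&d0)->acked);
	printf("fixed code hands out desc %d (acked=%d)\n",
	       take_first_acked(&d0)->idx, take_first_acked(&d0)->acked);
	return 0;
}

Note that in the driver the equivalent list walk runs under spin_lock_bh(),
and the dma_async_tx_descriptor_init()/async_tx_ack() calls move from the
prep path to mv_xor_v2_probe(), so every descriptor enters the free list
already initialized and acked.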