Mainly this is a preparation patch before adding one-by-one DMA SG entries
transmission. But logically the Tx and Rx DMA channels setup should be
performed in the dma_setup() callback anyway. So move the DMA slave channels
src/dst burst lengths, address and address width configuration from the
Tx/Rx channels preparation methods into dedicated functions, and make sure
those are called at the DMA setup stage.

Note we now check that the return value of the dmaengine_slave_config()
method doesn't indicate an error. That check used to be unnecessary when the
DW DMAC was utilized as the DMA engine, since its device_config() callback
always returns zero (though that might change in the future). But since the
DW APB SSI driver now supports any DMA back-end, we must make sure the DMA
device configuration has been successful before proceeding with any further
setup.

Signed-off-by: Serge Semin <Sergey.Semin@xxxxxxxxxxxxxxxxxxxx>
---
 drivers/spi/spi-dw-dma.c | 42 +++++++++++++++++++++++++++++-----------
 1 file changed, 31 insertions(+), 11 deletions(-)

diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index 1b013ac94a3f..da17897b8acb 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -256,11 +256,9 @@ static void dw_spi_dma_tx_done(void *arg)
 	complete(&dws->dma_completion);
 }
 
-static struct dma_async_tx_descriptor *
-dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
+static int dw_spi_dma_config_tx(struct dw_spi *dws)
 {
 	struct dma_slave_config txconf;
-	struct dma_async_tx_descriptor *txdesc;
 
 	memset(&txconf, 0, sizeof(txconf));
 	txconf.direction = DMA_MEM_TO_DEV;
@@ -270,7 +268,13 @@ dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
 	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
 	txconf.device_fc = false;
 
-	dmaengine_slave_config(dws->txchan, &txconf);
+	return dmaengine_slave_config(dws->txchan, &txconf);
+}
+
+static struct dma_async_tx_descriptor *
+dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+	struct dma_async_tx_descriptor *txdesc;
 
 	txdesc = dmaengine_prep_slave_sg(dws->txchan,
 				xfer->tx_sg.sgl,
@@ -345,14 +349,9 @@ static void dw_spi_dma_rx_done(void *arg)
 	complete(&dws->dma_completion);
 }
 
-static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
-		struct spi_transfer *xfer)
+static int dw_spi_dma_config_rx(struct dw_spi *dws)
 {
 	struct dma_slave_config rxconf;
-	struct dma_async_tx_descriptor *rxdesc;
-
-	if (!xfer->rx_buf)
-		return NULL;
 
 	memset(&rxconf, 0, sizeof(rxconf));
 	rxconf.direction = DMA_DEV_TO_MEM;
@@ -362,7 +361,16 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
 	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
 	rxconf.device_fc = false;
 
-	dmaengine_slave_config(dws->rxchan, &rxconf);
+	return dmaengine_slave_config(dws->rxchan, &rxconf);
+}
+
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
+		struct spi_transfer *xfer)
+{
+	struct dma_async_tx_descriptor *rxdesc;
+
+	if (!xfer->rx_buf)
+		return NULL;
 
 	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
 				xfer->rx_sg.sgl,
@@ -381,10 +389,22 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
 static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
 {
 	u16 imr, dma_ctrl;
+	int ret;
 
 	if (!xfer->tx_buf)
 		return -EINVAL;
 
+	/* Setup DMA channels */
+	ret = dw_spi_dma_config_tx(dws);
+	if (ret)
+		return ret;
+
+	if (xfer->rx_buf) {
+		ret = dw_spi_dma_config_rx(dws);
+		if (ret)
+			return ret;
+	}
+
 	/* Set the DMA handshaking interface */
 	dma_ctrl = SPI_DMA_TDMAE;
 	if (xfer->rx_buf)
-- 
2.27.0