Check the capabilities of the DMA controller during init to make sure it
is capable of handling MEM2DEV transfers for the tx channel and DEV2MEM
transfers for the rx channel.

The current DW DMA driver requires both the tx and rx channels to be
configured and functional for any kind of transfer to take effect,
including half duplex. Hence, check both the tx and rx directions and
fail on unavailability of either.

Signed-off-by: Joy Chakraborty <joychakr@xxxxxxxxxx>
Reviewed-by: Serge Semin <fancer.lancer@xxxxxxxxx>
Tested-by: Serge Semin <fancer.lancer@xxxxxxxxx>
* tested on a Baikal-T1 based system with the DW SPI looped-back
  interface transferring a chunk of data with DFS: 8, 12, 16.
---
 drivers/spi/spi-dw-dma.c | 41 ++++++++++++++++++++++++++++++++---------
 1 file changed, 32 insertions(+), 9 deletions(-)

diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index f19c092920a1..2363317a0dca 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -72,12 +72,22 @@ static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
 	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
 }
 
-static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
+static int dw_spi_dma_caps_init(struct dw_spi *dws)
 {
-	struct dma_slave_caps tx = {0}, rx = {0};
+	struct dma_slave_caps tx, rx;
+	int ret;
+
+	ret = dma_get_slave_caps(dws->txchan, &tx);
+	if (ret)
+		return ret;
 
-	dma_get_slave_caps(dws->txchan, &tx);
-	dma_get_slave_caps(dws->rxchan, &rx);
+	ret = dma_get_slave_caps(dws->rxchan, &rx);
+	if (ret)
+		return ret;
+
+	if (!(tx.directions & BIT(DMA_MEM_TO_DEV) &&
+	      rx.directions & BIT(DMA_DEV_TO_MEM)))
+		return -ENXIO;
 
 	if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
 		dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
@@ -87,6 +97,8 @@ static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
 		dws->dma_sg_burst = rx.max_sg_burst;
 	else
 		dws->dma_sg_burst = 0;
+
+	return 0;
 }
 
 static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
@@ -95,6 +107,7 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
 	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
 	struct pci_dev *dma_dev;
 	dma_cap_mask_t mask;
+	int ret = -EBUSY;
 
 	/*
 	 * Get pci device for DMA controller, currently it could only
@@ -124,20 +137,25 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
 
 	init_completion(&dws->dma_completion);
 
-	dw_spi_dma_maxburst_init(dws);
+	ret = dw_spi_dma_caps_init(dws);
+	if (ret)
+		goto free_txchan;
 
-	dw_spi_dma_sg_burst_init(dws);
+	dw_spi_dma_maxburst_init(dws);
 
 	pci_dev_put(dma_dev);
 
 	return 0;
 
+free_txchan:
+	dma_release_channel(dws->txchan);
+	dws->txchan = NULL;
 free_rxchan:
 	dma_release_channel(dws->rxchan);
 	dws->rxchan = NULL;
 err_exit:
 	pci_dev_put(dma_dev);
-	return -EBUSY;
+	return ret;
 }
 
 static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
@@ -163,12 +181,17 @@ static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
 
 	init_completion(&dws->dma_completion);
 
-	dw_spi_dma_maxburst_init(dws);
+	ret = dw_spi_dma_caps_init(dws);
+	if (ret)
+		goto free_txchan;
 
-	dw_spi_dma_sg_burst_init(dws);
+	dw_spi_dma_maxburst_init(dws);
 
 	return 0;
 
+free_txchan:
+	dma_release_channel(dws->txchan);
+	dws->txchan = NULL;
 free_rxchan:
 	dma_release_channel(dws->rxchan);
 	dws->rxchan = NULL;
-- 
2.40.1.521.gf1e218fcd8-goog
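
For readers less familiar with the dmaengine API, below is a minimal
standalone sketch (not part of the patch) of the directional capability
check that the new dw_spi_dma_caps_init() performs. The helper name
example_check_dma_dirs() is hypothetical; dma_get_slave_caps(),
struct dma_slave_caps and the DMA_MEM_TO_DEV / DMA_DEV_TO_MEM direction
bits are the real dmaengine interfaces used in the diff above.

#include <linux/bits.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Hypothetical helper mirroring the check added in dw_spi_dma_caps_init(). */
static int example_check_dma_dirs(struct dma_chan *txchan,
				  struct dma_chan *rxchan)
{
	struct dma_slave_caps tx, rx;
	int ret;

	/* Ask each channel's controller which transfers it supports. */
	ret = dma_get_slave_caps(txchan, &tx);
	if (ret)
		return ret;

	ret = dma_get_slave_caps(rxchan, &rx);
	if (ret)
		return ret;

	/* SPI TX needs memory-to-device, SPI RX needs device-to-memory. */
	if (!(tx.directions & BIT(DMA_MEM_TO_DEV)) ||
	    !(rx.directions & BIT(DMA_DEV_TO_MEM)))
		return -ENXIO;

	return 0;
}

Both channels are checked because the DW SPI core drives every transfer,
including half-duplex ones, through the paired tx/rx DMA setup, so a
controller lacking either direction cannot service the device at all.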