The recent update to remove the orig_nents checks revealed that not all
DMA sync backends can cope with an unallocated SG list when
orig_nents == 0 is supplied (commit 861370f49ce4 ("iommu/dma: force
bouncing if the size is not cacheline-aligned"), for example, makes
that happen for the IOMMU case). This means we have to check whether
the buffers are DMA mapped before trying to sync them. Re-introduce
that check in the form of calling ->can_dma(), the same way it's done
in the DMA mapping loop for the SPI transfers.

Reported-by: Nícolas F. R. A. Prado <nfraprado@xxxxxxxxxxxxx>
Reported-by: Neil Armstrong <neil.armstrong@xxxxxxxxxx>
Closes: https://lore.kernel.org/r/8ae675b5-fcf9-4c9b-b06a-4462f70e1322@xxxxxxxxxx
Closes: https://lore.kernel.org/all/d3679496-2e4e-4a7c-97ed-f193bd53af1d@notapiano
Fixes: 8cc3bad9d9d6 ("spi: Remove unneded check for orig_nents")
Suggested-by: Nícolas F. R. A. Prado <nfraprado@xxxxxxxxxxxxx>
Tested-by: Nícolas F. R. A. Prado <nfraprado@xxxxxxxxxxxxx>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@xxxxxxxxxxxxxxx>
---
 drivers/spi/spi.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 51811f04e463..cc8bb7d5ba1a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1311,7 +1311,7 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 	return 0;
 }
 
-static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
 				    struct spi_transfer *xfer)
 {
 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1320,11 +1320,14 @@ static void spi_dma_sync_for_device(struct spi_controller *ctlr,
 	if (!ctlr->cur_msg_mapped)
 		return;
 
+	if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+		return;
+
 	dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 	dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 }
 
-static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
 				 struct spi_transfer *xfer)
 {
 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1333,6 +1336,9 @@ static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
 
 	if (!ctlr->cur_msg_mapped)
 		return;
 
+	if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+		return;
+
 	dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 	dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 }
@@ -1350,11 +1356,13 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 }
 
 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+				    struct spi_message *msg,
 				    struct spi_transfer *xfer)
 {
 }
 
 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+				 struct spi_message *msg,
 				 struct spi_transfer *xfer)
 {
 }
@@ -1626,10 +1634,10 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 		reinit_completion(&ctlr->xfer_completion);
 
 fallback_pio:
-		spi_dma_sync_for_device(ctlr, xfer);
+		spi_dma_sync_for_device(ctlr, msg, xfer);
 		ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 		if (ret < 0) {
-			spi_dma_sync_for_cpu(ctlr, xfer);
+			spi_dma_sync_for_cpu(ctlr, msg, xfer);
 
 			if (ctlr->cur_msg_mapped &&
 			   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
@@ -1654,7 +1662,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 				msg->status = ret;
 			}
 
-		spi_dma_sync_for_cpu(ctlr, xfer);
+		spi_dma_sync_for_cpu(ctlr, msg, xfer);
 	} else {
 		if (xfer->len)
 			dev_err(&msg->spi->dev,
-- 
2.43.0.rc1.1336.g36b5255a03ac
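
A side note for reviewers: the new gating relies on ->can_dma() being a
side-effect-free predicate, since the core now consults it on the sync
paths as well as in the DMA mapping loop. It is only reached after the
cur_msg_mapped test, so the callback is known to be non-NULL there. For
illustration, a minimal sketch of the kind of callback a controller
driver registers (hypothetical driver name and threshold, not taken
from this patch):

/*
 * Hypothetical example: a typical ->can_dma() implementation. It must
 * be a pure predicate, as the core may call it both when mapping and
 * when syncing the same transfer.
 */
#define FOO_SPI_DMA_MIN_LEN	32	/* made-up PIO/DMA cutoff */

static bool foo_spi_can_dma(struct spi_controller *ctlr,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Bounce short transfers through PIO; DMA map only longer ones. */
	return xfer->len > FOO_SPI_DMA_MIN_LEN;
}

A controller that can always DMA would simply return true here; the SPI
core skips DMA mapping entirely when no ->can_dma() callback is set.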