Re: [V2] spi: spi-geni-qcom: Add support for SE DMA mode

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Thanks for your patch, Vijaya.

On 21/11/2022 14:19, Vijaya Krishna Nivarthi wrote:
SE DMA mode can be used for larger transfers and FIFO mode
for smaller transfers.

Overall the patch looks good, but with a few minor nits around coding conventions.

Signed-off-by: Vijaya Krishna Nivarthi <quic_vnivarth@xxxxxxxxxxx>
---
  drivers/spi/spi-geni-qcom.c | 211 ++++++++++++++++++++++++++++++++++----------
  1 file changed, 165 insertions(+), 46 deletions(-)

diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 4e83cc5..102529a 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -87,6 +87,8 @@ struct spi_geni_master {
  	struct completion cs_done;
  	struct completion cancel_done;
  	struct completion abort_done;
+	struct completion tx_reset_done;
+	struct completion rx_reset_done;
  	unsigned int oversampling;
  	spinlock_t lock;
  	int irq;
@@ -95,6 +97,7 @@ struct spi_geni_master {
  	struct dma_chan *tx;
  	struct dma_chan *rx;
  	int cur_xfer_mode;
+	u32 cur_m_cmd;
  };
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -129,23 +132,26 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
  	return ret;
  }
-static void handle_fifo_timeout(struct spi_master *spi,
+static void handle_se_timeout(struct spi_master *spi,
  				struct spi_message *msg)
indentation looks off.


  {
  	struct spi_geni_master *mas = spi_master_get_devdata(spi);
  	unsigned long time_left;
  	struct geni_se *se = &mas->se;
+	const struct spi_transfer *xfer;
spin_lock_irq(&mas->lock);
  	reinit_completion(&mas->cancel_done);
-	writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+	if (mas->cur_xfer_mode == GENI_SE_FIFO)
+		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);

empty line here would make the code more readable.

+	xfer = mas->cur_xfer;
  	mas->cur_xfer = NULL;
  	geni_se_cancel_m_cmd(se);
  	spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
  	if (time_left)
-		return;
+		goto unmap_if_dma;
spin_lock_irq(&mas->lock);
  	reinit_completion(&mas->abort_done);
@@ -162,6 +168,45 @@ static void handle_fifo_timeout(struct spi_master *spi,
  		 */
  		mas->abort_failed = true;
  	}
+
+unmap_if_dma:
+	if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		if (mas->cur_m_cmd & SPI_TX_ONLY) {
+			spin_lock_irq(&mas->lock);
+			reinit_completion(&mas->tx_reset_done);
+			writel(1, se->base + SE_DMA_TX_FSM_RST);
+			spin_unlock_irq(&mas->lock);
+			time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
+			if (!time_left)
+				dev_err(mas->dev, "DMA TX RESET failed\n");
+		}
+		if (mas->cur_m_cmd & SPI_RX_ONLY) {
+			spin_lock_irq(&mas->lock);
+			reinit_completion(&mas->rx_reset_done);
+			writel(1, se->base + SE_DMA_RX_FSM_RST);
+			spin_unlock_irq(&mas->lock);
+			time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
+			if (!time_left)
+				dev_err(mas->dev, "DMA RX RESET failed\n");
+		}
+
+		if (xfer) {
+			if (xfer->tx_buf && xfer->tx_dma)
+				geni_se_tx_dma_unprep(se, xfer->tx_dma, xfer->len);
+			if (xfer->rx_buf && xfer->rx_dma)
+				geni_se_rx_dma_unprep(se, xfer->rx_dma, xfer->len);
+		} else {
+			/*
+			 * This can happen if a timeout happened and we had to wait
+			 * for lock in this function because isr was holding the lock
+			 * and handling transfer completion at that time.
+			 * isr will set cur_xfer to NULL when done.
+			 * Unnecessary error but cannot be helped.
+			 * Only do reset, dma_unprep is already done by isr.
+			 */
+			dev_err(mas->dev, "Cancel/Abort on completed SPI transfer\n");
+		}
+	}
  }
static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
@@ -178,7 +223,8 @@ static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
switch (mas->cur_xfer_mode) {
  	case GENI_SE_FIFO:
-		handle_fifo_timeout(spi, msg);
+	case GENI_SE_DMA:
+		handle_se_timeout(spi, msg);
  		break;
  	case GENI_GPI_DMA:
  		handle_gpi_timeout(spi, msg);
@@ -260,7 +306,7 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
  	time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
  	if (!time_left) {
  		dev_warn(mas->dev, "Timeout setting chip select\n");
-		handle_fifo_timeout(spi, NULL);
+		handle_se_timeout(spi, NULL);
  	}
exit:
@@ -482,8 +528,12 @@ static bool geni_can_dma(struct spi_controller *ctlr,
  {
  	struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
- /* check if dma is supported */
-	return mas->cur_xfer_mode != GENI_SE_FIFO;
+	/*
+	 * Return true if transfer needs to be mapped prior to
+	 * calling transfer_one which is the case only for GPI_DMA.
+	 * For SE_DMA mode, map/unmap is done in geni_se_*x_dma_prep.
+	 */
+	return mas->cur_xfer_mode == GENI_GPI_DMA;
  }
static int spi_geni_prepare_message(struct spi_master *spi,
@@ -494,6 +544,7 @@ static int spi_geni_prepare_message(struct spi_master *spi,
switch (mas->cur_xfer_mode) {
  	case GENI_SE_FIFO:
+	case GENI_SE_DMA:
  		if (spi_geni_is_abort_still_pending(mas))
  			return -EBUSY;
  		ret = setup_fifo_params(spi_msg->spi, spi);
@@ -597,7 +648,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
  			break;
  		}
  		/*
-		 * in case of failure to get dma channel, we can still do the
+		 * in case of failure to get gpi dma channel, we can still do the
  		 * FIFO mode, so fallthrough
  		 */
  		dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
@@ -716,12 +767,12 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
  	mas->rx_rem_bytes -= rx_bytes;
  }
-static void setup_fifo_xfer(struct spi_transfer *xfer,
+static int setup_se_xfer(struct spi_transfer *xfer,
  				struct spi_geni_master *mas,
  				u16 mode, struct spi_master *spi)

consider adjusting the tabs once you change the function name.
  {
  	u32 m_cmd = 0;
-	u32 len;
+	u32 len, fifo_size;
  	struct geni_se *se = &mas->se;
  	int ret;
@@ -748,7 +799,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
  	/* Speed and bits per word can be overridden per transfer */
  	ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
  	if (ret)
-		return;
+		return ret;
mas->tx_rem_bytes = 0;
  	mas->rx_rem_bytes = 0;
@@ -771,6 +822,13 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
  		writel(len, se->base + SE_SPI_RX_TRANS_LEN);
  		mas->rx_rem_bytes = xfer->len;
  	}
+	mas->cur_m_cmd = m_cmd;
+
+	/* Select transfer mode based on transfer length */
+	fifo_size =
+		mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;

line can go up to 100 chars


+	mas->cur_xfer_mode = (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;

I do not see any protection for cur_xfer_mode? Isn't it true that it could be modified here while an interrupt handler is using this?


+	geni_se_select_mode(se, mas->cur_xfer_mode);
/*
  	 * Lock around right before we start the transfer since our
@@ -778,11 +836,39 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
  	 */
  	spin_lock_irq(&mas->lock);
  	geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
-	if (m_cmd & SPI_TX_ONLY) {
+
+	if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		if (m_cmd & SPI_RX_ONLY) {
+			ret =  geni_se_rx_dma_prep(se, xfer->rx_buf,
+				xfer->len, &xfer->rx_dma);
+			if (ret) {
+				dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
+				xfer->rx_dma = 0;
+				goto unlock_and_return;
+			}
+		}
+		if (m_cmd & SPI_TX_ONLY) {
+			ret =  geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
+				xfer->len, &xfer->tx_dma);
+			if (ret) {
+				dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
+				xfer->tx_dma = 0;
+				if (m_cmd & SPI_RX_ONLY && xfer->rx_dma) {
+					/* Unmap rx buffer if duplex transfer */
+					geni_se_rx_dma_unprep(se, xfer->rx_dma, xfer->len);
+					xfer->rx_dma = 0;
+				}
+				goto unlock_and_return;
+			}
+		}
+	} else if (m_cmd & SPI_TX_ONLY) {
  		if (geni_spi_handle_tx(mas))
  			writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
  	}
+
+unlock_and_return:
  	spin_unlock_irq(&mas->lock);
+	return ret;
  }
static int spi_geni_transfer_one(struct spi_master *spi,
@@ -790,6 +876,7 @@ static int spi_geni_transfer_one(struct spi_master *spi,
  				struct spi_transfer *xfer)
  {
  	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	int ret;
if (spi_geni_is_abort_still_pending(mas))
  		return -EBUSY;
@@ -798,9 +885,12 @@ static int spi_geni_transfer_one(struct spi_master *spi,
  	if (!xfer->len)
  		return 0;
- if (mas->cur_xfer_mode == GENI_SE_FIFO) {
-		setup_fifo_xfer(xfer, mas, slv->mode, spi);
-		return 1;
+	if (mas->cur_xfer_mode == GENI_SE_FIFO || mas->cur_xfer_mode == GENI_SE_DMA) {
+		ret = setup_se_xfer(xfer, mas, slv->mode, spi);
+		/* SPI framework expects +ve ret code to wait for transfer complete */
+		if (!ret)
+			ret = 1;
+		return ret;
  	}
  	return setup_gsi_xfer(xfer, mas, slv, spi);
  }
@@ -823,39 +913,66 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
spin_lock(&mas->lock);

-	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
-		geni_spi_handle_rx(mas);
-
-	if (m_irq & M_TX_FIFO_WATERMARK_EN)
-		geni_spi_handle_tx(mas);
-
-	if (m_irq & M_CMD_DONE_EN) {
-		if (mas->cur_xfer) {
+	if (mas->cur_xfer_mode == GENI_SE_FIFO) {

Switch case?

...

+		}
+	} else if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		const struct spi_transfer *xfer = mas->cur_xfer;


--srini



[Index of Archives]     [Linux Kernel]     [Linux ARM (vger)]     [Linux ARM MSM]     [Linux Omap]     [Linux Arm]     [Linux Tegra]     [Fedora ARM]     [Linux for Samsung SOC]     [eCos]     [Linux Fastboot]     [Gcc Help]     [Git]     [DCCP]     [IETF Announce]     [Security]     [Linux MIPS]     [Yosemite Campsites]

  Powered by Linux