On 2020-03-24 8:48 PM, Michael Walle wrote: > If the kernel console output is on this console any > dev_{err,warn,info}() may result in a deadlock if the sport->port.lock > spinlock is already held. This is because the _console_write() tries to > acquire this lock, too. Remove any error messages where the spinlock is > taken or print after the lock is released. > > Reported-by: Leonard Crestez <leonard.crestez@xxxxxxx> > Signed-off-by: Michael Walle <michael@xxxxxxxx> It seems that this was an issue even before commit 159381df1442 ("tty: serial: fsl_lpuart: fix DMA operation when using IOMMU") but these error prints never triggered. Would it be possible to move all the dma alloc/config/prep outside the serial port lock? As it stands this still calls into dmaengine code and that might decide to print as well. Really I don't think the lock needs to protect more than bits like TDMAE/RDMAE. BTW: You should add more people in CC for reviews, for example linux-imx@xxxxxxx is checked by a lot of people. 
> --- > drivers/tty/serial/fsl_lpuart.c | 35 +++++++-------------------------- > 1 file changed, 7 insertions(+), 28 deletions(-) > > diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c > index bbba298b68a4..0910308b38b1 100644 > --- a/drivers/tty/serial/fsl_lpuart.c > +++ b/drivers/tty/serial/fsl_lpuart.c > @@ -420,7 +420,6 @@ static void lpuart_dma_tx(struct lpuart_port *sport) > { > struct circ_buf *xmit = &sport->port.state->xmit; > struct scatterlist *sgl = sport->tx_sgl; > - struct device *dev = sport->port.dev; > struct dma_chan *chan = sport->dma_tx_chan; > int ret; > > @@ -442,10 +441,8 @@ static void lpuart_dma_tx(struct lpuart_port *sport) > > ret = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents, > DMA_TO_DEVICE); > - if (!ret) { > - dev_err(dev, "DMA mapping error for TX.\n"); > + if (!ret) > return; > - } > > sport->dma_tx_desc = dmaengine_prep_slave_sg(chan, sgl, > ret, DMA_MEM_TO_DEV, > @@ -453,7 +450,6 @@ static void lpuart_dma_tx(struct lpuart_port *sport) > if (!sport->dma_tx_desc) { > dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents, > DMA_TO_DEVICE); > - dev_err(dev, "Cannot prepare TX slave DMA!\n"); > return; > } > > @@ -520,21 +516,12 @@ static int lpuart_dma_tx_request(struct uart_port *port) > struct lpuart_port *sport = container_of(port, > struct lpuart_port, port); > struct dma_slave_config dma_tx_sconfig = {}; > - int ret; > > dma_tx_sconfig.dst_addr = lpuart_dma_datareg_addr(sport); > dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; > dma_tx_sconfig.dst_maxburst = 1; > dma_tx_sconfig.direction = DMA_MEM_TO_DEV; > - ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig); > - > - if (ret) { > - dev_err(sport->port.dev, > - "DMA slave config failed, err = %d\n", ret); > - return ret; > - } > - > - return 0; > + return dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig); > } > > static bool lpuart_is_32(struct lpuart_port *sport) > @@ -1074,8 +1061,8 @@ static void 
lpuart_copy_rx_to_tty(struct lpuart_port *sport) > > dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state); > if (dmastat == DMA_ERROR) { > - dev_err(sport->port.dev, "Rx DMA transfer failed!\n"); > spin_unlock_irqrestore(&sport->port.lock, flags); > + dev_err(sport->port.dev, "Rx DMA transfer failed!\n"); > return; > } > > @@ -1179,23 +1166,17 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport) > sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len); > nent = dma_map_sg(chan->device->dev, &sport->rx_sgl, 1, > DMA_FROM_DEVICE); > - > - if (!nent) { > - dev_err(sport->port.dev, "DMA Rx mapping error\n"); > + if (!nent) > return -EINVAL; > - } > > dma_rx_sconfig.src_addr = lpuart_dma_datareg_addr(sport); > dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; > dma_rx_sconfig.src_maxburst = 1; > dma_rx_sconfig.direction = DMA_DEV_TO_MEM; > - ret = dmaengine_slave_config(chan, &dma_rx_sconfig); > > - if (ret < 0) { > - dev_err(sport->port.dev, > - "DMA Rx slave config failed, err = %d\n", ret); > + ret = dmaengine_slave_config(chan, &dma_rx_sconfig); > + if (ret < 0) > return ret; > - } > > sport->dma_rx_desc = dmaengine_prep_dma_cyclic(chan, > sg_dma_address(&sport->rx_sgl), > @@ -1203,10 +1184,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport) > sport->rx_sgl.length / 2, > DMA_DEV_TO_MEM, > DMA_PREP_INTERRUPT); > - if (!sport->dma_rx_desc) { > - dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n"); > + if (!sport->dma_rx_desc) > return -EFAULT; > - } > > sport->dma_rx_desc->callback = lpuart_dma_rx_complete; > sport->dma_rx_desc->callback_param = sport; >