> +static void sci_request_dma(struct uart_port *port)
> +{
> +	struct sci_port *s = to_sci_port(port);
> +	struct sh_dmae_slave *param;
> +	struct dma_chan *chan;
> +	dma_cap_mask_t mask;
> +	int nent;
> +
> +	dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
> +		port->line, s->dma_dev);
> +
> +	if (!s->dma_dev)
> +		return;
> +
> +	dma_cap_zero(mask);
> +	dma_cap_set(DMA_SLAVE, mask);
> +
> +	param = &s->param_tx;
> +
> +	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
> +	param->slave_id = s->slave_tx;
> +	param->dma_dev = s->dma_dev;
> +
> +	s->cookie_tx = -EINVAL;
> +	chan = dma_request_channel(mask, filter, param);
> +	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
> +	if (chan) {
> +		s->chan_tx = chan;
> +		sg_init_table(&s->sg_tx, 1);
> +		/* UART circular tx buffer is an aligned page. */
> +		BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
> +		sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
> +			    UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
> +		nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
> +		if (!nent)
> +			sci_tx_dma_release(s, false);
> +		else
> +			dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
> +				sg_dma_len(&s->sg_tx),
> +				port->state->xmit.buf, sg_dma_address(&s->sg_tx));
> +
> +		s->sg_len_tx = nent;
> +
> +		INIT_WORK(&s->work_tx, work_fn_tx);
> +	}
> +
> +	param = &s->param_rx;
> +
> +	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
> +	param->slave_id = s->slave_rx;
> +	param->dma_dev = s->dma_dev;
> +
> +	chan = dma_request_channel(mask, filter, param);
> +	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);

If the DMA channel allocation fails, how will that scenario be handled?
There is a possibility of the dma_request_channel() call failing here,
e.g. when the channel is busy (-EBUSY). A minimal sketch of one possible
fallback is appended below.

> +	if (chan) {
> +		dma_addr_t dma[2];
> +		void *buf[2];
> +		int i;
> +
> +		s->chan_rx = chan;
> +
> +		s->buf_len_rx = 2 * max(16, (int)port->fifosize);
> +		buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
> +					    &dma[0], GFP_KERNEL);
> +
> +		if (!buf[0]) {
> +			dev_warn(port->dev,
> +				 "failed to allocate dma buffer, using PIO\n");
> +			sci_rx_dma_release(s, true);
> +			return;
> +		}
> +
> +		buf[1] = buf[0] + s->buf_len_rx;
> +		dma[1] = dma[0] + s->buf_len_rx;
> +
> +		for (i = 0; i < 2; i++) {
> +			struct scatterlist *sg = &s->sg_rx[i];
> +
> +			sg_init_table(sg, 1);
> +			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
> +				    (int)buf[i] & ~PAGE_MASK);
> +			sg->dma_address = dma[i];
> +			sg->dma_length = sg->length;
> +		}
> +
> +		INIT_WORK(&s->work_rx, work_fn_rx);
> +		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
> +
> +		sci_submit_rx(s);
> +	}
> +}

---
Regards,
Govindraj.R
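
For illustration only, here is a minimal, untested sketch of the PIO
fallback asked about above. It assumes the driver context of the quoted
patch (struct sci_port, s->param_rx and the filter callback used in the
quoted dma_request_channel() calls); the helper name
sci_request_rx_chan_or_pio() is made up for this sketch and is not
proposed as the actual fix. Note that dma_request_channel() returns NULL
rather than an error pointer when no matching channel can be allocated:

/* Hypothetical helper, relying on the driver's existing includes
 * (e.g. <linux/dmaengine.h>): request the RX channel and fall back
 * to PIO when no channel can be allocated. */
static void sci_request_rx_chan_or_pio(struct uart_port *port,
				       struct sci_port *s,
				       dma_cap_mask_t mask)
{
	struct dma_chan *chan;

	/* dma_request_channel() hands back NULL (not an ERR_PTR) when no
	 * channel matching 'filter' can be allocated, e.g. because all
	 * suitable channels are already in use. */
	chan = dma_request_channel(mask, filter, &s->param_rx);
	if (!chan) {
		/* Leave s->chan_rx == NULL so the driver keeps using the
		 * interrupt-driven PIO path for reception. */
		dev_notice(port->dev, "no RX DMA channel, using PIO\n");
		s->chan_rx = NULL;
		return;
	}

	s->chan_rx = chan;
	/* ... allocate the coherent buffers, build the scatterlists and
	 * submit the RX descriptors as the quoted hunk already does ... */
}

Since the quoted hunk only tests if (chan), a failed request would fall
back to PIO silently; the sketch just makes that fallback, plus a
diagnostic, explicit.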