Concept was originally implemented for musb_gadget by Felipe Balbi and Juha Yrjölä. Signed-off-by: Heikki Krogerus <ext-heikki.krogerus@xxxxxxxxx> --- drivers/usb/musb/musb_dma.h | 6 ++ drivers/usb/musb/musb_host.c | 168 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 174 insertions(+), 0 deletions(-) diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h index 916065b..428b5ac 100644 --- a/drivers/usb/musb/musb_dma.h +++ b/drivers/usb/musb/musb_dma.h @@ -80,6 +80,12 @@ struct musb_hw_ep; #define tusb_dma_omap() 0 #endif +#ifdef CONFIG_USB_INVENTRA_DMA +#define is_inventra_dma() 1 +#else +#define is_inventra_dma() 0 +#endif + /* Anomaly 05000456 - USB Receive Interrupt Is Not Generated in DMA Mode 1 * Only allow DMA mode 1 to be used when the USB will actually generate the * interrupts we expect. diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index b6139b7..d72969f 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -201,6 +201,174 @@ static inline void musb_release_qtd(struct musb_qtd *qtd) qtd = NULL; } +/** + * start_dma - start dma for a transfer + * @musb: musb controller pointer + * @qtd: queue element to be transfered + * + * Context: controller locked, IRQs blocked, endpoint selected + */ +static int start_dma(struct musb *musb, struct musb_qtd *qtd) +{ + struct dma_controller *dma_controller = musb->dma_controller; + struct musb_hw_ep *hw_ep = qtd->qh->hw_ep; + struct urb *urb = qtd->urb; + int is_in = usb_pipein(urb->pipe); + struct dma_channel *dma; + u32 len, offset = 0; + u16 csr; + + if (!dma_controller) + return -1; + + dma = is_in ? 
hw_ep->rx_channel : hw_ep->tx_channel; + if (!dma) { + dma = dma_controller->channel_alloc( + dma_controller, hw_ep, !is_in); + if (!dma) + return -1; + + if (is_in) + hw_ep->rx_channel = dma; + else + hw_ep->tx_channel = dma; + } + + len = urb->transfer_buffer_length - urb->actual_length; + dma->actual_len = 0L; + qtd->segsize = min(len, dma->max_len); + + /* Only supporting mode1. For transfers smaller than 512, pio is used. + */ + if (len <= qtd->maxpacket || + (len == qtd->maxpacket && qtd->maxpacket < 512) || + (musb_readw(hw_ep->regs, MUSB_RXCOUNT) < 512)) { + goto small_transfer; + } + + if (is_in && is_inventra_dma()) { + /* REVISIT always round up the transfer to be a multiple of the + * maximum packet size or musb will not interrupt. AUTOREQ + * guarantees that an extra IN token is always sent at the end + * of every transfer. + * + * FIXME if the transfer is already a multiple of the maximum + * packet size, one maximum packet size is removed from the + * transfer. This should not be necessary. Leaving the extra + * data (AUTOREQ) pending + * should be enough. 
+ */ + if (!(qtd->segsize % qtd->maxpacket)) + qtd->segsize -= qtd->maxpacket; + else + qtd->segsize -= (qtd->segsize % qtd->maxpacket); + } + + if (qtd->type == USB_ENDPOINT_XFER_ISOC) + offset = urb->iso_frame_desc[0].offset; + else + offset = urb->actual_length; + + /* We abort() so dma->actual_len gets updated */ + musb->dma_controller->channel_abort(dma); + + /* always use mode1 for now */ + if (!dma_controller->channel_program( + dma, qtd->maxpacket, + true, + urb->transfer_dma + offset, + qtd->segsize)) { + goto ret; + } + + if (is_in) { + csr = musb_readw(hw_ep->regs, MUSB_RXCSR); + csr |= MUSB_RXCSR_H_WZC_BITS; + + csr |= MUSB_RXCSR_H_AUTOREQ; + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + if (qtd->hb_mult == 1) { + csr |= MUSB_RXCSR_AUTOCLEAR; + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + } + + csr |= MUSB_RXCSR_DMAENAB; + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + + csr |= MUSB_RXCSR_DMAMODE; + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + + csr = musb_readw(hw_ep->regs, MUSB_RXCSR); + } else { + csr = musb_readw(hw_ep->regs, MUSB_TXCSR); + csr |= MUSB_TXCSR_H_WZC_BITS; + csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE; + if (qtd->hb_mult == 1) + csr |= MUSB_TXCSR_AUTOSET; + csr |= MUSB_TXCSR_MODE; + musb_writew(hw_ep->regs, MUSB_TXCSR, csr); + csr = musb_readw(hw_ep->regs, MUSB_TXCSR); + } + DBG(3, "%s%d dma started, len %d, csr %04x\n", + is_in ? "RX" : "TX", hw_ep->epnum, qtd->segsize, csr); + + return 0; +small_transfer: + DBG(3, "small transfer (%d bytes), using pio\n", + (urb->transfer_flags & URB_SHORT_NOT_OK) + ? 
len + : musb_readw(hw_ep->regs, MUSB_RXCOUNT)); +ret: + dma_controller->channel_release(dma); + hw_ep->rx_channel = NULL; + hw_ep->tx_channel = NULL; + dma = NULL; + return -1; +} + +/** + * stop_dma - stops a dma transfer + * @musb: musb controller pointer + * @qtd: queue element to be stopped + * + * Context: controller locked, IRQs blocked + */ +static void stop_dma(struct musb *musb, struct musb_qtd *qtd) +{ + struct musb_hw_ep *hw_ep = qtd->qh->hw_ep; + void __iomem *epio = hw_ep->regs; + int is_in = usb_pipein(qtd->urb->pipe); + struct dma_channel *dma; + + if (is_in) { + u16 csr; + + dma = hw_ep->rx_channel; + hw_ep->rx_channel = NULL; + csr = musb_readw(epio, MUSB_RXCSR); + csr &= ~(MUSB_RXCSR_DMAENAB + | MUSB_RXCSR_AUTOCLEAR + | MUSB_RXCSR_H_AUTOREQ); + musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_H_WZC_BITS); + csr &= ~MUSB_RXCSR_DMAMODE; + musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_H_WZC_BITS); + } else { + u16 csr; + + dma = hw_ep->tx_channel; + hw_ep->tx_channel = NULL; + csr = musb_readw(epio, MUSB_TXCSR); + csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_AUTOSET); + musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS); + csr &= ~MUSB_TXCSR_DMAMODE; + musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS); + } + + if (dma) + musb->dma_controller->channel_release(dma); + dma = NULL; +} + /* * Start the URB at the front of an endpoint's queue * end must be claimed from the caller. -- 1.5.4.3 -- To unsubscribe from this list: send the line "unsubscribe linux-usb" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html