From: Hema HK <hemahk@xxxxxx>

DMA channels are allocated in the musb_gadget_enable() function. The Mentor
controller has only 8 DMA channels for its 31 endpoints. If multiple gadgets
(g_multi) enable multiple endpoints but do not transfer data on all of them,
the idle endpoints hold on to channels while other endpoints end up using
PIO mode. Use the DMA channels more effectively by allocating a channel when
a data transfer is requested and releasing it when the request completes.

Signed-off-by: Hema HK <hemahk@xxxxxx>
Cc: Felipe Balbi <felipe.balbi@xxxxxxxxx>
---
Based off V2.6.34 + gregkh-07-usb-2.6.34.patch
[Rebased with "[PATCH v5] usb: musb: Unmapping the dma buffer when switching
to PIO mode"]

Index: linux-2.6/drivers/usb/musb/musb_gadget.c
===================================================================
--- linux-2.6.orig/drivers/usb/musb/musb_gadget.c
+++ linux-2.6/drivers/usb/musb/musb_gadget.c
@@ -122,23 +122,25 @@ static inline void map_dma_buffer(struct
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
-	if (request->mapped) {
-		dma_unmap_single(musb->controller,
-			request->request.dma,
-			request->request.length,
-			request->tx
-				? DMA_TO_DEVICE
-				: DMA_FROM_DEVICE);
-		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
-		dma_sync_single_for_cpu(musb->controller,
-			request->request.dma,
-			request->request.length,
-			request->tx
-				? DMA_TO_DEVICE
-				: DMA_FROM_DEVICE);
+	if (request->request.dma != DMA_ADDR_INVALID) {
+		if (request->mapped) {
+			dma_unmap_single(musb->controller,
+				request->request.dma,
+				request->request.length,
+				request->tx
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+			request->request.dma = DMA_ADDR_INVALID;
+			request->mapped = 0;
+		} else {
+			dma_sync_single_for_cpu(musb->controller,
+				request->request.dma,
+				request->request.length,
+				request->tx
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+		}
 	}
 }
 
@@ -173,6 +175,9 @@ __acquires(ep->musb->lock)
 
 	if (is_dma_capable())
 		if (ep->dma) {
+			struct dma_controller *c = ep->musb->dma_controller;
+			c->channel_release(ep->dma);
+			ep->dma = NULL;
 			unmap_dma_buffer(req, musb);
 		}
 	if (request->status == 0)
@@ -304,8 +309,9 @@ static void txstate(struct musb *musb, s
 	struct usb_request	*request;
 	u16			fifo_count = 0, csr;
 	int			use_dma = 0;
-
+	struct musb_hw_ep	*hw_ep;
 	musb_ep = req->ep;
+	hw_ep = musb_ep->hw_ep;
 
 	/* we shouldn't get here while DMA is active ... but we do ... */
 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -337,9 +343,29 @@ static void txstate(struct musb *musb, s
 			csr);
 
 #ifndef	CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_cppi_enabled() || is_dma_capable() || tusb_dma_omap()) {
 		struct dma_controller	*c = musb->dma_controller;
 
+		/*
+		 * allocate dma channel if not allocated
+		 * for this endpoint already
+		 */
+		if (!musb_ep->dma) {
+			musb_ep->dma = c->channel_alloc(c,
+					hw_ep,
+					musb_ep->is_in);
+
+			/*
+			 * channel allocation is successful; now map the
+			 * buffer to dma and configure the DMA channel for
+			 * sending the packets
+			 */
+			if (musb_ep->dma)
+				map_dma_buffer(req, musb);
+		}
+	}
+	if (musb_ep->dma) {
+		struct dma_controller	*c = musb->dma_controller;
 		use_dma = (request->dma != DMA_ADDR_INVALID);
 
 		/* MUSB_TXCSR_P_ISO is still set correctly */
@@ -621,6 +647,8 @@ static void rxstate(struct musb *musb, s
 	u16			len = musb_ep->packet_sz;
 	u16			csr = musb_readw(epio, MUSB_RXCSR);
 
+	struct musb_hw_ep	*hw_ep;
+	hw_ep = musb_ep->hw_ep;
 
 	/* We shouldn't get here while DMA is active, but we do... */
 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
 		DBG(4, "DMA pending...\n");
@@ -633,30 +661,57 @@ static void rxstate(struct musb *musb, s
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() || is_dma_capable()
+			|| tusb_dma_omap()) {
+		struct dma_controller	*c = musb->dma_controller;
-		struct dma_channel	*channel = musb_ep->dma;
-		/* NOTE: CPPI won't actually stop advancing the DMA
-		 * queue after short packet transfers, so this is almost
-		 * always going to run as IRQ-per-packet DMA so that
-		 * faults will be handled correctly.
+		/*
+		 * Allocate dma channel if not allocated
+		 * for this endpoint already
 		 */
-		if (c->channel_program(channel,
-				musb_ep->packet_sz,
-				!request->short_not_ok,
-				request->dma + request->actual,
-				request->length - request->actual)) {
-
-			/* make sure that if an rxpkt arrived after the irq,
-			 * the cppi engine will be ready to take it as soon
-			 * as DMA is enabled
+		if (!musb_ep->dma) {
+			musb_ep->dma = c->channel_alloc(c,
+					hw_ep,
+					musb_ep->is_in);
+
+			/*
+			 * Channel allocation is successful; now map the buffer
+			 * to dma and configure the DMA channel for receiving
+			 * the packets
 			 */
-			csr &= ~(MUSB_RXCSR_AUTOCLEAR
-					| MUSB_RXCSR_DMAMODE);
-			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
-			musb_writew(epio, MUSB_RXCSR, csr);
-			return;
+			if (musb_ep->dma)
+				map_dma_buffer(req, musb);
+		}
+	}
+	if (is_cppi_enabled()) {
+		if (musb_ep->dma) {
+			struct dma_controller	*c = musb->dma_controller;
+			struct dma_channel	*channel = musb_ep->dma;
+
+			/* NOTE: CPPI won't actually stop advancing the DMA
+			 * queue after short packet transfers, so this is almost
+			 * always going to run as IRQ-per-packet DMA so that
+			 * faults will be handled correctly.
+			 */
+			if (c->channel_program(channel,
+					musb_ep->packet_sz,
+					!request->short_not_ok,
+					request->dma + request->actual,
+					request->length - request->actual)) {
+
+				/*
+				 * Make sure that if an rxpkt arrived after the
+				 * irq, the cppi engine will be ready to take it
+				 * as soon as DMA is enabled
+				 */
+				csr &= ~(MUSB_RXCSR_AUTOCLEAR
+						| MUSB_RXCSR_DMAMODE);
+				csr |= MUSB_RXCSR_DMAENAB
+					| MUSB_RXCSR_P_WZC_BITS;
+				musb_writew(epio, MUSB_RXCSR, csr);
+				return;
+			}
 		}
 	}
 
@@ -664,7 +719,7 @@ static void rxstate(struct musb *musb, s
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (musb_ep->dma) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -1016,20 +1071,13 @@ static int musb_gadget_enable(struct usb
 	/* NOTE: all the I/O code _should_ work fine without DMA, in case
 	 * for some reason you run out of channels here.
 	 */
-	if (is_dma_capable() && musb->dma_controller) {
-		struct dma_controller	*c = musb->dma_controller;
-
-		musb_ep->dma = c->channel_alloc(c, hw_ep,
-				(desc->bEndpointAddress & USB_DIR_IN));
-	} else
-		musb_ep->dma = NULL;
 	musb_ep->desc = desc;
 	musb_ep->busy = 0;
 	musb_ep->wedged = 0;
 	status = 0;
 
-	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
+	pr_debug("%s periph: enabled %s for %s %s, maxpacket %d\n",
 			musb_driver_name, musb_ep->end_point.name,
 			({ char *s; switch (musb_ep->type) {
 			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
@@ -1037,7 +1085,6 @@ static int musb_gadget_enable(struct usb
 			default:			s = "iso"; break;
 			}; s; }),
 			musb_ep->is_in ? "IN" : "OUT",
-			musb_ep->dma ? "dma, " : "",
"dma, " : "", musb_ep->packet_sz); schedule_work(&musb->irq_work); @@ -1179,12 +1226,8 @@ static int musb_gadget_queue(struct usb_ request->epnum = musb_ep->current_epnum; request->tx = musb_ep->is_in; - if (is_dma_capable() && musb_ep->dma) { - map_dma_buffer(request, musb); - } else if (!req->buf) { + if (!req->buf) return -ENODATA; - } else - request->mapped = 0; spin_lock_irqsave(&musb->lock, lockflags); -- To unsubscribe from this list: send the line "unsubscribe linux-usb" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html