[PATCH v2] usb: musb: Dynamic DMA channel allocation in gadget driver

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



From: Hema HK  <hemahk@xxxxxx>

DMA channels are allocated in the musb_gadget_enable function.
Mentor has only 8 DMA channels against 31 endpoints. If there are
multiple gadgets (g_multi) using multiple endpoints that enable the
endpoints but do not perform data transfers, then the other endpoints
end up using PIO mode. DMA channels are utilized more effectively by
allocating a channel when a data transfer is requested and releasing
it when the request completes.

Signed-off-by: Hema HK <hemahk@xxxxxx>
Cc: Felipe Balbi <felipe.balbi@xxxxxxxxx>
---
[Fixed the indentation issue]

Index: linux-2.6/drivers/usb/musb/musb_gadget.c
===================================================================
--- linux-2.6.orig/drivers/usb/musb/musb_gadget.c
+++ linux-2.6/drivers/usb/musb/musb_gadget.c
@@ -120,6 +120,11 @@ __acquires(ep->musb->lock)
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
 	if (is_dma_capable()) {
+		if (ep->dma) {
+			struct dma_controller	*c = ep->musb->dma_controller;
+			c->channel_release(ep->dma);
+			ep->dma = NULL;
+		}
 		if (req->request.dma != DMA_ADDR_INVALID) {
 			if (req->mapped) {
 				dma_unmap_single(musb->controller,
@@ -130,13 +135,14 @@ __acquires(ep->musb->lock)
 							: DMA_FROM_DEVICE);
 				req->request.dma = DMA_ADDR_INVALID;
 				req->mapped = 0;
-			} else if (req->request.dma != DMA_ADDR_INVALID)
+			} else {
 				dma_sync_single_for_cpu(musb->controller,
 						req->request.dma,
 						req->request.length,
 						req->tx
 							? DMA_TO_DEVICE
 							: DMA_FROM_DEVICE);
+				}
 		}
 	}
 	if (request->status == 0)
@@ -268,7 +274,9 @@ static void txstate(struct musb *musb, s
 	u16			fifo_count = 0, csr;
 	int			use_dma = 0;
 
+	struct musb_hw_ep	*hw_ep;
 	musb_ep = req->ep;
+	hw_ep = musb_ep->hw_ep;
 
 	/* we shouldn't get here while DMA is active ... but we do ... */
 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -300,11 +308,46 @@ static void txstate(struct musb *musb, s
 			csr);
 
 #ifndef	CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_cppi_enabled() || is_dma_capable() || tusb_dma_omap()) {
 		struct dma_controller	*c = musb->dma_controller;
+		/* allocate dma channel if not allocated
+		 * for this endpoint already
+		 */
+		if (!musb_ep->dma) {
+			musb_ep->dma = c->channel_alloc(c,
+						hw_ep,
+						musb_ep->is_in);
 
-		use_dma = (request->dma != DMA_ADDR_INVALID);
+		/* Channel allocation was successful; now map the buffer for
+		 * DMA and configure the DMA channel for transferring the
+		 * packets.
+		 */
+			if (musb_ep->dma) {
+				if (req->request.dma == DMA_ADDR_INVALID) {
+					req->request.dma = dma_map_single(
+							musb->controller,
+							req->request.buf,
+							req->request.length,
+							req->tx
+							? DMA_TO_DEVICE
+							: DMA_FROM_DEVICE);
+					req->mapped = 1;
+				} else {
+					dma_sync_single_for_device(
+							musb->controller,
+							req->request.dma,
+							req->request.length,
+							req->tx
+							? DMA_TO_DEVICE
+							: DMA_FROM_DEVICE);
+					req->mapped = 0;
+				}
+			}
+		}
+	}
 
+	if (musb_ep->dma) {
+		struct dma_controller	*c = musb->dma_controller;
+		use_dma = (request->dma != DMA_ADDR_INVALID);
 		/* MUSB_TXCSR_P_ISO is still set correctly */
 
 #ifdef CONFIG_USB_INVENTRA_DMA
@@ -395,10 +438,8 @@ static void txstate(struct musb *musb, s
 #endif
 
 	if (!use_dma) {
-		/* unmap the dma buffer back to cpu if dma channel
-		 *programming fails
-		 */
-		 if (is_dma_capable()) {
+		if (is_cppi_enabled() || is_dma_capable()
+					|| tusb_dma_omap()) {
 			if (req->mapped) {
 				dma_unmap_single(musb->controller,
 					req->request.dma,
@@ -408,7 +449,7 @@ static void txstate(struct musb *musb, s
 						: DMA_FROM_DEVICE);
 				req->request.dma = DMA_ADDR_INVALID;
 				req->mapped = 0;
-			} else if (req->request.dma != DMA_ADDR_INVALID) {
+			} else  {
 				dma_sync_single_for_cpu(musb->controller,
 					req->request.dma,
 					req->request.length,
@@ -417,7 +458,6 @@ static void txstate(struct musb *musb, s
 						: DMA_FROM_DEVICE);
 			}
 		}
-
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
 		request->actual += fifo_count;
@@ -599,6 +639,8 @@ static void rxstate(struct musb *musb, s
 	u16			len = musb_ep->packet_sz;
 	u16			csr = musb_readw(epio, MUSB_RXCSR);
 
+	struct musb_hw_ep	*hw_ep;
+	hw_ep = musb_ep->hw_ep;
 	/* We shouldn't get here while DMA is active, but we do... */
 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
 		DBG(4, "DMA pending...\n");
@@ -611,9 +653,50 @@ static void rxstate(struct musb *musb, s
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() || is_dma_capable()
+					|| tusb_dma_omap()) {
+
 		struct dma_controller	*c = musb->dma_controller;
-		struct dma_channel	*channel = musb_ep->dma;
+
+		/* allocate dma channel if not allocated
+		 * for this endpoint already
+		 */
+		if (!musb_ep->dma) {
+			musb_ep->dma = c->channel_alloc(c,
+						hw_ep,
+						musb_ep->is_in);
+
+		/* Channel allocation was successful; now map the buffer for
+		 * DMA and configure the DMA channel for receiving the
+		 * packets.
+		 */
+			if (musb_ep->dma) {
+				if (req->request.dma == DMA_ADDR_INVALID) {
+					req->request.dma = dma_map_single(
+						musb->controller,
+						req->request.buf,
+						req->request.length,
+						req->tx
+							? DMA_TO_DEVICE
+							: DMA_FROM_DEVICE);
+					req->mapped = 1;
+				} else {
+					dma_sync_single_for_device(
+						musb->controller,
+						req->request.dma,
+						req->request.length,
+						req->tx
+							? DMA_TO_DEVICE
+							: DMA_FROM_DEVICE);
+						req->mapped = 0;
+				}
+			}
+		}
+
+	}
+	if (is_cppi_enabled()) {
+		if (musb_ep->dma) {
+			struct dma_controller	*c = musb->dma_controller;
+			struct dma_channel	*channel = musb_ep->dma;
 
 		/* NOTE:  CPPI won't actually stop advancing the DMA
 		 * queue after short packet transfers, so this is almost
@@ -637,12 +720,13 @@ static void rxstate(struct musb *musb, s
 			return;
 		}
 	}
+	}
 
 	if (csr & MUSB_RXCSR_RXPKTRDY) {
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (musb_ep->dma) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -722,7 +806,8 @@ static void rxstate(struct musb *musb, s
 
 #ifdef	CONFIG_USB_TUSB_OMAP_DMA
 			if (tusb_dma_omap() && musb_ep->dma) {
-				struct dma_controller *c = musb->dma_controller;
+				struct dma_controller *c =
+							musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
 				int ret;
@@ -736,11 +821,12 @@ static void rxstate(struct musb *musb, s
 					return;
 			}
 #endif
-		/* unmap the dma buffer back to cpu if dma channel
-		 * programming fails. This buffer is mapped if the channel
-		 * allocation is successful
-		 */
-			if (is_dma_capable()) {
+			/* Unmap the buffer from dma and map it back to cpu
+			 * if the dma channel programming fails and switching
+			 * back to cpu mode
+			 */
+			if (is_dma_capable() || tusb_dma_omap()
+					|| is_cppi_enabled()) {
 				if (req->mapped) {
 					dma_unmap_single(musb->controller,
 						req->request.dma,
@@ -750,8 +836,7 @@ static void rxstate(struct musb *musb, s
 							: DMA_FROM_DEVICE);
 					req->request.dma = DMA_ADDR_INVALID;
 					req->mapped = 0;
-				} else if (req->request.dma !=
-						DMA_ADDR_INVALID) {
+				} else {
 					dma_sync_single_for_cpu(
 						musb->controller,
 						req->request.dma,
@@ -1011,20 +1096,13 @@ static int musb_gadget_enable(struct usb
 	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
 	 * for some reason you run out of channels here.
 	 */
-	if (is_dma_capable() && musb->dma_controller) {
-		struct dma_controller	*c = musb->dma_controller;
-
-		musb_ep->dma = c->channel_alloc(c, hw_ep,
-				(desc->bEndpointAddress & USB_DIR_IN));
-	} else
-		musb_ep->dma = NULL;
 
 	musb_ep->desc = desc;
 	musb_ep->busy = 0;
 	musb_ep->wedged = 0;
 	status = 0;
 
-	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
+	pr_debug("%s periph: enabled %s for %s %s, maxpacket %d\n",
 			musb_driver_name, musb_ep->end_point.name,
 			({ char *s; switch (musb_ep->type) {
 			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
@@ -1032,7 +1110,6 @@ static int musb_gadget_enable(struct usb
 			default:			s = "iso"; break;
 			}; s; }),
 			musb_ep->is_in ? "IN" : "OUT",
-			musb_ep->dma ? "dma, " : "",
 			musb_ep->packet_sz);
 
 	schedule_work(&musb->irq_work);
@@ -1159,7 +1236,6 @@ static int musb_gadget_queue(struct usb_
 
 	musb_ep = to_musb_ep(ep);
 	musb = musb_ep->musb;
-
 	request = to_musb_request(req);
 	request->musb = musb;
 
@@ -1174,30 +1250,8 @@ static int musb_gadget_queue(struct usb_
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma) {
-		if (request->request.dma == DMA_ADDR_INVALID) {
-			request->request.dma = dma_map_single(
-					musb->controller,
-					request->request.buf,
-					request->request.length,
-					request->tx
-						? DMA_TO_DEVICE
-						: DMA_FROM_DEVICE);
-			request->mapped = 1;
-		} else {
-			dma_sync_single_for_device(musb->controller,
-					request->request.dma,
-					request->request.length,
-					request->tx
-						? DMA_TO_DEVICE
-						: DMA_FROM_DEVICE);
-			request->mapped = 0;
-		}
-	} else if (!req->buf) {
+	if (!req->buf)
 		return -ENODATA;
-	} else
-		request->mapped = 0;
-
 	spin_lock_irqsave(&musb->lock, lockflags);
 
 	/* don't queue if the ep is down */
--
To unsubscribe from this list: send the line "unsubscribe linux-usb" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html

[Index of Archives]     [Linux Media]     [Linux Input]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]     [Old Linux USB Devel Archive]

  Powered by Linux