[PATCH 095/141] USB: imx21-hcd: accept arbitrary transfer buffer alignment

From: Martin Fuzzey <mfuzzey@xxxxxxxxx>

The hardware can only do DMA to 4-byte-aligned addresses.
When this requirement is not met, use PIO or a bounce buffer instead.
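
For reference, the alignment test is simply a check of the low two bits
of the DMA address (this is the unsuitable_for_dma() helper the patch
adds):

	/* The hardware can only DMA to 32-bit (4-byte) aligned addresses */
	static inline bool unsuitable_for_dma(dma_addr_t addr)
	{
		return (addr & 3) != 0;
	}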

PIO is used when the buffer is small enough to fit directly in the
hardware data memory (2 * maxpacket).
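
Note that the data memory only accepts 32-bit writes, so the PIO path
packs bytes into words rather than using memcpy_toio(). Condensed from
the copy_to_dmem() helper added below (dmem, src and count are the
helper's arguments):

	u32 word = 0;
	u8 *p = src;
	int i, byte = 0;

	for (i = 0; i < count; i++) {
		byte = i % 4;
		word += *p++ << (byte * 8);
		if (byte == 3) {	/* flush each completed word */
			writel(word, dmem);
			dmem += 4;
			word = 0;
		}
	}
	if (count && byte != 3)		/* flush any partial final word */
		writel(word, dmem);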

A bounce buffer is used for larger transfers.
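
Schematically, activate_etd() picks between the two strategies like
this (simplified from the patch, error handling omitted):

	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
		if (etd->len <= etd->dmem_size) {
			/* fits in data memory: use PIO */
			if (dir != TD_DIR_IN)
				copy_to_dmem(imx21, etd->dmem_offset,
					     etd->cpu_buffer, etd->len);
			etd->dma_handle = 0;
		} else {
			/* too big for data memory: use a bounce buffer */
			etd->bounce_buffer = (dir == TD_DIR_IN) ?
				kmalloc(etd->len, GFP_ATOMIC) :
				kmemdup(etd->cpu_buffer, etd->len, GFP_ATOMIC);
			etd->dma_handle = dma_map_single(imx21->dev,
					etd->bounce_buffer, etd->len,
					dir == TD_DIR_IN ? DMA_FROM_DEVICE
							 : DMA_TO_DEVICE);
		}
	}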

Signed-off-by: Martin Fuzzey <mfuzzey@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxx>
---
 drivers/usb/host/imx21-hcd.c |  196 +++++++++++++++++++++++++++++++++++-------
 drivers/usb/host/imx21-hcd.h |    8 ++-
 2 files changed, 169 insertions(+), 35 deletions(-)

diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 890a41c..dd132eb 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -57,6 +57,7 @@
 #include <linux/slab.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
+#include <linux/dma-mapping.h>
 
 #include "imx21-hcd.h"
 
@@ -136,9 +137,18 @@ static int imx21_hc_get_frame(struct usb_hcd *hcd)
 	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
 }
 
+static inline bool unsuitable_for_dma(dma_addr_t addr)
+{
+	return (addr & 3) != 0;
+}
 
 #include "imx21-dbg.c"
 
+static void nonisoc_urb_completed_for_etd(
+	struct imx21 *imx21, struct etd_priv *etd, int status);
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
+
 /* =========================================== */
 /* ETD management				*/
 /* ===========================================	*/
@@ -185,7 +195,8 @@ static void reset_etd(struct imx21 *imx21, int num)
 		etd_writel(imx21, num, i, 0);
 	etd->urb = NULL;
 	etd->ep = NULL;
-	etd->td = NULL;;
+	etd->td = NULL;
+	etd->bounce_buffer = NULL;
 }
 
 static void free_etd(struct imx21 *imx21, int num)
@@ -221,26 +232,94 @@ static void setup_etd_dword0(struct imx21 *imx21,
 		((u32) maxpacket << DW0_MAXPKTSIZ));
 }
 
-static void activate_etd(struct imx21 *imx21,
-	int etd_num, dma_addr_t dma, u8 dir)
+/**
+ * Copy a buffer into the controller's data memory.
+ * We cannot use memcpy_toio() because the hardware requires 32-bit writes
+ */
+static void copy_to_dmem(
+	struct imx21 *imx21, int dmem_offset, void *src, int count)
+{
+	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
+	u32 word = 0;
+	u8 *p = src;
+	int byte = 0;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		byte = i % 4;
+		word += (*p++ << (byte * 8));
+		if (byte == 3) {
+			writel(word, dmem);
+			dmem += 4;
+			word = 0;
+		}
+	}
+
+	if (count && byte != 3)
+		writel(word, dmem);
+}
+
+static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
 {
 	u32 etd_mask = 1 << etd_num;
 	struct etd_priv *etd = &imx21->etd[etd_num];
 
+	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
+		/* For non-aligned isoc the condition below is always true */
+		if (etd->len <= etd->dmem_size) {
+			/* Fits into data memory, use PIO */
+			if (dir != TD_DIR_IN) {
+				copy_to_dmem(imx21,
+						etd->dmem_offset,
+						etd->cpu_buffer, etd->len);
+			}
+			etd->dma_handle = 0;
+
+		} else {
+			/* Too big for data memory, use bounce buffer */
+			enum dma_data_direction dmadir;
+
+			if (dir == TD_DIR_IN) {
+				dmadir = DMA_FROM_DEVICE;
+				etd->bounce_buffer = kmalloc(etd->len,
+								GFP_ATOMIC);
+			} else {
+				dmadir = DMA_TO_DEVICE;
+				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
+								etd->len,
+								GFP_ATOMIC);
+			}
+			if (!etd->bounce_buffer) {
+				dev_err(imx21->dev, "failed bounce alloc\n");
+				goto err_bounce_alloc;
+			}
+
+			etd->dma_handle =
+				dma_map_single(imx21->dev,
+						etd->bounce_buffer,
+						etd->len,
+						dmadir);
+			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
+				dev_err(imx21->dev, "failed bounce map\n");
+				goto err_bounce_map;
+			}
+		}
+	}
+
 	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
 	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
 	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 
-	if (dma) {
+	if (etd->dma_handle) {
 		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
 		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
 		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
-		writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
+		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
 		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
 	} else {
 		if (dir != TD_DIR_IN) {
-			/* need to set for ZLP */
+			/* need to set for ZLP and PIO */
 			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 		}
@@ -263,6 +342,14 @@ static void activate_etd(struct imx21 *imx21,
 
 	etd->active_count = 1;
 	writel(etd_mask, imx21->regs + USBH_ETDENSET);
+	return;
+
+err_bounce_map:
+	kfree(etd->bounce_buffer);
+
+err_bounce_alloc:
+	free_dmem(imx21, etd);
+	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
 }
 
 /* ===========================================	*/
@@ -325,7 +412,7 @@ static void activate_queued_etd(struct imx21 *imx21,
 
 	etd->dmem_offset = dmem_offset;
 	urb_priv->active = 1;
-	activate_etd(imx21, etd_num, etd->dma_handle, dir);
+	activate_etd(imx21, etd_num, dir);
 }
 
 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
@@ -385,7 +472,6 @@ static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
 /* ===========================================	*/
 /* End handling 				*/
 /* ===========================================	*/
-static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
 
 /* Endpoint now idle - release its ETD(s) or assign to queued request */
 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
@@ -448,6 +534,24 @@ __acquires(imx21->lock)
 		ep_idle(imx21, ep_priv);
 }
 
+static void nonisoc_urb_completed_for_etd(
+	struct imx21 *imx21, struct etd_priv *etd, int status)
+{
+	struct usb_host_endpoint *ep = etd->ep;
+
+	urb_done(imx21->hcd, etd->urb, status);
+	etd->urb = NULL;
+
+	if (!list_empty(&ep->urb_list)) {
+		struct urb *urb = list_first_entry(
+					&ep->urb_list, struct urb, urb_list);
+
+		dev_vdbg(imx21->dev, "next URB %p\n", urb);
+		schedule_nonisoc_etd(imx21, urb);
+	}
+}
+
+
 /* ===========================================	*/
 /* ISOC Handling ... 				*/
 /* ===========================================	*/
@@ -500,6 +604,8 @@ too_late:
 		etd->ep = td->ep;
 		etd->urb = td->urb;
 		etd->len = td->len;
+		etd->dma_handle = td->dma_handle;
+		etd->cpu_buffer = td->cpu_buffer;
 
 		debug_isoc_submitted(imx21, cur_frame, td);
 
@@ -513,16 +619,17 @@ too_late:
 			(TD_NOTACCESSED << DW3_COMPCODE0) |
 			(td->len << DW3_PKTLEN0));
 
-		activate_etd(imx21, etd_num, td->data, dir);
+		activate_etd(imx21, etd_num, dir);
 	}
 }
 
-static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
 {
 	struct imx21 *imx21 = hcd_to_imx21(hcd);
 	int etd_mask = 1 << etd_num;
-	struct urb_priv *urb_priv = urb->hcpriv;
 	struct etd_priv *etd = imx21->etd + etd_num;
+	struct urb *urb = etd->urb;
+	struct urb_priv *urb_priv = urb->hcpriv;
 	struct td *td = etd->td;
 	struct usb_host_endpoint *ep = etd->ep;
 	int isoc_index = td->isoc_index;
@@ -556,8 +663,13 @@ static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 			bytes_xfrd, td->len, urb, etd_num, isoc_index);
 	}
 
-	if (dir_in)
+	if (dir_in) {
 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+		if (!etd->dma_handle)
+			memcpy_fromio(etd->cpu_buffer,
+				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+				bytes_xfrd);
+	}
 
 	urb->actual_length += bytes_xfrd;
 	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
@@ -716,12 +828,14 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
 	/* set up transfers */
 	td = urb_priv->isoc_td;
 	for (i = 0; i < urb->number_of_packets; i++, td++) {
+		unsigned int offset = urb->iso_frame_desc[i].offset;
 		td->ep = ep;
 		td->urb = urb;
 		td->len = urb->iso_frame_desc[i].length;
 		td->isoc_index = i;
 		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
-		td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
+		td->dma_handle = urb->transfer_dma + offset;
+		td->cpu_buffer = urb->transfer_buffer + offset;
 		list_add_tail(&td->list, &ep_priv->td_list);
 	}
 
@@ -812,13 +926,15 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
 		if (state == US_CTRL_SETUP) {
 			dir = TD_DIR_SETUP;
+			if (unsuitable_for_dma(urb->setup_dma))
+				unmap_urb_setup_for_dma(imx21->hcd, urb);
 			etd->dma_handle = urb->setup_dma;
+			etd->cpu_buffer = urb->setup_packet;
 			bufround = 0;
 			count = 8;
 			datatoggle = TD_TOGGLE_DATA0;
 		} else {	/* US_CTRL_ACK */
 			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
-			etd->dma_handle = urb->transfer_dma;
 			bufround = 0;
 			count = 0;
 			datatoggle = TD_TOGGLE_DATA1;
@@ -826,7 +942,11 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	} else {
 		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
 		bufround = (dir == TD_DIR_IN) ? 1 : 0;
+		if (unsuitable_for_dma(urb->transfer_dma))
+			unmap_urb_for_dma(imx21->hcd, urb);
+
 		etd->dma_handle = urb->transfer_dma;
+		etd->cpu_buffer = urb->transfer_buffer;
 		if (usb_pipebulk(pipe) && (state == US_BULK0))
 			count = 0;
 		else
@@ -901,14 +1021,15 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	/* enable the ETD to kick off transfer */
 	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
 		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
-	activate_etd(imx21, etd_num, etd->dma_handle, dir);
+	activate_etd(imx21, etd_num, dir);
 
 }
 
-static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
 {
 	struct imx21 *imx21 = hcd_to_imx21(hcd);
 	struct etd_priv *etd = &imx21->etd[etd_num];
+	struct urb *urb = etd->urb;
 	u32 etd_mask = 1 << etd_num;
 	struct urb_priv *urb_priv = urb->hcpriv;
 	int dir;
@@ -930,7 +1051,20 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 	if (dir == TD_DIR_IN) {
 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+		if (etd->bounce_buffer) {
+			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
+			dma_unmap_single(imx21->dev,
+				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
+		} else if (!etd->dma_handle && bytes_xfrd) { /* PIO */
+			memcpy_fromio(etd->cpu_buffer,
+				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+				bytes_xfrd);
+		}
 	}
+
+	kfree(etd->bounce_buffer);
+	etd->bounce_buffer = NULL;
 	free_dmem(imx21, etd);
 
 	urb->error_count = 0;
@@ -988,24 +1122,15 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 		break;
 	}
 
-	if (!etd_done) {
+	if (etd_done)
+		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
+	else {
 		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
 		schedule_nonisoc_etd(imx21, urb);
-	} else {
-		struct usb_host_endpoint *ep = urb->ep;
-
-		urb_done(hcd, urb, cc_to_error[cc]);
-		etd->urb = NULL;
-
-		if (!list_empty(&ep->urb_list)) {
-			urb = list_first_entry(&ep->urb_list,
-				struct urb, urb_list);
-			dev_vdbg(imx21->dev, "next URB %p\n", urb);
-			schedule_nonisoc_etd(imx21, urb);
-		}
 	}
 }
 
+
 static struct ep_priv *alloc_ep(void)
 {
 	int i;
@@ -1146,9 +1271,13 @@ static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
 	} else if (urb_priv->active) {
 		int etd_num = ep_priv->etd[0];
 		if (etd_num != -1) {
+			struct etd_priv *etd = &imx21->etd[etd_num];
+
 			disactivate_etd(imx21, etd_num);
-			free_dmem(imx21, &imx21->etd[etd_num]);
-			imx21->etd[etd_num].urb = NULL;
+			free_dmem(imx21, etd);
+			etd->urb = NULL;
+			kfree(etd->bounce_buffer);
+			etd->bounce_buffer = NULL;
 		}
 	}
 
@@ -1248,9 +1377,9 @@ static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
 		}
 
 		if (usb_pipeisoc(etd->urb->pipe))
-			isoc_etd_done(hcd, etd->urb, etd_num);
+			isoc_etd_done(hcd, etd_num);
 		else
-			nonisoc_etd_done(hcd, etd->urb, etd_num);
+			nonisoc_etd_done(hcd, etd_num);
 	}
 
 	/* only enable SOF interrupt if it may be needed for the kludge */
@@ -1718,6 +1847,7 @@ static int imx21_probe(struct platform_device *pdev)
 	}
 
 	imx21 = hcd_to_imx21(hcd);
+	imx21->hcd = hcd;
 	imx21->dev = &pdev->dev;
 	imx21->pdata = pdev->dev.platform_data;
 	if (!imx21->pdata)
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
index 1b0d913..87b29fd 100644
--- a/drivers/usb/host/imx21-hcd.h
+++ b/drivers/usb/host/imx21-hcd.h
@@ -250,6 +250,7 @@
 #define USBCTRL_USB_BYP			(1 << 2)
 #define USBCTRL_HOST1_TXEN_OE		(1 << 1)
 
+#define USBOTG_DMEM		0x1000
 
 /* Values in TD blocks */
 #define TD_DIR_SETUP	    0
@@ -346,8 +347,8 @@ struct td {
 	struct list_head list;
 	struct urb *urb;
 	struct usb_host_endpoint *ep;
-	dma_addr_t data;
-	unsigned long buf_addr;
+	dma_addr_t dma_handle;
+	void *cpu_buffer;
 	int len;
 	int frame;
 	int isoc_index;
@@ -360,6 +361,8 @@ struct etd_priv {
 	struct td *td;
 	struct list_head queue;
 	dma_addr_t dma_handle;
+	void *cpu_buffer;
+	void *bounce_buffer;
 	int alloc;
 	int len;
 	int dmem_size;
@@ -412,6 +415,7 @@ struct debug_isoc_trace {
 struct imx21 {
 	spinlock_t lock;
 	struct device *dev;
+	struct usb_hcd *hcd;
 	struct mx21_usbh_platform_data *pdata;
 	struct list_head dmem_list;
 	struct list_head queue_for_etd; /* eps queued due to etd shortage */
-- 
1.7.2
