[RFC/PATCH 2/5] usb: musb: host: transfer queue for every endpoint

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



This mimics the QTDs from the EHCI spec: every endpoint will have a
QH filled with QTDs.

There is one QTD per URB, but QTDs could easily be split
for example per musb_dma_channel.max_len.

Signed-off-by: Heikki Krogerus <ext-heikki.krogerus@xxxxxxxxx>
---
 drivers/usb/musb/musb_core.c |   35 +++-
 drivers/usb/musb/musb_core.h |    5 +-
 drivers/usb/musb/musb_host.c |  552 +++++++++++++++++++-----------------------
 drivers/usb/musb/musb_host.h |   57 ++---
 4 files changed, 296 insertions(+), 353 deletions(-)

diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 00a4e19..52cfbf5 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1114,6 +1114,25 @@ fifo_setup(struct musb *musb, struct musb_hw_ep  *hw_ep,
 	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
 
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
+	/* allocate queue heads */
+	hw_ep->in_qh = kzalloc(sizeof(*hw_ep->in_qh), GFP_KERNEL);
+	if (!hw_ep->in_qh)
+		return -ENOMEM;
+	hw_ep->in_qh->hw_ep = hw_ep;
+	hw_ep->in_qh->musb = musb;
+	INIT_LIST_HEAD(&hw_ep->in_qh->qtd_list);
+
+	/* for EP0, only one queue head */
+	if (offset) {
+		hw_ep->out_qh = kzalloc(sizeof(*hw_ep->out_qh), GFP_KERNEL);
+		if (!hw_ep->out_qh)
+			return -ENOMEM;
+		hw_ep->out_qh->hw_ep = hw_ep;
+		hw_ep->out_qh->musb = musb;
+		INIT_LIST_HEAD(&hw_ep->out_qh->qtd_list);
+	} else
+		hw_ep->out_qh = hw_ep->in_qh;
+
 	/* EP0 reserved endpoint for control, bidirectional;
 	 * EP1 reserved for bulk, two unidirection halves.
 	 */
@@ -1761,10 +1780,6 @@ allocate_instance(struct device *dev,
 	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
 
 	musb = hcd_to_musb(hcd);
-	INIT_LIST_HEAD(&musb->control);
-	INIT_LIST_HEAD(&musb->in_bulk);
-	INIT_LIST_HEAD(&musb->out_bulk);
-
 	hcd->uses_new_polling = 1;
 
 	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
@@ -1795,6 +1810,10 @@ allocate_instance(struct device *dev,
 
 static void musb_free(struct musb *musb)
 {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	struct musb_hw_ep *hw_ep;
+	int i;
+#endif
 	/* this has multiple entry modes. it handles fault cleanup after
 	 * probe(), where things may be partially set up, as well as rmmod
 	 * cleanup after everything's been de-activated.
@@ -1834,6 +1853,14 @@ static void musb_free(struct musb *musb)
 	}
 
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
+	hw_ep = musb->endpoints;
+	kfree(hw_ep->in_qh);
+	for (i = 1, hw_ep = musb->endpoints + 1;
+			i < musb->nr_endpoints;
+			i++, hw_ep++) {
+		kfree(hw_ep->in_qh);
+		kfree(hw_ep->out_qh);
+	}
 	usb_put_hcd(musb_to_hcd(musb));
 #else
 	kfree(musb);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 84a941c..77944d9 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -283,6 +283,7 @@ struct musb_hw_ep {
 	/* currently scheduled peripheral endpoint */
 	struct musb_qh		*in_qh;
 	struct musb_qh		*out_qh;
+	u8			reserved:1;
 
 	u8			rx_reinit;
 	u8			tx_reinit;
@@ -349,10 +350,6 @@ struct musb {
 	 */
 	struct musb_hw_ep	*bulk_ep;
 
-	struct list_head	control;	/* of musb_qh */
-	struct list_head	in_bulk;	/* of musb_qh */
-	struct list_head	out_bulk;	/* of musb_qh */
-
 	struct timer_list	otg_timer;
 #endif
 
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index e2d1eb2..b6139b7 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -97,7 +97,7 @@
 
 
 static void musb_ep_program(struct musb *musb, u8 epnum,
-			struct urb *urb, int is_out,
+			struct musb_qtd *qtd, int is_out,
 			u8 *buf, u32 offset, u32 len);
 
 /*
@@ -169,17 +169,36 @@ static inline void musb_h_tx_start(struct musb_hw_ep *ep)
 
 }
 
-static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
+static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
 {
-	if (is_in != 0 || ep->is_shared_fifo)
-		ep->in_qh  = qh;
-	if (is_in == 0 || ep->is_shared_fifo)
-		ep->out_qh = qh;
+	return is_in ? ep->in_qh : ep->out_qh;
 }
 
-static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
+static struct musb_qtd *musb_qh_get_qtd(struct musb_qh *qh)
 {
-	return is_in ? ep->in_qh : ep->out_qh;
+	return list_entry(qh->qtd_list.next, struct musb_qtd, qtd_list);
+}
+
+/*
+ * REVISIT consider a dedicated qtd kmem_cache, so it's harder
+ * for bugs in other kernel code to break this driver...
+ */
+static struct musb_qtd *musb_allocate_qtd(gfp_t mem_flags)
+{
+	struct musb_qtd *qtd;
+
+	qtd = kzalloc(sizeof *qtd, mem_flags);
+	if (qtd)
+		INIT_LIST_HEAD(&qtd->qtd_list);
+
+	return qtd;
+}
+
+static inline void musb_release_qtd(struct musb_qtd *qtd)
+{
+	list_del(&qtd->qtd_list);
+	kfree(qtd);
+	qtd = NULL;
 }
 
 /*
@@ -189,12 +208,13 @@ static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
  * Context: controller locked, irqs blocked
  */
 static void
-musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+musb_start_urb(struct musb *musb, int is_in, struct musb_qtd *qtd)
 {
 	u16			frame;
 	u32			len;
 	void __iomem		*mbase =  musb->mregs;
-	struct urb		*urb = next_urb(qh);
+	struct musb_qh		*qh = qtd->qh;
+	struct urb		*urb = qtd->urb;
 	void			*buf = urb->transfer_buffer;
 	u32			offset = 0;
 	struct musb_hw_ep	*hw_ep = qh->hw_ep;
@@ -203,11 +223,11 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 	int			epnum = hw_ep->epnum;
 
 	/* initialize software qh state */
-	qh->offset = 0;
-	qh->segsize = 0;
+	qtd->offset = 0;
+	qtd->segsize = 0;
 
 	/* gather right source of data */
-	switch (qh->type) {
+	switch (qtd->type) {
 	case USB_ENDPOINT_XFER_CONTROL:
 		/* control transfers always start with SETUP */
 		is_in = 0;
@@ -216,8 +236,8 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 		len = 8;
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
-		qh->iso_idx = 0;
-		qh->frame = 0;
+		qtd->iso_idx = 0;
+		qtd->frame = 0;
 		offset = urb->iso_frame_desc[0].offset;
 		len = urb->iso_frame_desc[0].length;
 		break;
@@ -228,9 +248,9 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 	}
 
 	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
-			qh, urb, address, qh->epnum,
+			qh, urb, address, qtd->epnum,
 			is_in ? "in" : "out",
-			({char *s; switch (qh->type) {
+			({char *s; switch (qtd->type) {
 			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
 			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
 			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
@@ -239,15 +259,14 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 			epnum, buf + offset, len);
 
 	/* Configure endpoint */
-	musb_ep_set_qh(hw_ep, is_in, qh);
-	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
+	musb_ep_program(musb, epnum, qtd, !is_in, buf, offset, len);
 
 	/* transmit may have more work: start it when it is time */
 	if (is_in)
 		return;
 
 	/* determine if the time is right for a periodic transfer */
-	switch (qh->type) {
+	switch (qtd->type) {
 	case USB_ENDPOINT_XFER_ISOC:
 	case USB_ENDPOINT_XFER_INT:
 		DBG(3, "check whether there's still time for periodic Tx\n");
@@ -260,10 +279,10 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 			/* REVISIT the SOF irq handler shouldn't duplicate
 			 * this code; and we don't init urb->start_frame...
 			 */
-			qh->frame = 0;
+			qtd->frame = 0;
 			goto start;
 		} else {
-			qh->frame = urb->start_frame;
+			qtd->frame = urb->start_frame;
 			/* enable SOF interrupt so we can count down */
 			DBG(1, "SOF for %d\n", epnum);
 #if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
@@ -314,10 +333,10 @@ __acquires(musb->lock)
 }
 
 /* For bulk/interrupt endpoints only */
-static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
+static inline void musb_save_toggle(struct musb_qtd *qtd, int is_in,
 				    struct urb *urb)
 {
-	void __iomem		*epio = qh->hw_ep->regs;
+	void __iomem		*epio = qtd->qh->hw_ep->regs;
 	u16			csr;
 
 	/*
@@ -330,31 +349,32 @@ static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
 	else
 		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
 
-	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
+	usb_settoggle(urb->dev, qtd->epnum, !is_in, csr ? 1 : 0);
 }
 
 /*
  * Advance this hardware endpoint's queue, completing the specified URB and
- * advancing to either the next URB queued to that qh, or else invalidating
- * that qh and advancing to the next qh scheduled after the current one.
+ * advancing to either the next qtd queued to that qh, or else invalidating
+ * that qtd.
  *
  * Context: caller owns controller lock, IRQs are blocked
  */
-static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+static void musb_advance_schedule(struct musb *musb, struct musb_qtd *qtd,
 				  struct musb_hw_ep *hw_ep, int is_in)
 {
-	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
+	struct musb_qh		*qh = qtd->qh;
+	struct urb		*urb = qtd->urb;
 	struct musb_hw_ep	*ep = qh->hw_ep;
-	int			ready = qh->is_ready;
 	int			status;
+	struct usb_host_endpoint *hep = qtd->hep;
 
 	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
 
 	/* save toggle eagerly, for paranoia */
-	switch (qh->type) {
+	switch (qtd->type) {
 	case USB_ENDPOINT_XFER_BULK:
 	case USB_ENDPOINT_XFER_INT:
-		musb_save_toggle(qh, is_in, urb);
+		musb_save_toggle(qtd, is_in, urb);
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
 		if (status == 0 && urb->error_count)
@@ -362,56 +382,29 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
 		break;
 	}
 
-	qh->is_ready = 0;
+	qtd->is_ready = 0;
+	urb->hcpriv = NULL;
 	musb_giveback(musb, urb, status);
-	qh->is_ready = ready;
+	musb_release_qtd(qtd);
 
 	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
-	 * invalidate qh as soon as list_empty(&hep->urb_list)
+	 * invalidate qh as soon as list_empty(&qh->qtd_list)
 	 */
-	if (list_empty(&qh->hep->urb_list)) {
-		struct list_head	*head;
-
+	if (list_empty(&qh->qtd_list)) {
 		if (is_in)
 			ep->rx_reinit = 1;
 		else
 			ep->tx_reinit = 1;
 
-		/* Clobber old pointers to this qh */
-		musb_ep_set_qh(ep, is_in, NULL);
-		qh->hep->hcpriv = NULL;
-
-		switch (qh->type) {
-
-		case USB_ENDPOINT_XFER_CONTROL:
-		case USB_ENDPOINT_XFER_BULK:
-			/* fifo policy for these lists, except that NAKing
-			 * should rotate a qh to the end (for fairness).
-			 */
-			if (qh->mux == 1) {
-				head = qh->ring.prev;
-				list_del(&qh->ring);
-				kfree(qh);
-				qh = first_qh(head);
-				break;
-			}
-
-		case USB_ENDPOINT_XFER_ISOC:
-		case USB_ENDPOINT_XFER_INT:
-			/* this is where periodic bandwidth should be
-			 * de-allocated if it's tracked and allocated;
-			 * and where we'd update the schedule tree...
-			 */
-			kfree(qh);
-			qh = NULL;
-			break;
-		}
-	}
+		hep->hcpriv = NULL;
+		ep->reserved = false;
+	}
+	qtd = list_empty(&qh->qtd_list) ? NULL : musb_qh_get_qtd(qh);
 
-	if (qh != NULL && qh->is_ready) {
+	if (qtd != NULL && qtd->is_ready) {
 		DBG(4, "... next ep%d %cX urb %p\n",
-		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
-		musb_start_urb(musb, is_in, qh);
+		    hw_ep->epnum, is_in ? 'R' : 'T', qtd->urb);
+		musb_start_urb(musb, is_in, qtd);
 	}
 }
 
@@ -438,7 +431,8 @@ static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
  * PIO RX for a packet (or part of it).
  */
 static bool
-musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
+musb_host_packet_rx(struct musb *musb, struct musb_qtd *qtd,
+			u8 epnum, u8 iso_err)
 {
 	u16			rx_count;
 	u8			*buf;
@@ -448,14 +442,14 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
 	int			do_flush = 0;
 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
 	void __iomem		*epio = hw_ep->regs;
-	struct musb_qh		*qh = hw_ep->in_qh;
+	struct urb		*urb = qtd->urb;
 	int			pipe = urb->pipe;
 	void			*buffer = urb->transfer_buffer;
 
 	/* musb_ep_select(mbase, epnum); */
 	rx_count = musb_readw(epio, MUSB_RXCOUNT);
 	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
-			urb->transfer_buffer, qh->offset,
+			urb->transfer_buffer, qtd->offset,
 			urb->transfer_buffer_length);
 
 	/* unload FIFO */
@@ -468,7 +462,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
 			urb->error_count++;
 		}
 
-		d = urb->iso_frame_desc + qh->iso_idx;
+		d = urb->iso_frame_desc + qtd->iso_idx;
 		buf = buffer + d->offset;
 		length = d->length;
 		if (rx_count > length) {
@@ -486,11 +480,11 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
 		d->status = status;
 
 		/* see if we are done */
-		done = (++qh->iso_idx >= urb->number_of_packets);
+		done = (++qtd->iso_idx >= urb->number_of_packets);
 	} else {
 		/* non-isoch */
-		buf = buffer + qh->offset;
-		length = urb->transfer_buffer_length - qh->offset;
+		buf = buffer + qtd->offset;
+		length = urb->transfer_buffer_length - qtd->offset;
 		if (rx_count > length) {
 			if (urb->status == -EINPROGRESS)
 				urb->status = -EOVERFLOW;
@@ -499,11 +493,11 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
 		} else
 			length = rx_count;
 		urb->actual_length += length;
-		qh->offset += length;
+		qtd->offset += length;
 
 		/* see if we are done */
 		done = (urb->actual_length == urb->transfer_buffer_length)
-			|| (rx_count < qh->maxpacket)
+			|| (rx_count < qtd->maxpacket)
 			|| (urb->status != -EINPROGRESS);
 		if (done
 				&& (urb->status == -EINPROGRESS)
@@ -539,7 +533,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
  * the busy/not-empty tests are basically paranoia.
  */
 static void
-musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+musb_rx_reinit(struct musb *musb, struct musb_qtd *qtd, struct musb_hw_ep *ep)
 {
 	u16	csr;
 
@@ -578,19 +572,19 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
 
 	/* target addr and (for multipoint) hub addr/port */
 	if (musb->is_multipoint) {
-		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
-		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
-		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
+		musb_write_rxfunaddr(ep->target_regs, qtd->addr_reg);
+		musb_write_rxhubaddr(ep->target_regs, qtd->h_addr_reg);
+		musb_write_rxhubport(ep->target_regs, qtd->h_port_reg);
 
 	} else
-		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
+		musb_writeb(musb->mregs, MUSB_FADDR, qtd->addr_reg);
 
 	/* protocol/endpoint, interval/NAKlimit, i/o size */
-	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
-	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
+	musb_writeb(ep->regs, MUSB_RXTYPE, qtd->type_reg);
+	musb_writeb(ep->regs, MUSB_RXINTERVAL, qtd->intv_reg);
 	/* NOTE: bulk combining rewrites high bits of maxpacket */
 	musb_writew(ep->regs, MUSB_RXMAXP,
-			qh->maxpacket | ((qh->hb_mult - 1) << 11));
+			qtd->maxpacket | ((qtd->hb_mult - 1) << 11));
 
 	ep->rx_reinit = 0;
 }
@@ -600,21 +594,21 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
  * Context: irqs blocked, controller lock held
  */
 static void musb_ep_program(struct musb *musb, u8 epnum,
-			struct urb *urb, int is_out,
+			struct musb_qtd *qtd, int is_out,
 			u8 *buf, u32 offset, u32 len)
 {
 	void __iomem		*mbase = musb->mregs;
-	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
+	struct musb_hw_ep	*hw_ep = qtd->qh->hw_ep;
 	void __iomem		*epio = hw_ep->regs;
-	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
-	u16			packet_sz = qh->maxpacket;
+	struct urb		*urb = qtd->urb;
+	u16			packet_sz = qtd->maxpacket;
 
 	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
 				"h_addr%02x h_port%02x bytes %d\n",
 			is_out ? "-->" : "<--",
 			epnum, urb, urb->dev->speed,
-			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
-			qh->h_addr_reg, qh->h_port_reg,
+			qtd->addr_reg, qtd->epnum, is_out ? "out" : "in",
+			qtd->h_addr_reg, qtd->h_port_reg,
 			len);
 
 	musb_ep_select(mbase, epnum);
@@ -653,7 +647,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 					);
 			csr |= MUSB_TXCSR_MODE;
 
-			if (usb_gettoggle(urb->dev, qh->epnum, 1))
+			if (usb_gettoggle(urb->dev, qtd->epnum, 1))
 				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
 					| MUSB_TXCSR_H_DATATOGGLE;
 			else
@@ -671,17 +665,17 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 
 		/* target addr and (for multipoint) hub addr/port */
 		if (musb->is_multipoint) {
-			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
-			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
-			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
+			musb_write_txfunaddr(mbase, epnum, qtd->addr_reg);
+			musb_write_txhubaddr(mbase, epnum, qtd->h_addr_reg);
+			musb_write_txhubport(mbase, epnum, qtd->h_port_reg);
 			/* FIXME if !epnum, do the same for RX ... */
 		} else
-			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
+			musb_writeb(mbase, MUSB_FADDR, qtd->addr_reg);
 
 		/* protocol/endpoint/interval/NAKlimit */
 		if (epnum) {
-			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
-			if (can_bulk_split(musb, qh->type))
+			musb_writeb(epio, MUSB_TXTYPE, qtd->type_reg);
+			if (can_bulk_split(musb, qtd->type))
 				musb_writew(epio, MUSB_TXMAXP,
 					packet_sz
 					| ((hw_ep->max_packet_sz_tx /
@@ -689,15 +683,15 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 			else
 				musb_writew(epio, MUSB_TXMAXP,
 					packet_sz);
-			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
+			musb_writeb(epio, MUSB_TXINTERVAL, qtd->intv_reg);
 		} else {
-			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
+			musb_writeb(epio, MUSB_NAKLIMIT0, qtd->intv_reg);
 			if (musb->is_multipoint)
 				musb_writeb(epio, MUSB_TYPE0,
-						qh->type_reg);
+						qtd->type_reg);
 		}
 
-		if (can_bulk_split(musb, qh->type))
+		if (can_bulk_split(musb, qtd->type))
 			load_count = min((u32) hw_ep->max_packet_sz_tx,
 						len);
 		else
@@ -705,7 +699,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 
 		if (load_count) {
 			/* PIO to load FIFO */
-			qh->segsize = load_count;
+			qtd->segsize = load_count;
 			musb_write_fifo(hw_ep, load_count, buf);
 		}
 
@@ -717,15 +711,15 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 		u16	csr;
 
 		if (hw_ep->rx_reinit) {
-			musb_rx_reinit(musb, qh, hw_ep);
+			musb_rx_reinit(musb, qtd, hw_ep);
 
 			/* init new state: toggle and NYET, maybe DMA later */
-			if (usb_gettoggle(urb->dev, qh->epnum, 0))
+			if (usb_gettoggle(urb->dev, qtd->epnum, 0))
 				csr = MUSB_RXCSR_H_WR_DATATOGGLE
 					| MUSB_RXCSR_H_DATATOGGLE;
 			else
 				csr = 0;
-			if (qh->type == USB_ENDPOINT_XFER_INT)
+			if (qtd->type == USB_ENDPOINT_XFER_INT)
 				csr |= MUSB_RXCSR_DISNYET;
 
 		} else {
@@ -753,13 +747,14 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
  * Service the default endpoint (ep0) as host.
  * Return true until it's time to start the status stage.
  */
-static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+static bool
+musb_h_ep0_continue(struct musb *musb, u16 len, struct musb_qtd *qtd)
 {
 	bool			 more = false;
 	u8			*fifo_dest = NULL;
 	u16			fifo_count = 0;
 	struct musb_hw_ep	*hw_ep = musb->control_ep;
-	struct musb_qh		*qh = hw_ep->in_qh;
+	struct urb		*urb = qtd->urb;
 	struct usb_ctrlrequest	*request;
 
 	switch (musb->ep0_stage) {
@@ -773,7 +768,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
 		musb_read_fifo(hw_ep, fifo_count, fifo_dest);
 
 		urb->actual_length += fifo_count;
-		if (len < qh->maxpacket) {
+		if (len < qtd->maxpacket) {
 			/* always terminate on short read; it's
 			 * rarely reported as an error.
 			 */
@@ -799,7 +794,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
 		}
 		/* FALLTHROUGH */
 	case MUSB_EP0_OUT:
-		fifo_count = min_t(size_t, qh->maxpacket,
+		fifo_count = min_t(size_t, qtd->maxpacket,
 				   urb->transfer_buffer_length -
 				   urb->actual_length);
 		if (fifo_count) {
@@ -838,11 +833,12 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
 	struct musb_hw_ep	*hw_ep = musb->control_ep;
 	void __iomem		*epio = hw_ep->regs;
 	struct musb_qh		*qh = hw_ep->in_qh;
+	struct musb_qtd		*qtd = musb_qh_get_qtd(qh);
 	bool			complete = false;
 	irqreturn_t		retval = IRQ_NONE;
 
 	/* ep0 only has one queue, "in" */
-	urb = next_urb(qh);
+	urb = qtd->urb;
 
 	musb_ep_select(mbase, 0);
 	csr = musb_readw(epio, MUSB_CSR0);
@@ -917,7 +913,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
 
 	if (!complete) {
 		/* call common logic and prepare response */
-		if (musb_h_ep0_continue(musb, len, urb)) {
+		if (musb_h_ep0_continue(musb, len, qtd)) {
 			/* more packets required */
 			csr = (MUSB_EP0_IN == musb->ep0_stage)
 				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
@@ -944,7 +940,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
 
 	/* call completion handler if done */
 	if (complete)
-		musb_advance_schedule(musb, urb, hw_ep, 1);
+		musb_advance_schedule(musb, qtd, hw_ep, 1);
 done:
 	return retval;
 }
@@ -961,7 +957,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
 	void __iomem		*epio = hw_ep->regs;
 	struct musb_qh		*qh = hw_ep->out_qh;
-	struct urb		*urb = next_urb(qh);
+	struct musb_qtd		*qtd = musb_qh_get_qtd(qh);
+	struct urb		*urb = qtd->urb;
 	u32			status = 0;
 	void __iomem		*mbase = musb->mregs;
 
@@ -1031,16 +1028,16 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	}
 
 	if (!status || usb_pipeisoc(pipe)) {
-		length = qh->segsize;
-		qh->offset += length;
+		length = qtd->segsize;
+		qtd->offset += length;
 
 		if (usb_pipeisoc(pipe)) {
 			struct usb_iso_packet_descriptor	*d;
 
-			d = urb->iso_frame_desc + qh->iso_idx;
+			d = urb->iso_frame_desc + qtd->iso_idx;
 			d->actual_length = length;
 			d->status = status;
-			if (++qh->iso_idx >= urb->number_of_packets) {
+			if (++qtd->iso_idx >= urb->number_of_packets) {
 				done = true;
 			} else {
 				d++;
@@ -1049,14 +1046,14 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 			}
 		} else {
 			/* see if we need to send more data, or ZLP */
-			if (qh->segsize < qh->maxpacket)
+			if (qtd->segsize < qtd->maxpacket)
 				done = true;
-			else if (qh->offset == urb->transfer_buffer_length
+			else if (qtd->offset == urb->transfer_buffer_length
 					&& !(urb->transfer_flags
 						& URB_ZERO_PACKET))
 				done = true;
 			if (!done) {
-				offset = qh->offset;
+				offset = qtd->offset;
 				length = urb->transfer_buffer_length - offset;
 			}
 		}
@@ -1074,8 +1071,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	if (done) {
 		/* set status */
 		urb->status = status;
-		urb->actual_length = qh->offset;
-		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
+		urb->actual_length = qtd->offset;
+		musb_advance_schedule(musb, qtd, hw_ep, USB_DIR_OUT);
 		return;
 	} else	if (tx_csr & MUSB_TXCSR_DMAENAB) {
 		DBG(1, "not complete, but DMA enabled?\n");
@@ -1089,10 +1086,10 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	 * (and presumably, FIFO is not half-full) we should write *two*
 	 * packets before updating TXCSR; other docs disagree...
 	 */
-	if (length > qh->maxpacket)
-		length = qh->maxpacket;
+	if (length > qtd->maxpacket)
+		length = qtd->maxpacket;
 	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
-	qh->segsize = length;
+	qtd->segsize = length;
 
 	musb_ep_select(mbase, epnum);
 	musb_writew(epio, MUSB_TXCSR,
@@ -1100,15 +1097,14 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 }
 
 
-/* Schedule next QH from musb->in_bulk and move the current qh to
- * the end; avoids starvation for other endpoints.
- */
+/* move the current qtd to the end; avoids starvation for other endpoints. */
 static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
 {
 	struct urb		*urb;
 	void __iomem		*mbase = musb->mregs;
 	void __iomem		*epio = ep->regs;
-	struct musb_qh		*cur_qh, *next_qh;
+	struct musb_qh		*qh = musb_ep_get_qh(ep, 1);
+	struct musb_qtd		*cur_qtd, *next_qtd;
 	u16			rx_csr;
 
 	musb_ep_select(mbase, ep->epnum);
@@ -1119,20 +1115,20 @@ static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
 	rx_csr &= ~MUSB_RXCSR_DATAERROR;
 	musb_writew(epio, MUSB_RXCSR, rx_csr);
 
-	cur_qh = first_qh(&musb->in_bulk);
-	if (cur_qh) {
-		urb = next_urb(cur_qh);
-		musb_save_toggle(cur_qh, 1, urb);
+	cur_qtd = musb_qh_get_qtd(qh);
+	if (cur_qtd) {
+		urb = cur_qtd->urb;
+		musb_save_toggle(cur_qtd, 1, urb);
 
 		/* move cur_qh to end of queue */
-		list_move_tail(&cur_qh->ring, &musb->in_bulk);
+		list_move_tail(&cur_qtd->qtd_list, &qh->qtd_list);
 
 		/* get the next qh from musb->in_bulk */
-		next_qh = first_qh(&musb->in_bulk);
+		next_qtd = musb_qh_get_qtd(qh);
 
 		/* set rx_reinit and schedule the next qh */
 		ep->rx_reinit = 1;
-		musb_start_urb(musb, 1, next_qh);
+		musb_start_urb(musb, 1, next_qtd);
 	}
 }
 
@@ -1146,6 +1142,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
 	void __iomem		*epio = hw_ep->regs;
 	struct musb_qh		*qh = hw_ep->in_qh;
+	struct musb_qtd		*qtd = musb_qh_get_qtd(qh);
 	size_t			xfer_len;
 	void __iomem		*mbase = musb->mregs;
 	int			pipe;
@@ -1156,7 +1153,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 
 	musb_ep_select(mbase, epnum);
 
-	urb = next_urb(qh);
+	urb = qtd->urb;
 	status = 0;
 	xfer_len = 0;
 
@@ -1195,7 +1192,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 
 	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
 
-		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
+		if (USB_ENDPOINT_XFER_ISOC != qtd->type) {
 			DBG(6, "RX end %d NAK timeout\n", epnum);
 
 			/* NOTE: NAKing is *NOT* an error, so we want to
@@ -1207,9 +1204,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 			 * other devices without this logic.
 			 */
 			if (usb_pipebulk(urb->pipe)
-					&& qh->mux == 1
-					&& !list_is_singular(&musb->in_bulk)) {
-				musb_bulk_rx_nak_timeout(musb, hw_ep);
+					&& !list_is_singular(&qh->qtd_list)) {
+				musb_bulk_rx_nak_timeout(musb, hw_ep);
 				return;
 			}
 			musb_ep_select(mbase, epnum);
@@ -1261,40 +1257,32 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 		}
 
 		/* we are expecting IN packets */
-		done = musb_host_packet_rx(musb, urb,
+		done = musb_host_packet_rx(musb, qtd,
 				epnum, iso_err);
 		DBG(6, "read %spacket\n", done ? "last " : "");
 	}
 
 finish:
 	urb->actual_length += xfer_len;
-	qh->offset += xfer_len;
+	qtd->offset += xfer_len;
 	if (done) {
 		if (urb->status == -EINPROGRESS)
 			urb->status = status;
-		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
+		musb_advance_schedule(musb, qtd, hw_ep, USB_DIR_IN);
 	}
 }
 
-/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
- * the software schedule associates multiple such nodes with a given
- * host side hardware endpoint + direction; scheduling may activate
- * that hardware endpoint.
- */
-static int musb_schedule(
+static int musb_select_ep(
 	struct musb		*musb,
-	struct musb_qh		*qh,
+	struct musb_qtd		*qtd,
 	int			is_in)
 {
-	int			idle;
 	int			best_diff;
 	int			best_end, epnum;
 	struct musb_hw_ep	*hw_ep = NULL;
-	struct list_head	*head = NULL;
 
 	/* use fixed hardware for control and bulk */
-	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
-		head = &musb->control;
+	if (qtd->type == USB_ENDPOINT_XFER_CONTROL) {
 		hw_ep = musb->control_ep;
 		goto success;
 	}
@@ -1302,7 +1290,7 @@ static int musb_schedule(
 	/* else, periodic transfers get muxed to other endpoints */
 
 	/*
-	 * We know this qh hasn't been scheduled, so all we need to do
+	 * We know this qtd hasn't been scheduled, so all we need to do
 	 * is choose which hardware endpoint to put it on ...
 	 *
 	 * REVISIT what we really want here is a regular schedule tree
@@ -1316,17 +1304,17 @@ static int musb_schedule(
 			epnum++, hw_ep++) {
 		int	diff;
 
-		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
+		if (hw_ep == musb->bulk_ep)
 			continue;
 
-		if (hw_ep == musb->bulk_ep)
+		if (hw_ep->reserved)
 			continue;
 
 		if (is_in)
 			diff = hw_ep->max_packet_sz_rx;
 		else
 			diff = hw_ep->max_packet_sz_tx;
-		diff -= (qh->maxpacket * qh->hb_mult);
+		diff -= (qtd->maxpacket * qtd->hb_mult);
 
 		if (diff >= 0 && best_diff > diff) {
 			best_diff = diff;
@@ -1334,12 +1322,8 @@ static int musb_schedule(
 		}
 	}
 	/* use bulk reserved ep1 if no other ep is free */
-	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
+	if (best_end < 0 && qtd->type == USB_ENDPOINT_XFER_BULK) {
 		hw_ep = musb->bulk_ep;
-		if (is_in)
-			head = &musb->in_bulk;
-		else
-			head = &musb->out_bulk;
 
 		/* Enable bulk RX NAK timeout scheme when bulk requests are
 		 * multiplexed.  This scheme doen't work in high speed to full
@@ -1348,28 +1332,19 @@ static int musb_schedule(
 		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
 		 * 4 (8 frame or 8ms) for FS device.
 		 */
-		if (is_in && qh->dev)
-			qh->intv_reg =
-				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
+		if (is_in && qtd->dev)
+			qtd->intv_reg =
+				(USB_SPEED_HIGH == qtd->dev->speed) ? 8 : 4;
 		goto success;
 	} else if (best_end < 0) {
 		return -ENOSPC;
 	}
 
-	idle = 1;
-	qh->mux = 0;
 	hw_ep = musb->endpoints + best_end;
-	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
+	hw_ep->reserved = true;
+	DBG(4, "qtd %p periodic slot %d\n", qtd, best_end);
 success:
-	if (head) {
-		idle = list_empty(head);
-		list_add_tail(&qh->ring, head);
-		qh->mux = 1;
-	}
-	qh->hw_ep = hw_ep;
-	qh->hep->hcpriv = qh;
-	if (idle)
-		musb_start_urb(musb, is_in, qh);
+	qtd->qh = musb_ep_get_qh(hw_ep, is_in);
 	return 0;
 }
 
@@ -1381,81 +1356,57 @@ static int musb_urb_enqueue(
 	unsigned long			flags;
 	struct musb			*musb = hcd_to_musb(hcd);
 	struct usb_host_endpoint	*hep = urb->ep;
-	struct musb_qh			*qh;
+	struct musb_qtd			*qtd;
 	struct usb_endpoint_descriptor	*epd = &hep->desc;
-	int				ret;
+	int				ret = 0;
 	unsigned			type_reg;
 	unsigned			interval;
+	int				idle = 1;
+	int				is_in;
 
 	/* host role must be active */
 	if (!is_host_active(musb) || !musb->is_active)
 		return -ENODEV;
 
-	spin_lock_irqsave(&musb->lock, flags);
-	ret = usb_hcd_link_urb_to_ep(hcd, urb);
-	qh = ret ? NULL : hep->hcpriv;
-	if (qh)
-		urb->hcpriv = qh;
-	spin_unlock_irqrestore(&musb->lock, flags);
-
-	/* DMA mapping was already done, if needed, and this urb is on
-	 * hep->urb_list now ... so we're done, unless hep wasn't yet
-	 * scheduled onto a live qh.
-	 *
-	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
-	 * disabled, testing for empty qh->ring and avoiding qh setup costs
-	 * except for the first urb queued after a config change.
-	 */
-	if (qh || ret)
-		return ret;
-
-	/* Allocate and initialize qh, minimizing the work done each time
-	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
-	 *
-	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
-	 * for bugs in other kernel code to break this driver...
-	 */
-	qh = kzalloc(sizeof *qh, mem_flags);
-	if (!qh) {
-		spin_lock_irqsave(&musb->lock, flags);
-		usb_hcd_unlink_urb_from_ep(hcd, urb);
-		spin_unlock_irqrestore(&musb->lock, flags);
+	/* allocate and initialize qtd */
+	qtd = musb_allocate_qtd(mem_flags);
+	if (!qtd)
 		return -ENOMEM;
-	}
 
-	qh->hep = hep;
-	qh->dev = urb->dev;
-	INIT_LIST_HEAD(&qh->ring);
-	qh->is_ready = 1;
+	qtd->hep = hep;
+	qtd->urb = urb;
+	qtd->dev = urb->dev;
+	qtd->is_ready = 1;
 
-	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
-	qh->type = usb_endpoint_type(epd);
+	is_in = (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
+	qtd->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+	qtd->type = usb_endpoint_type(epd);
 
 	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
 	 * Some musb cores don't support high bandwidth ISO transfers; and
 	 * we don't (yet!) support high bandwidth interrupt transfers.
 	 */
-	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
-	if (qh->hb_mult > 1) {
-		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
+	qtd->hb_mult = 1 + ((qtd->maxpacket >> 11) & 0x03);
+	if (qtd->hb_mult > 1) {
+		int ok = (qtd->type == USB_ENDPOINT_XFER_ISOC);
 
 		if (ok)
 			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
 				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
 		if (!ok) {
-			ret = -EMSGSIZE;
-			goto done;
+			kfree(qtd);
+			return -EMSGSIZE;
 		}
-		qh->maxpacket &= 0x7ff;
+		qtd->maxpacket &= 0x7ff;
 	}
 
-	qh->epnum = usb_endpoint_num(epd);
+	qtd->epnum = usb_endpoint_num(epd);
 
 	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
-	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
+	qtd->addr_reg = (u8) usb_pipedevice(urb->pipe);
 
 	/* precompute rxtype/txtype/type0 register */
-	type_reg = (qh->type << 4) | qh->epnum;
+	type_reg = (qtd->type << 4) | qtd->epnum;
 	switch (urb->dev->speed) {
 	case USB_SPEED_LOW:
 		type_reg |= 0xc0;
@@ -1466,10 +1417,10 @@ static int musb_urb_enqueue(
 	default:
 		type_reg |= 0x40;
 	}
-	qh->type_reg = type_reg;
+	qtd->type_reg = type_reg;
 
 	/* Precompute RXINTERVAL/TXINTERVAL register */
-	switch (qh->type) {
+	switch (qtd->type) {
 	case USB_ENDPOINT_XFER_INT:
 		/*
 		 * Full/low speeds use the  linear encoding,
@@ -1501,56 +1452,66 @@ static int musb_urb_enqueue(
 		 */
 		interval = 0;
 	}
-	qh->intv_reg = interval;
+	qtd->intv_reg = interval;
 
 	/* precompute addressing for external hub/tt ports */
 	if (musb->is_multipoint) {
 		struct usb_device	*parent = urb->dev->parent;
 
 		if (parent != hcd->self.root_hub) {
-			qh->h_addr_reg = (u8) parent->devnum;
+			qtd->h_addr_reg = (u8) parent->devnum;
 
 			/* set up tt info if needed */
 			if (urb->dev->tt) {
-				qh->h_port_reg = (u8) urb->dev->ttport;
+				qtd->h_port_reg = (u8) urb->dev->ttport;
 				if (urb->dev->tt->hub)
-					qh->h_addr_reg =
+					qtd->h_addr_reg =
 						(u8) urb->dev->tt->hub->devnum;
 				if (urb->dev->tt->multi)
-					qh->h_addr_reg |= 0x80;
+					qtd->h_addr_reg |= 0x80;
 			}
 		}
 	}
 
-	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
-	 * until we get real dma queues (with an entry for each urb/buffer),
-	 * we only have work to do in the former case.
-	 */
 	spin_lock_irqsave(&musb->lock, flags);
+	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (ret) {
+		kfree(qtd);
+		spin_unlock_irqrestore(&musb->lock, flags);
+		return ret;
+	}
+	/* select hw_ep for this qtd */
 	if (hep->hcpriv) {
-		/* some concurrent activity submitted another urb to hep...
-		 * odd, rare, error prone, but legal.
-		 */
-		kfree(qh);
-		ret = 0;
-	} else
-		ret = musb_schedule(musb, qh,
-				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
-
-	if (ret == 0) {
-		urb->hcpriv = qh;
-		/* FIXME set urb->start_frame for iso/intr, it's tested in
-		 * musb_start_urb(), but otherwise only konicawc cares ...
-		 */
+		qtd->qh = hep->hcpriv;
+	} else {
+		ret = musb_select_ep(musb, qtd, is_in);
+		if (ret) {
+			spin_unlock_irqrestore(&musb->lock, flags);
+			goto done;
+		}
+		hep->hcpriv = qtd->qh;
 	}
+	/* a qtd must match an URB for now.
+	 * REVISIT when there is a need for multiple qtds per URB,
+	 * urb->hcpriv should probably hold the qh instead.
+	 */
+	urb->hcpriv = qtd;
+
+	/* check if the queue is empty before adding this qtd to it.
+	 * if this qtd is the first, start.
+	 */
+	idle = list_empty(&qtd->qh->qtd_list);
+	list_add_tail(&qtd->qtd_list, &qtd->qh->qtd_list);
+	if (idle)
+		musb_start_urb(musb, is_in, qtd);
 	spin_unlock_irqrestore(&musb->lock, flags);
 
 done:
 	if (ret != 0) {
 		spin_lock_irqsave(&musb->lock, flags);
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		musb_release_qtd(qtd);
 		spin_unlock_irqrestore(&musb->lock, flags);
-		kfree(qh);
 	}
 	return ret;
 }
@@ -1561,9 +1522,9 @@ done:
  * called with controller locked, irqs blocked
  * that hardware queue advances to the next transfer, unless prevented
  */
-static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
+static int musb_cleanup_urb(struct urb *urb, struct musb_qtd *qtd)
 {
-	struct musb_hw_ep	*ep = qh->hw_ep;
+	struct musb_hw_ep	*ep = qtd->qh->hw_ep;
 	void __iomem		*epio = ep->regs;
 	unsigned		hw_end = ep->epnum;
 	void __iomem		*regs = ep->musb->mregs;
@@ -1600,7 +1561,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
 		musb_h_ep0_flush_fifo(ep);
 	}
 	if (status == 0)
-		musb_advance_schedule(ep->musb, urb, ep, is_in);
+		musb_advance_schedule(ep->musb, qtd, ep, is_in);
 	return status;
 }
 
@@ -1608,6 +1569,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
 	struct musb		*musb = hcd_to_musb(hcd);
 	struct musb_qh		*qh;
+	struct musb_qtd		*qtd;
 	unsigned long		flags;
 	int			is_in  = usb_pipein(urb->pipe);
 	int			ret;
@@ -1622,9 +1584,10 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	if (ret)
 		goto done;
 
-	qh = urb->hcpriv;
-	if (!qh)
+	qtd = urb->hcpriv;
+	if (!qtd)
 		goto done;
+	qh = qtd->qh;
 
 	/*
 	 * Any URB not actively programmed into endpoint hardware can be
@@ -1635,28 +1598,15 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
 	 * has already been updated.  This is a synchronous abort; it'd be
 	 * OK to hold off until after some IRQ, though.
-	 *
-	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
 	 */
-	if (!qh->is_ready
-			|| urb->urb_list.prev != &qh->hep->urb_list
+	if (!qtd->is_ready
+			|| urb->urb_list.prev != &qtd->hep->urb_list
 			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
-		int	ready = qh->is_ready;
-
-		qh->is_ready = 0;
+		qtd->is_ready = 0;
 		musb_giveback(musb, urb, 0);
-		qh->is_ready = ready;
-
-		/* If nothing else (usually musb_giveback) is using it
-		 * and its URB list has emptied, recycle this qh.
-		 */
-		if (ready && list_empty(&qh->hep->urb_list)) {
-			qh->hep->hcpriv = NULL;
-			list_del(&qh->ring);
-			kfree(qh);
-		}
+		musb_release_qtd(qtd);
 	} else
-		ret = musb_cleanup_urb(urb, qh);
+		ret = musb_cleanup_urb(urb, qtd);
 done:
 	spin_unlock_irqrestore(&musb->lock, flags);
 	return ret;
@@ -1670,6 +1620,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 	unsigned long		flags;
 	struct musb		*musb = hcd_to_musb(hcd);
 	struct musb_qh		*qh;
+	struct musb_qtd		*qtd;
 	struct urb		*urb;
 
 	spin_lock_irqsave(&musb->lock, flags);
@@ -1677,40 +1628,23 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 	qh = hep->hcpriv;
 	if (qh == NULL)
 		goto exit;
+	qtd = musb_qh_get_qtd(qh);
 
-	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
-
-	/* Kick the first URB off the hardware, if needed */
-	qh->is_ready = 0;
-	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
-		urb = next_urb(qh);
-
-		/* make software (then hardware) stop ASAP */
-		if (!urb->unlinked)
-			urb->status = -ESHUTDOWN;
+	qtd->is_ready = 0;
+	urb = qtd->urb;
 
-		/* cleanup */
-		musb_cleanup_urb(urb, qh);
+	/* make software (then hardware) stop ASAP */
+	if (!urb->unlinked)
+		urb->status = -ESHUTDOWN;
 
-		/* Then nuke all the others ... and advance the
-		 * queue on hw_ep (e.g. bulk ring) when we're done.
-		 */
-		while (!list_empty(&hep->urb_list)) {
-			urb = next_urb(qh);
-			urb->status = -ESHUTDOWN;
-			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
-		}
-	} else {
-		/* Just empty the queue; the hardware is busy with
-		 * other transfers, and since !qh->is_ready nothing
-		 * will activate any of these as it advances.
-		 */
-		while (!list_empty(&hep->urb_list))
-			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+	/* cleanup the first */
+	musb_cleanup_urb(urb, qtd);
 
-		hep->hcpriv = NULL;
-		list_del(&qh->ring);
-		kfree(qh);
+	/* then nuke every qtd in this endpoint */
+	while (!list_empty(&qh->qtd_list)) {
+		qtd = musb_qh_get_qtd(qh);
+		qtd->urb->status = -ESHUTDOWN;
+		musb_advance_schedule(musb, qtd, qh->hw_ep, is_in);
 	}
 exit:
 	spin_unlock_irqrestore(&musb->lock, flags);
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 14b0077..f475581 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -45,42 +45,43 @@ static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
 	return (struct musb *) (hcd->hcd_priv);
 }
 
-/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
-struct musb_qh {
+struct musb_qh;
+
+struct musb_qtd {
 	struct usb_host_endpoint *hep;		/* usbcore info */
 	struct usb_device	*dev;
-	struct musb_hw_ep	*hw_ep;		/* current binding */
+	struct musb_qh		*qh;
+	struct urb		*urb;
 
-	struct list_head	ring;		/* of musb_qh */
-	/* struct musb_qh		*next; */	/* for periodic tree */
-	u8			mux;		/* qh multiplexed to hw_ep */
+	struct list_head	qtd_list;	/* of musb_qtd */
 
 	unsigned		offset;		/* in urb->transfer_buffer */
 	unsigned		segsize;	/* current xfer fragment */
+	unsigned		iso_idx;	/* in urb->iso_frame_desc[] */
+
+	u16			maxpacket;
+	u16			frame;		/* for periodic schedule */
 
+	u8			h_addr_reg;	/* hub address register */
+	u8			h_port_reg;	/* hub port register */
 	u8			type_reg;	/* {rx,tx} type register */
 	u8			intv_reg;	/* {rx,tx} interval register */
 	u8			addr_reg;	/* device address register */
-	u8			h_addr_reg;	/* hub address register */
-	u8			h_port_reg;	/* hub port register */
 
 	u8			is_ready;	/* safe to modify hw_ep */
-	u8			type;		/* XFERTYPE_* */
-	u8			epnum;
 	u8			hb_mult;	/* high bandwidth pkts per uf */
-	u16			maxpacket;
-	u16			frame;		/* for periodic schedule */
-	unsigned		iso_idx;	/* in urb->iso_frame_desc[] */
+	u8			epnum;
+	u8			type;		/* XFERTYPE_* */
 };
 
-/* map from control or bulk queue head to the first qh on that ring */
-static inline struct musb_qh *first_qh(struct list_head *q)
-{
-	if (list_empty(q))
-		return NULL;
-	return list_entry(q->next, struct musb_qh, ring);
-}
+/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
+struct musb_qh {
+	struct list_head	qtd_list;	/* of musb_qtd */
 
+	struct musb_hw_ep	*hw_ep;
+	struct musb		*musb;
+	struct usb_device	*udev;
+};
 
 extern void musb_root_disconnect(struct musb *musb);
 
@@ -93,20 +94,4 @@ extern int musb_hub_control(struct usb_hcd *hcd,
 
 extern const struct hc_driver musb_hc_driver;
 
-static inline struct urb *next_urb(struct musb_qh *qh)
-{
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	struct list_head	*queue;
-
-	if (!qh)
-		return NULL;
-	queue = &qh->hep->urb_list;
-	if (list_empty(queue))
-		return NULL;
-	return list_entry(queue->next, struct urb, urb_list);
-#else
-	return NULL;
-#endif
-}
-
 #endif				/* _MUSB_HOST_H */
-- 
1.5.4.3

--
To unsubscribe from this list: send the line "unsubscribe linux-usb" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html

[Index of Archives]     [Linux Media]     [Linux Input]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]     [Old Linux USB Devel Archive]

  Powered by Linux