[PATCH 2/3 v5] xHCI: isochronous transfer implementation

From 3d8460ef64c333d5c28419bf5b59061d4a054a56 Mon Sep 17 00:00:00 2001
From: Andiry Xu <andiry.xu@xxxxxxx>
Date: Fri, 7 May 2010 02:45:10 +0800
Subject: [PATCH 2/3] xHCI: isochronous transfer implementation

This patch implements the isochronous urb enqueue and interrupt handler parts.

When an isochronous urb is passed to the xHCI driver, first check the
transfer ring to guarantee there is enough room for the whole urb. Then
update the start_frame and interval fields of the urb. Always assume
URB_ISO_ASAP is set, and never use urb->start_frame as input.
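
A minimal sketch of the start_frame update (lifted from
xhci_queue_isoc_tx_prepare() below; MFINDEX counts microframes, so LS/FS
devices shift right by 3 to convert microframes to frames):

        start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
        urb->start_frame = start_frame & 0x3fff;
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->speed == USB_SPEED_FULL)
                urb->start_frame >>= 3; /* microframes -> frames */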

The number of isoc TDs is equal to urb->number_of_packets. One isoc TD is
consumed every Interval. Each isoc TD consists of an Isoch TRB chained to
zero or more Normal TRBs.
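
In outline, the enqueue path below turns each packet into one TD:

        num_tds = urb->number_of_packets;
        for (i = 0; i < num_tds; i++) {
                addr = urb->transfer_dma + urb->iso_frame_desc[i].offset;
                td_len = urb->iso_frame_desc[i].length;
                /* the first TRB is TRB_TYPE(TRB_ISOC), any further TRBs
                 * are TRB_TYPE(TRB_NORMAL), linked with TRB_CHAIN */
        }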

For each TD, first calculate the number of TRBs needed, then call
prepare_transfer to do initialization. If the data required by an isoc TD is
physically contiguous (does not cross a 64KB TRB buffer boundary), then only
one isoc TRB is needed; otherwise one or more additional Normal TRBs are
chained to the isoc TRB.
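
The per-TD count reduces to how many TRB_MAX_BUFF_SIZE (64KB) chunks the
packet buffer spans; count_isoc_trbs_needed() in the diff computes it
roughly as:

        /* bytes from addr up to the first 64KB boundary, if any */
        running_total = TRB_MAX_BUFF_SIZE -
                        (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
        if (running_total != 0)
                num_trbs++;
        /* one more TRB for every further 64KB chunk */
        while (running_total < td_len) {
                num_trbs++;
                running_total += TRB_MAX_BUFF_SIZE;
        }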

Set TRB_IOC on the last TRB of each isoc TD, and do not ring the endpoint
doorbell to start xHC processing until all the TDs are inserted into the
endpoint transfer ring.
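
The chain/IOC decision in the inner TRB loop looks like this (excerpted from
xhci_queue_isoc_tx() below; ring_ep_doorbell() is then called exactly once,
after the loop over all TDs):

        if (j < trbs_per_td - 1) {
                field |= TRB_CHAIN;
        } else {
                /* last TRB of this TD: remember it, request an event */
                td->last_trb = ep_ring->enqueue;
                field |= TRB_IOC;
        }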

In the irq handler, update the urb status and actual_length, and increase
urb_priv->td_cnt. When all the TDs are completed (td_cnt equals
urb_priv->length), give the urb back to usbcore.
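
Schematically, the completion check has the following shape (a sketch of the
logic just described; the td_cnt bookkeeping lives in the shared td cleanup
code, while the unlink/giveback calls match the cleanup path in the diff):

        urb_priv->td_cnt++;
        if (urb_priv->td_cnt == urb_priv->length) {
                /* all TDs of this urb are done: give it back */
                usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
                usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
        }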

Note that sometimes the xHC is unable to process the TDs in time and will
generate a Missed Service Error Event. In this case some TDs on the ring are
not processed and are missed. When encountering a Missed Service Error Event,
set the skip flag of the ep, and process the missed TDs until reaching the
next processed TD, then clear the skip flag.
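
The skip handling in handle_tx_event() therefore has two halves. On the
error event:

        case COMP_MISSED_INT:
                ep->skip = true;        /* missed TDs handled on next pass */
                goto cleanup;

and at the end of the cleanup path, loop back until the TD the event points
to is found:

        if (ep->skip && trb_comp_code != COMP_MISSED_INT)
                goto handle_td; /* complete missed TDs as short transfers */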

Signed-off-by: Andiry Xu <andiry.xu@xxxxxxx>
---
 drivers/usb/host/xhci-mem.c  |    1 +
 drivers/usb/host/xhci-ring.c |  433 ++++++++++++++++++++++++++++++++++++++++--
 drivers/usb/host/xhci.h      |   13 ++
 3 files changed, 429 insertions(+), 18 deletions(-)

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 81cb5b5..37bc0ed 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1107,6 +1107,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		virt_dev->num_rings_cached--;
 		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
 	}
+	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
 	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 2136d57..51d7ca5 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1239,14 +1239,59 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	}
 
 	event_dma = event->buffer;
+	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	/*
+	 * When the Isoch ring is empty, the xHC will generate
+	 * a Ring Overrun Event for an IN Isoch endpoint or a
+	 * Ring Underrun Event for an OUT Isoch endpoint.
+	 */
+	switch (trb_comp_code) {
+	case COMP_UNDERRUN:
+		xhci_dbg(xhci, "underrun event on endpoint\n");
+		if (!list_empty(&ep_ring->td_list))
+			xhci_dbg(xhci, "underrun Event for slot %d ep %d "
+					"still with TDs queued?\n",
+				TRB_TO_SLOT_ID(event->flags), ep_index);
+		urb = NULL;
+		goto cleanup;
+	case COMP_OVERRUN:
+		xhci_dbg(xhci, "overrun event on endpoint\n");
+		if (!list_empty(&ep_ring->td_list))
+			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
+					"still with TDs queued?\n",
+				TRB_TO_SLOT_ID(event->flags), ep_index);
+		urb = NULL;
+		goto cleanup;
+	case COMP_MISSED_INT:
+		/*
+		 * When we encounter a missed service error, one or more
+		 * isoc TDs may have been missed by the xHC.
+		 * Set the skip flag of the endpoint; the missed TDs will be
+		 * completed as short transfers on the next pass over the ring.
+		 */
+		ep->skip = true;
+		xhci_dbg(xhci, "Miss service interval error! Set skip flag\n");
+		urb = NULL;
+		goto cleanup;
+	default:
+		break;
+	}
+
+handle_td:
 	/* This TRB should be in the TD at the head of this ring's TD list */
 	xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
 	if (list_empty(&ep_ring->td_list)) {
-		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
-				TRB_TO_SLOT_ID(event->flags), ep_index);
+		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
+				"with no TDs queued?\n",
+			  TRB_TO_SLOT_ID(event->flags), ep_index);
 		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-				(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+			 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
 		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+		if (ep->skip) {
+			ep->skip = false;
+			xhci_dbg(xhci, "td_list is empty while skip flag set. "
+					"Clear skip flag.\n");
+		}
 		urb = NULL;
 		goto cleanup;
 	}
@@ -1258,25 +1303,42 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
 			td->last_trb, event_dma);
 	xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
-	if (!event_seg) {
+	if (event_seg && ep->skip) {
+		xhci_dbg(xhci, "Found td. Clear skip flag.\n");
+		ep->skip = false;
+	}
+	if (!event_seg && !ep->skip) {
 		/* HC is busted, give up! */
 		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
 		return -ESHUTDOWN;
 	}
-	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
-	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
-	xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
-			lower_32_bits(event->buffer));
-	xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
-			upper_32_bits(event->buffer));
-	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
-			(unsigned int) event->transfer_len);
-	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
-			(unsigned int) event->flags);
 
+	if (event_seg) {
+		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
+				sizeof(*event_trb)];
+		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+				(unsigned int)
+				(event->flags & TRB_TYPE_BITMASK)>>10);
+		xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
+				lower_32_bits(event->buffer));
+		xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
+				upper_32_bits(event->buffer));
+		xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
+				(unsigned int) event->transfer_len);
+		xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
+				(unsigned int) event->flags);
+		/*
+		 * No-op TRB should not trigger interrupts. If event_trb is
+		 * a no-op TRB, it means the corresponding TD has been
+		 * cancelled. Just ignore the TD.
+		 */
+		if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
+				 == TRB_TYPE(TRB_TR_NOOP)) {
+			xhci_dbg(xhci, "event_trb is a no-op TRB. Skip it\n");
+			goto cleanup;
+		}
+	}
 	/* Look for common error cases */
-	trb_comp_code = GET_COMP_CODE(event->transfer_len);
 	switch (trb_comp_code) {
 	/* Skip codes that require special handling depending on
 	 * transfer type
@@ -1312,6 +1374,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
 		status = -ENOSR;
 		break;
+	case COMP_BW_OVER:
+		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
+		break;
+	case COMP_BUFF_OVER:
+		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
+		break;
 	default:
 		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
 			status = 0;
@@ -1400,6 +1468,100 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				}
 			}
 		}
+	} else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+		int idx;
+		int len = 0;
+		int skip_td = 0;
+		union xhci_trb *cur_trb;
+		struct xhci_segment *cur_seg;
+
+		urb_priv = td->urb->hcpriv;
+		idx = urb_priv->td_cnt;
+		status = 0;
+
+		if (ep->skip) {
+			/* treat missed tds as short transfer */
+			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+				td->urb->iso_frame_desc[idx].status =
+						 -EREMOTEIO;
+			else
+				td->urb->iso_frame_desc[idx].status = 0;
+		} else {
+			/* handle completion code */
+			switch (trb_comp_code) {
+			case COMP_SUCCESS:
+				td->urb->iso_frame_desc[idx].status = 0;
+				xhci_dbg(xhci, "Successful isoc "
+						"transfer!\n");
+				break;
+			case COMP_SHORT_TX:
+				xhci_dbg(xhci, "short transfer on isoc ep\n");
+				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+					td->urb->iso_frame_desc[idx].status =
+						 -EREMOTEIO;
+				else
+					td->urb->iso_frame_desc[idx].status = 0;
+				break;
+			case COMP_BW_OVER:
+				td->urb->iso_frame_desc[idx].status = -ECOMM;
+				skip_td = 1;
+				break;
+			case COMP_MISSED_INT:
+				td->urb->iso_frame_desc[idx].status = -ECOMM;
+				skip_td = 1;
+				break;
+			case COMP_BUFF_OVER:
+				td->urb->iso_frame_desc[idx].status =
+						 -EOVERFLOW;
+				skip_td = 1;
+				break;
+			case COMP_STALL:
+				td->urb->iso_frame_desc[idx].status = -EPROTO;
+				skip_td = 1;
+				break;
+			case COMP_BABBLE:
+				td->urb->iso_frame_desc[idx].status =
+						 -EOVERFLOW;
+				skip_td = 1;
+				break;
+			case COMP_STOP_INVAL:
+				td->urb->iso_frame_desc[idx].status =
+						 -EREMOTEIO;
+				break;
+			case COMP_STOP:
+				td->urb->iso_frame_desc[idx].status =
+						 -EREMOTEIO;
+				break;
+			default:
+				td->urb->iso_frame_desc[idx].status = -1;
+				break;
+			}
+		}
+
+		/* calc actual length */
+		if (ep->skip) {
+			td->urb->iso_frame_desc[idx].actual_length = 0;
+		} else if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
+			td->urb->iso_frame_desc[idx].actual_length =
+				td->urb->iso_frame_desc[idx].length;
+			td->urb->actual_length +=
+				td->urb->iso_frame_desc[idx].length;
+		} else {
+			for (cur_trb = ep_ring->dequeue,
+			     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
+			     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+				if ((cur_trb->generic.field[3] &
+				 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+				    (cur_trb->generic.field[3] &
+				 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+					len +=
+					    TRB_LEN(cur_trb->generic.field[2]);
+			}
+			len += TRB_LEN(cur_trb->generic.field[2]) -
+				TRB_LEN(event->transfer_len);
+			td->urb->iso_frame_desc[idx].actual_length = len;
+			td->urb->actual_length += len;
+		}
 	} else {
 		switch (trb_comp_code) {
 		case COMP_SUCCESS:
@@ -1577,8 +1739,14 @@ td_cleanup:
 		}
 	}
 cleanup:
-	inc_deq(xhci, xhci->event_ring, true);
-	xhci_set_hc_event_deq(xhci);
+	/*
+	 * Do not update the event ring dequeue pointer if ep->skip is set;
+	 * we will roll back to continue processing the missed TDs.
+	 */
+	if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
+		inc_deq(xhci, xhci->event_ring, true);
+		xhci_set_hc_event_deq(xhci);
+	}
 
 	if (urb && urb_done) {
 		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
@@ -1588,6 +1756,16 @@ cleanup:
 		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
 		spin_lock(&xhci->lock);
 	}
+
+	/*
+	 * If ep->skip is set, it means there are missed TDs on the
+	 * endpoint ring that need to be taken care of.
+	 * Process them as short transfers until we reach the TD
+	 * pointed to by the event.
+	 */
+	if (ep->skip && trb_comp_code != COMP_MISSED_INT)
+		goto handle_td;
+
 	return 0;
 }
 
@@ -2277,6 +2455,225 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	return 0;
 }
 
+static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
+		struct urb *urb, int i)
+{
+	int num_trbs = 0;
+	u64 addr, td_len, running_total;
+
+	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+	td_len = urb->iso_frame_desc[i].length;
+
+	running_total = TRB_MAX_BUFF_SIZE -
+			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	if (running_total != 0)
+		num_trbs++;
+
+	while (running_total < td_len) {
+		num_trbs++;
+		running_total += TRB_MAX_BUFF_SIZE;
+	}
+
+	return num_trbs;
+}
+
+/* This is for isoc transfer */
+static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	int num_tds, trbs_per_td;
+	struct xhci_generic_trb *start_trb;
+	bool first_trb;
+	int start_cycle;
+	u32 field, length_field;
+
+	int running_total, trb_buff_len, td_len, td_remain_len, ret;
+	u64 start_addr, addr;
+	int i, j;
+
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+
+	num_tds = urb->number_of_packets;
+	if (num_tds < 1) {
+		xhci_dbg(xhci, "ISOC URB with zero packets?\n");
+		return -EINVAL;
+	}
+
+	if (!in_interrupt())
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+				" addr = %#llx, num_tds = %d\n",
+				urb->ep->desc.bEndpointAddress,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length,
+				(unsigned long long)urb->transfer_dma,
+				num_tds);
+
+	start_addr = (u64) urb->transfer_dma;
+
+	/* Queue the first TRB, even if it's zero-length */
+	for (i = 0; i < num_tds; i++) {
+		first_trb = true;
+
+		running_total = 0;
+		start_trb = &ep_ring->enqueue->generic;
+		start_cycle = ep_ring->cycle_state;
+
+		addr = start_addr + urb->iso_frame_desc[i].offset;
+		td_len = urb->iso_frame_desc[i].length;
+		td_remain_len = td_len;
+
+		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
+
+		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+				urb->stream_id, trbs_per_td, urb, i, mem_flags);
+		if (ret < 0)
+			return ret;
+
+		urb_priv = urb->hcpriv;
+		td = urb_priv->td[i];
+
+		for (j = 0; j < trbs_per_td; j++) {
+			u32 remainder = 0;
+			field = 0;
+
+			if (first_trb) {
+				/* Queue the isoc TRB */
+				field |= TRB_TYPE(TRB_ISOC);
+				/* Assume URB_ISO_ASAP is set */
+				field |= TRB_SIA;
+				first_trb = false;
+			} else {
+				/* Queue other normal TRBs */
+				field |= TRB_TYPE(TRB_NORMAL);
+				field |= ep_ring->cycle_state;
+			}
+
+			/* Chain all the TRBs together; clear the chain bit in
+			 * the last TRB to indicate it's the last TRB in the
+			 * chain.
+			 */
+
+			if (j < trbs_per_td - 1) {
+				field |= TRB_CHAIN;
+			} else {
+				td->last_trb = ep_ring->enqueue;
+				field |= TRB_IOC;
+			}
+
+			/* Calculate TRB length */
+			trb_buff_len = TRB_MAX_BUFF_SIZE -
+				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			if (trb_buff_len > td_remain_len)
+				trb_buff_len = td_remain_len;
+
+			remainder = xhci_td_remainder(td_len - running_total);
+			length_field = TRB_LEN(trb_buff_len) |
+				remainder |
+				TRB_INTR_TARGET(0);
+			queue_trb(xhci, ep_ring, false,
+				lower_32_bits(addr),
+				upper_32_bits(addr),
+				length_field,
+				/* We always want to know if the TRB was short,
+				 * or we won't get an event when it completes.
+				 * (Unless we use event data TRBs, which are a
+				 * waste of space and HC resources.)
+				 */
+				field | TRB_ISP);
+			running_total += trb_buff_len;
+
+			addr += trb_buff_len;
+			td_remain_len -= trb_buff_len;
+		}
+
+		/* Check TD length */
+		if (running_total != td_len) {
+			xhci_dbg(xhci, "ISOC TD length mismatch\n");
+			return -EINVAL;
+		}
+
+		wmb();
+		start_trb->field[3] |= start_cycle;
+	}
+
+	ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+	return 0;
+}
+
+/*
+ * Check the transfer ring to guarantee there is enough room for the urb.
+ * Update the ISO URB's start_frame and interval.
+ * Adjust the interval as xhci_queue_intr_tx does; for now, just use the
+ * xHCI frame_index register to set urb->start_frame.
+ * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
+ */
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	struct xhci_ep_ctx *ep_ctx;
+	int start_frame;
+	int xhci_interval;
+	int ep_interval;
+	int num_tds, num_trbs, i;
+	int ret;
+
+	xdev = xhci->devs[slot_id];
+	ep_ring = xdev->eps[ep_index].ring;
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+	num_trbs = 0;
+	num_tds = urb->number_of_packets;
+	for (i = 0; i < num_tds; i++)
+		num_trbs += count_isoc_trbs_needed(xhci, urb, i);
+
+	/* Check the ring to guarantee there is enough room for the whole urb.
+	 * Do not insert any td of the urb to the ring if the check failed.
+	 */
+	ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
+				num_trbs, mem_flags);
+	if (ret)
+		return ret;
+
+	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+	start_frame &= 0x3fff;
+
+	urb->start_frame = start_frame;
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		urb->start_frame >>= 3;
+
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+	ep_interval = urb->interval;
+	/* Convert to microframes */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		ep_interval *= 8;
+	/* FIXME change this to a warning and a suggestion to use the new API
+	 * to set the polling interval (once the API is added).
+	 */
+	if (xhci_interval != ep_interval) {
+		if (printk_ratelimit())
+			dev_dbg(&urb->dev->dev, "Driver uses different interval"
+					" (%d microframe%s) than xHCI "
+					"(%d microframe%s)\n",
+					ep_interval,
+					ep_interval == 1 ? "" : "s",
+					xhci_interval,
+					xhci_interval == 1 ? "" : "s");
+		urb->interval = xhci_interval;
+		/* Convert back to frames for LS/FS devices */
+		if (urb->dev->speed == USB_SPEED_LOW ||
+				urb->dev->speed == USB_SPEED_FULL)
+			urb->interval /= 8;
+	}
+	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
+}
+
 /****		Command Ring Operations		****/
 
 /* Generic function for queueing a command TRB on the command ring.
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8c27a9a..cf2f21f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -720,6 +720,14 @@ struct xhci_virt_ep {
 	struct timer_list	stop_cmd_timer;
 	int			stop_cmds_pending;
 	struct xhci_hcd		*xhci;
+	/*
+	 * Sometimes the xHC cannot process the isochronous endpoint ring
+	 * quickly enough; it then misses some isoc TDs on the ring and
+	 * generates a Missed Service Error Event.
+	 * Set the skip flag when a Missed Service Error Event is received,
+	 * and process the missed TDs on the endpoint ring.
+	 */
+	bool			skip;
 };
 
 struct xhci_virt_device {
@@ -911,6 +919,9 @@ struct xhci_event_cmd {
 /* Control transfer TRB specific fields */
 #define TRB_DIR_IN		(1<<16)
 
+/* Isochronous TRB specific fields */
+#define TRB_SIA			(1<<31)
+
 struct xhci_generic_trb {
 	u32 field[4];
 };
@@ -1394,6 +1405,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index);
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id, bool command_must_succeed);
 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-- 
1.6.3.3


