[RFC v3 7/8] xHCI: wait for dequeue pointer to move past the link TRB

When trying to expand a ring, if the enqueue pointer is behind the dequeue
pointer and they are in the same segment, we cannot expand the ring until
the dequeue pointer moves past the link TRB. This is because if the
hardware has not cached the link TRB, it will jump into the new segments,
where the data is not what it expects, and it may stop if the cycle bit
indicates the TRBs are owned by software.

Wait for the dequeue pointer to move past the link TRB; only then is it
safe to expand the ring. Hold all URBs submitted during the waiting period
on a pending list and queue them after the ring has been successfully
expanded. When an endpoint is stopped, give back all of its pending URBs.
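
For reference, a minimal sketch of the ring state this change keys off
(the helper name is invented for illustration; the fields are the existing
xhci_ring members from drivers/usb/host/xhci.h that prepare_ring() checks
in the diff below):

	/*
	 * Illustrative only, not part of the patch: expansion has to be
	 * deferred while the enqueue and dequeue pointers share a segment
	 * and dequeue still sits after enqueue, i.e. the hardware has not
	 * yet walked past the link TRB at the end of that segment.
	 */
	static bool deq_has_not_passed_link_trb(struct xhci_ring *ring)
	{
		return ring->enq_seg == ring->deq_seg &&
		       ring->dequeue > ring->enqueue;
	}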

Signed-off-by: Andiry Xu <andiry.xu@xxxxxxx>
---
 drivers/usb/host/xhci-mem.c  |    2 +
 drivers/usb/host/xhci-ring.c |  169 ++++++++++++++++++++++++++++++++++++++++--
 drivers/usb/host/xhci.c      |   22 +++---
 drivers/usb/host/xhci.h      |   11 +++
 4 files changed, 184 insertions(+), 20 deletions(-)

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 5d4f795..a70462c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -183,6 +183,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring,
 	 * handling ring expansion, set the cycle state equal to the old ring.
 	 */
 	ring->cycle_state = cycle_state;
+	ring->waiting_deq_ptr = 0;
 	/* Not necessary for new rings, but needed for re-initialized rings */
 	ring->enq_updates = 0;
 	ring->deq_updates = 0;
@@ -242,6 +243,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 
 	ring->num_segs = num_segs;
 	INIT_LIST_HEAD(&ring->td_list);
+	INIT_LIST_HEAD(&ring->pending_urb_list);
 	ring->type = type;
 	if (num_segs == 0)
 		return ring;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 14f368f..6f1b664 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -140,6 +140,74 @@ static void next_trb(struct xhci_hcd *xhci,
 }
 
 /*
+ * Expand the ring and queue the pending URBs. If queuing fails, give the URB back.
+ */
+static int xhci_queue_pending_urbs(struct xhci_hcd *xhci,
+					struct xhci_ring *ep_ring)
+{
+	struct xhci_pending_urb *curr, *next;
+	struct urb *urb;
+	unsigned int slot_id, ep_index;
+	int ret = 0;
+	bool expansion_failed = false;
+
+	if (ep_ring->type == TYPE_COMMAND || ep_ring->type == TYPE_EVENT)
+		return -EINVAL;
+
+	/* It's safe to expand the ring now */
+	ret = xhci_ring_expansion(xhci, ep_ring, ep_ring->pending_num_trbs,
+					GFP_ATOMIC);
+	if (ret) {
+		xhci_err(xhci, "Ring expansion failed\n");
+		expansion_failed = true;
+	}
+
+	list_for_each_entry_safe(curr, next, &ep_ring->pending_urb_list, list) {
+		urb = curr->urb;
+		if (!expansion_failed) {
+			slot_id = urb->dev->slot_id;
+			ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+			switch (ep_ring->type) {
+			case TYPE_CTRL:
+				ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
+					slot_id, ep_index);
+				break;
+			case TYPE_ISOC:
+				ret = xhci_queue_isoc_tx_prepare(xhci,
+					GFP_ATOMIC, urb, slot_id, ep_index);
+				break;
+			case TYPE_BULK:
+			case TYPE_STREAM:
+				ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
+					slot_id, ep_index);
+				break;
+			case TYPE_INTR:
+				ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
+					slot_id, ep_index);
+				break;
+			default:
+				ret = -EINVAL;
+				break;
+			}
+		}
+
+		if (ret || expansion_failed) {
+			/* The URB failed to queue. Give it back... */
+			xhci_urb_free_priv(xhci, urb->hcpriv);
+			spin_unlock(&xhci->lock);
+			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb,
+						-EPIPE);
+			spin_lock(&xhci->lock);
+		}
+		list_del(&curr->list);
+		kfree(curr);
+	}
+
+	ep_ring->pending_num_trbs = 0;
+	return ret;
+}
+
+/*
  * See Cycle bit rules. SW is the consumer for the event ring only.
  * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
  */
@@ -171,6 +239,14 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 		ring->deq_seg = ring->deq_seg->next;
 		ring->dequeue = ring->deq_seg->trbs;
 		next = ring->dequeue;
+		/*
+		 * If there are pending URBs, expand the ring first and
+		 * then queue them.
+		 */
+		if (ring->waiting_deq_ptr) {
+			xhci_queue_pending_urbs(xhci, ring);
+			ring->waiting_deq_ptr = 0;
+		}
 	}
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
 }
@@ -618,6 +694,47 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 	}
 }
 
+static void xhci_giveback_pending_urbs(struct xhci_hcd *xhci,
+		struct xhci_ring *ep_ring)
+{
+	struct xhci_pending_urb *curr, *next;
+
+	list_for_each_entry_safe(curr, next,
+			&ep_ring->pending_urb_list, list) {
+		xhci_urb_free_priv(xhci, curr->urb->hcpriv);
+		spin_unlock(&xhci->lock);
+		usb_hcd_giveback_urb(bus_to_hcd(curr->urb->dev->bus), curr->urb,
+						-ESHUTDOWN);
+		spin_lock(&xhci->lock);
+		list_del(&curr->list);
+		kfree(curr);
+	}
+	ep_ring->pending_num_trbs = 0;
+	ep_ring->waiting_deq_ptr = 0;
+}
+
+/* Scan all the rings of the endpoint and give back pending URBs */
+static int xhci_clear_ep_pending_urbs(struct xhci_hcd *xhci,
+		struct xhci_virt_ep *ep)
+{
+	struct xhci_ring	*ep_ring;
+	int i;
+
+	if (!(ep->ep_state & EP_HAS_STREAMS)) {
+		ep_ring = ep->ring;
+		if (ep_ring && ep_ring->waiting_deq_ptr)
+			xhci_giveback_pending_urbs(xhci, ep_ring);
+	} else {
+		for (i = 0; i < ep->stream_info->num_streams; ++i) {
+			ep_ring = ep->stream_info->stream_rings[i];
+			if (ep_ring && ep_ring->waiting_deq_ptr)
+				xhci_giveback_pending_urbs(xhci, ep_ring);
+		}
+	}
+
+	return 0;
+}
+
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring.  There are two ways to do that:
@@ -662,6 +779,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	ep = &xhci->devs[slot_id]->eps[ep_index];
 
+	xhci_clear_ep_pending_urbs(xhci, ep);
+
 	if (list_empty(&ep->cancelled_td_list)) {
 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
 		ep->stopped_td = NULL;
@@ -842,6 +961,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 			continue;
 		for (j = 0; j < 31; j++) {
 			temp_ep = &xhci->devs[i]->eps[j];
+			xhci_clear_ep_pending_urbs(xhci, temp_ep);
 			ring = temp_ep->ring;
 			if (!ring)
 				continue;
@@ -2394,7 +2514,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  * FIXME allocate segments if the ring is full.
  */
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+		struct urb *urb, u32 ep_state, unsigned int num_trbs,
+		gfp_t mem_flags)
 {
 	unsigned int num_trbs_needed;
 
@@ -2435,11 +2556,27 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			return -ENOMEM;
 		}
 
+		/* Waiting for the dequeue pointer to walk past the link TRB;
+		 * until then, add the URB to the ring's pending list and
+		 * queue it later.
+		 */
 		if (ep_ring->enq_seg == ep_ring->deq_seg &&
 				ep_ring->dequeue > ep_ring->enqueue) {
-			xhci_err(xhci, "Can not expand the ring while dequeue "
-				"pointer has not passed the link TRB\n");
-			return -ENOMEM;
+			struct xhci_pending_urb *pending_urb;
+
+			xhci_dbg(xhci, "Waiting for dequeue "
+				"pointer to pass the link TRB\n");
+			pending_urb = kzalloc(sizeof(struct xhci_pending_urb),
+						mem_flags);
+			if (!pending_urb)
+				return -ENOMEM;
+			ep_ring->waiting_deq_ptr = 1;
+			ep_ring->pending_num_trbs += num_trbs;
+			pending_urb->urb = urb;
+			INIT_LIST_HEAD(&pending_urb->list);
+			list_add_tail(&pending_urb->list,
+					&ep_ring->pending_urb_list);
+			return -EBUSY;
 		}
 
 		xhci_dbg(xhci, "ERROR no room on ep ring, "
@@ -2512,7 +2649,22 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		return -EINVAL;
 	}
 
-	ret = prepare_ring(xhci, ep_ring,
+	if (ep_ring->waiting_deq_ptr) {
+		struct xhci_pending_urb *pending_urb;
+
+		pending_urb = kzalloc(sizeof(struct xhci_pending_urb),
+						mem_flags);
+		if (!pending_urb)
+			return -ENOMEM;
+		xhci_dbg(xhci, "Adding urb %p to ring's pending list\n", urb);
+		ep_ring->pending_num_trbs += num_trbs;
+		pending_urb->urb = urb;
+		INIT_LIST_HEAD(&pending_urb->list);
+		list_add_tail(&pending_urb->list, &ep_ring->pending_urb_list);
+		return -EBUSY;
+	}
+
+	ret = prepare_ring(xhci, ep_ring, urb,
 			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
 			   num_trbs, mem_flags);
 	if (ret)
@@ -3424,8 +3576,9 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* Check the ring to guarantee there is enough room for the whole urb.
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
-	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			   num_trbs, mem_flags);
+	ret = prepare_ring(xhci, ep_ring, urb,
+			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+			num_trbs, mem_flags);
 	if (ret)
 		return ret;
 
@@ -3483,7 +3636,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 	if (!command_must_succeed)
 		reserved_trbs++;
 
-	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
+	ret = prepare_ring(xhci, xhci->cmd_ring, NULL, EP_STATE_RUNNING,
 			reserved_trbs, GFP_ATOMIC);
 	if (ret < 0) {
 		xhci_err(xhci, "ERR: No room for command on command ring\n");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index e70ddf2..f891943 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1153,9 +1153,6 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 			goto dying;
 		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
-		if (ret)
-			goto free_priv;
-		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
 		if (xhci->xhc_state & XHCI_STATE_DYING)
@@ -1175,28 +1172,29 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
 					slot_id, ep_index);
 		}
-		if (ret)
-			goto free_priv;
-		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
 		if (xhci->xhc_state & XHCI_STATE_DYING)
 			goto dying;
 		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
-		if (ret)
-			goto free_priv;
-		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else {
 		spin_lock_irqsave(&xhci->lock, flags);
 		if (xhci->xhc_state & XHCI_STATE_DYING)
 			goto dying;
 		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
-		if (ret)
-			goto free_priv;
-		spin_unlock_irqrestore(&xhci->lock, flags);
 	}
+
+	if (ret == -EBUSY)
+		/* We're waiting for the dequeue pointer to walk past the
+		 * link TRB and will queue this URB later. Tell usbcore it
+		 * was queued successfully...
+		 */
+		ret = 0;
+	if (ret)
+		goto free_priv;
+	spin_unlock_irqrestore(&xhci->lock, flags);
 exit:
 	return ret;
 dying:
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 1434508..09dbb7c 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1254,6 +1254,11 @@ struct xhci_dequeue_state {
 	int new_cycle_state;
 };
 
+struct xhci_pending_urb {
+	struct urb *urb;
+	struct list_head list;
+};
+
 enum xhci_ring_type {
 	TYPE_CTRL = 0,
 	TYPE_ISOC,
@@ -1274,6 +1279,7 @@ struct xhci_ring {
 	struct xhci_segment	*deq_seg;
 	unsigned int		deq_updates;
 	struct list_head	td_list;
+	struct list_head	pending_urb_list;
 	/*
 	 * Write the cycle state into the TRB cycle field to give ownership of
 	 * the TRB to the host controller (if we are the producer), or to check
@@ -1283,8 +1289,13 @@ struct xhci_ring {
 	unsigned int		stream_id;
 	unsigned int		num_segs;
 	unsigned int		num_trbs_free;
+	unsigned int		pending_num_trbs;
 	enum xhci_ring_type	type;
 	bool			last_td_was_short;
+	/* Flag to indicate the driver is waiting for the dequeue pointer to
+	 * pass the link TRB. Used for ring expansion.
+	 */
+	unsigned		waiting_deq_ptr:1;
 };
 
 struct xhci_erst_entry {
-- 
1.7.4.1



