If the room_on_ring() check fails, try to expand the ring and check again.
When expanding a ring, use a cached ring or allocate new segments, link the
original ring to the new ring or segments, and update the original ring's
segment count and last segment pointer.

Signed-off-by: Andiry Xu <andiry.xu@xxxxxxx>
---
 drivers/usb/host/xhci-mem.c  |   97 +++++++++++++++++++++++++++++++++++++++---
 drivers/usb/host/xhci-ring.c |   47 +++++++++++++++-----
 drivers/usb/host/xhci.h      |    3 +
 3 files changed, 129 insertions(+), 18 deletions(-)

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index a1b0033..1e68c57 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -127,6 +127,34 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		(unsigned long long)next->dma);
 }
 
+/*
+ * Link the ring to the new segments.
+ * Set Toggle Cycle for the new ring if needed.
+ */
+static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		struct xhci_segment *first, struct xhci_segment *last,
+		unsigned int num_segs, bool link_trbs, bool isoc)
+{
+	struct xhci_segment *next;
+
+	if (!ring || !first || !last)
+		return;
+
+	next = ring->enq_seg->next;
+	xhci_link_segments(xhci, ring->enq_seg, first, link_trbs, isoc);
+	xhci_link_segments(xhci, last, next, link_trbs, isoc);
+	ring->num_segs += num_segs;
+	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
+
+	if (link_trbs && ring->enq_seg == ring->last_seg) {
+		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
+			&= ~cpu_to_le32(LINK_TOGGLE);
+		last->trbs[TRBS_PER_SEGMENT-1].link.control
+			|= cpu_to_le32(LINK_TOGGLE);
+		ring->last_seg = last;
+	}
+}
+
 /* XXX: Do we need the hcd structure in all these functions? */
 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
@@ -296,6 +324,67 @@ static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
 	INIT_LIST_HEAD(&ring->td_list);
 }
 
+/*
+ * Expand an existing ring.
+ * Look for a cached ring or allocate a new ring which has same segment numbers
+ * and link the two rings.
+ */
+int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_virt_device *xdev,
+		struct xhci_ring *ring, unsigned int num_trbs,
+		bool link_trbs, bool isoc, gfp_t flags)
+{
+	struct xhci_ring *new_ring = NULL;
+	struct xhci_segment *first;
+	struct xhci_segment *last;
+	unsigned int num_segs;
+	unsigned int num_segs_needed;
+	int i, num, ret;
+
+	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
+				(TRBS_PER_SEGMENT - 1);
+
+	/* Allocate number of segments we needed, or double the ring size */
+	num_segs = ring->num_segs > num_segs_needed ?
+			ring->num_segs : num_segs_needed;
+
+	/* Attempt to use the ring cache */
+	if (xdev && xdev->num_rings_cached > 0) {
+		num = xdev->num_rings_cached;
+		for (i = 0; i < num; i++) {
+			if (xdev->ring_cache[i]->num_segs == num_segs) {
+				new_ring = xdev->ring_cache[i];
+				/* Use the last cached ring to fill the hole */
+				if (i < num - 1) {
+					xdev->ring_cache[i] =
+						xdev->ring_cache[num - 1];
+				}
+				xdev->ring_cache[num - 1] = NULL;
+				xdev->num_rings_cached--;
+				xhci_reinit_cached_ring(xhci, new_ring,
+						ring->cycle_state, isoc);
+				first = new_ring->first_seg;
+				last = new_ring->last_seg;
+				break;
+			}
+		}
+	}
+
+	if (!new_ring) {
+		ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
+				num_segs, ring->cycle_state,
+				link_trbs, isoc, flags);
+		if (ret)
+			return -ENOMEM;
+	}
+
+	xhci_link_rings(xhci, ring, first, last, num_segs, link_trbs, isoc);
+	xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
+			ring->num_segs);
+
+	kfree(new_ring);
+	return 0;
+}
+
 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
 
 static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -1357,15 +1446,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
 	/* Set up the endpoint ring */
-	/*
-	 * Isochronous endpoint ring needs bigger size because one isoc URB
-	 * carries multiple packets and it will insert multiple tds to the
-	 * ring.
-	 * This should be replaced with dynamic ring resizing in the future.
-	 */
 	if (usb_endpoint_xfer_isoc(&ep->desc))
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 8, 1, true, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, 1, true, true, mem_flags);
 	else
 		virt_dev->eps[ep_index].new_ring =
 			xhci_ring_alloc(xhci, 1, 1, true, false, mem_flags);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f914c2d..34c5495 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2390,9 +2390,12 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
  * FIXME allocate segments if the ring is full.
  */
-static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
+static int prepare_ring(struct xhci_hcd *xhci, struct xhci_virt_device *xdev,
+		struct xhci_ring *ep_ring, u32 ep_state, unsigned int num_trbs,
+		bool isoc, gfp_t mem_flags)
 {
+	unsigned int num_trbs_needed;
+
 	/* Make sure the endpoint has been added to xHC schedule */
 	switch (ep_state) {
 	case EP_STATE_DISABLED:
@@ -2420,11 +2423,32 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		 */
 		return -EINVAL;
 	}
-	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
-		/* FIXME allocate more room */
-		xhci_err(xhci, "ERROR no room on ep ring\n");
-		return -ENOMEM;
-	}
+
+	while (1) {
+		if (room_on_ring(xhci, ep_ring, num_trbs))
+			break;
+
+		if (ep_ring == xhci->cmd_ring) {
+			xhci_err(xhci, "Do not support expand command ring\n");
+			return -ENOMEM;
+		}
+
+		if (ep_ring->enq_seg == ep_ring->deq_seg &&
+				ep_ring->dequeue > ep_ring->enqueue) {
+			xhci_err(xhci, "Can not expand the ring while dequeue "
+				"pointer has not passed the link TRB\n");
+			return -ENOMEM;
+		}
+
+		xhci_dbg(xhci, "ERROR no room on ep ring, "
+			"try ring expansion\n");
+		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
+		if (xhci_ring_expansion(xhci, xdev, ep_ring, num_trbs_needed,
+					true, isoc, mem_flags)) {
+			xhci_err(xhci, "Ring expansion failed\n");
+			return -ENOMEM;
+		}
+	};
 
 	if (enqueue_is_link_trb(ep_ring)) {
 		struct xhci_ring *ring = ep_ring;
@@ -2486,7 +2510,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		return -EINVAL;
 	}
 
-	ret = prepare_ring(xhci, ep_ring,
+	ret = prepare_ring(xhci, xdev, ep_ring,
 			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
 			num_trbs, isoc, mem_flags);
 	if (ret)
@@ -3399,8 +3423,9 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* Check the ring to guarantee there is enough room for the whole urb.
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
-	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			num_trbs, true, mem_flags);
+	ret = prepare_ring(xhci, xdev, ep_ring,
+			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+			num_trbs, true, mem_flags);
 	if (ret)
 		return ret;
 
@@ -3458,7 +3483,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 	if (!command_must_succeed)
 		reserved_trbs++;
 
-	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
+	ret = prepare_ring(xhci, NULL, xhci->cmd_ring, EP_STATE_RUNNING,
 			reserved_trbs, false, GFP_ATOMIC);
 	if (ret < 0) {
 		xhci_err(xhci, "ERR: No room for command on command ring\n");
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 981dd83f..52c319b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1613,6 +1613,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
 		struct usb_device *udev, struct usb_host_endpoint *ep,
 		gfp_t mem_flags);
 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_virt_device *xdev,
+		struct xhci_ring *ring, unsigned int num_trbs,
+		bool link_trbs, bool isoc, gfp_t flags);
 void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index);
-- 
1.7.4.1
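
A note for readers following the sizing math in xhci_ring_expansion() above:
each segment contributes TRBS_PER_SEGMENT - 1 usable TRBs (the last entry is
reserved for the link TRB), the needed segment count is rounded up, and the
ring grows by at least its current number of segments, so it at least doubles.
A minimal standalone sketch of that arithmetic follows; it is not part of the
patch, and the constant and sample values are illustrative only.

/* Standalone illustration of the segment-count math; not part of the patch. */
#include <stdio.h>

#define TRBS_PER_SEGMENT 64	/* illustrative value; the driver defines its own */

/* Round up: each segment offers TRBS_PER_SEGMENT - 1 usable TRBs. */
static unsigned int segs_needed(unsigned int num_trbs)
{
	return (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) / (TRBS_PER_SEGMENT - 1);
}

int main(void)
{
	unsigned int ring_segs = 2;	/* current ring size, sample value */
	unsigned int num_trbs = 100;	/* TRBs still lacking room, sample value */
	unsigned int needed = segs_needed(num_trbs);
	/* Grow by max(current size, needed), so the ring at least doubles. */
	unsigned int num_segs = ring_segs > needed ? ring_segs : needed;

	printf("%u TRBs need %u segment(s); expanding by %u segment(s)\n",
	       num_trbs, needed, num_segs);
	return 0;
}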