From 9153dbe78c7daa0e6f18274919ecab6d746a749a Mon Sep 17 00:00:00 2001
From: Andiry Xu <andiry.xu@xxxxxxx>
Date: Thu, 8 Apr 2010 17:44:28 +0800
Subject: [PATCH 1/3] xHCI: Introduce urb_priv structure

Add the urb_priv data structure to the xHCI driver. This structure allows
multiple xhci TDs to be linked to one urb, which is essential for
isochronous transfers. For a non-isochronous urb, only one TD is needed;
for an isochronous urb, the number of TDs equals urb->number_of_packets.

The length field of urb_priv indicates the number of TDs in the urb.
The td_cnt field indicates the number of TDs already processed by the
xHC. When td_cnt matches length, the urb can be given back to usbcore.

When an urb is dequeued or cancelled, add all of its TDs to the endpoint's
cancelled_td_list. When processing a cancelled TD, increase td_cnt; when
td_cnt matches urb_priv->length, give back the cancelled urb.

Signed-off-by: Andiry Xu <andiry.xu@xxxxxxx>
Signed-off-by: Libin Yang <libin.yang@xxxxxxx>
---
 drivers/usb/host/xhci-mem.c  |   16 ++++
 drivers/usb/host/xhci-ring.c |  113 +++++++++++++++++++++++++++++++-----------
 drivers/usb/host/xhci.c      |   46 +++++++++++++++--
 drivers/usb/host/xhci.h      |    7 +++
 4 files changed, 147 insertions(+), 35 deletions(-)
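
[Illustration, not part of the patch: the bookkeeping described above boils
down to a counted array of TD pointers hung off urb->hcpriv. The sketch below
shows the idea with hypothetical alloc_urb_priv()/complete_one_td() helpers;
the driver's real allocation lives in xhci_urb_enqueue() and the completion
checks live in the xhci-ring.c hunks below.]

/*
 * Sketch of the urb_priv bookkeeping.  alloc_urb_priv() and
 * complete_one_td() are illustrative helpers, not driver functions.
 */
#include <stdbool.h>
#include <stdlib.h>

struct xhci_td;				/* opaque here; the driver defines it */

struct urb_priv {
	unsigned short length;		/* number of TDs backing this URB */
	int td_cnt;			/* TDs already processed by the xHC */
	struct xhci_td *td[];		/* one pointer per TD (td[0] in the patch) */
};

/* One TD per isochronous packet, a single TD for any other transfer type. */
static struct urb_priv *alloc_urb_priv(bool is_isoc, int number_of_packets)
{
	int size = is_isoc ? number_of_packets : 1;
	struct urb_priv *priv;

	priv = calloc(1, sizeof(*priv) + size * sizeof(struct xhci_td *));
	if (!priv)
		return NULL;
	priv->length = size;
	priv->td_cnt = 0;
	return priv;
}

/* Called once per retired TD; returns true when the urb may be given back. */
static bool complete_one_td(struct urb_priv *priv)
{
	priv->td_cnt++;
	return priv->td_cnt == priv->length;
}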
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bba9b19..32f8cbc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -886,6 +886,22 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 	return command;
 }
 
+void urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
+{
+	int last;
+
+	if (!urb_priv)
+		return;
+
+	last = urb_priv->length - 1;
+	if (last >= 0) {
+		int i;
+		for (i = 0; i <= last; i++)
+			kfree(urb_priv->td[i]);
+	}
+	kfree(urb_priv);
+}
+
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command)
 {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6ba841b..7351134 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -492,16 +492,23 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 		struct xhci_td *cur_td, int status, char *adjective)
 {
 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct urb *urb;
+	struct urb_priv *urb_priv;
 
-	cur_td->urb->hcpriv = NULL;
-	usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
-	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+	urb = cur_td->urb;
+	urb_priv = urb->hcpriv;
 
-	spin_unlock(&xhci->lock);
-	usb_hcd_giveback_urb(hcd, cur_td->urb, status);
-	kfree(cur_td);
-	spin_lock(&xhci->lock);
-	xhci_dbg(xhci, "%s URB given back\n", adjective);
+	/* Only giveback urb when this is the last td in urb */
+	if (urb_priv->td_cnt == urb_priv->length) {
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
+
+		spin_unlock(&xhci->lock);
+		usb_hcd_giveback_urb(hcd, urb, status);
+		urb_free_priv(xhci, urb_priv);
+		spin_lock(&xhci->lock);
+		xhci_dbg(xhci, "%s URB given back\n", adjective);
+	}
 }
 
 /*
@@ -524,6 +531,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	struct list_head *entry;
 	struct xhci_td *cur_td = 0;
 	struct xhci_td *last_unlinked_td;
+	struct urb *urb;
+	struct urb_priv *urb_priv;
 
 	struct xhci_dequeue_state deq_state;
@@ -589,6 +598,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 				struct xhci_td, cancelled_td_list);
 		list_del(&cur_td->cancelled_td_list);
+		urb = cur_td->urb;
+		urb_priv = urb->hcpriv;
+		urb_priv->td_cnt++;
+
 		/* Clean up the cancelled URB */
 		/* Doesn't matter what we pass for status, since the core will
 		 * just overwrite it (because the URB has been unlinked).
@@ -632,6 +645,8 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 	struct xhci_ring *ring;
 	struct xhci_td *cur_td;
 	int ret, i, j;
+	struct urb *urb;
+	struct urb_priv *urb_priv;
 
 	ep = (struct xhci_virt_ep *) arg;
 	xhci = ep->xhci;
@@ -696,6 +711,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 			cur_td = list_first_entry(&ring->td_list,
 					struct xhci_td,
 					td_list);
+
+			urb = cur_td->urb;
+			urb_priv = urb->hcpriv;
+			urb_priv->td_cnt++;
+
 			list_del(&cur_td->td_list);
 			if (!list_empty(&cur_td->cancelled_td_list))
 				list_del(&cur_td->cancelled_td_list);
@@ -707,6 +727,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 					&temp_ep->cancelled_td_list,
 					struct xhci_td,
 					cancelled_td_list);
+
+			urb = cur_td->urb;
+			urb_priv = urb->hcpriv;
+			urb_priv->td_cnt++;
+
 			list_del(&cur_td->cancelled_td_list);
 			xhci_giveback_urb_in_irq(xhci, cur_td,
 					-ESHUTDOWN, "killed");
@@ -1125,7 +1150,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	struct urb *urb = 0;
 	int status = -EINPROGRESS;
 	struct xhci_ep_ctx *ep_ctx;
+	struct urb_priv *urb_priv;
 	u32 trb_comp_code;
+	bool urb_done = false;
 
 	xhci_dbg(xhci, "In %s\n", __func__);
 	slot_id = TRB_TO_SLOT_ID(event->flags);
@@ -1441,6 +1468,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 td_cleanup:
 		/* Clean up the endpoint's TD list */
 		urb = td->urb;
+		urb_priv = urb->hcpriv;
+
 		/* Do one last check of the actual transfer length.
 		 * If the host controller said we transferred more data than
 		 * the buffer length, urb->actual_length will be a very big
@@ -1473,15 +1502,22 @@ td_cleanup:
 			(trb_comp_code != COMP_STALL &&
 				trb_comp_code != COMP_BABBLE)) {
 			kfree(td);
+			urb_priv->td[urb_priv->td_cnt] = NULL;
+			urb_priv->td_cnt++;
+			/* Giveback urb when all the tds are completed */
+			if (urb_priv->td_cnt == urb_priv->length) {
+				urb_free_priv(xhci, urb_priv);
+				urb_done = true;
+			}
+		} else {
+			urb_done = true;
 		}
-		urb->hcpriv = NULL;
 	}
 cleanup:
 	inc_deq(xhci, xhci->event_ring, true);
 	xhci_set_hc_event_deq(xhci);
 
-	/* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
-	if (urb) {
+	if (urb && urb_done) {
 		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
 		xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
 				urb, urb->actual_length, status);
@@ -1628,34 +1664,40 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		unsigned int ep_index,
 		unsigned int num_trbs,
 		struct urb *urb,
-		struct xhci_td **td,
+		unsigned int td_index,
 		gfp_t mem_flags)
 {
 	int ret;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
 	ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
 			ep_ctx->ep_info & EP_STATE_MASK,
 			num_trbs, mem_flags);
 	if (ret)
 		return ret;
 
-	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
-	if (!*td)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&(*td)->td_list);
-	INIT_LIST_HEAD(&(*td)->cancelled_td_list);
-	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
-	if (unlikely(ret)) {
-		kfree(*td);
-		return ret;
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[td_index];
+
+	INIT_LIST_HEAD(&td->td_list);
+	INIT_LIST_HEAD(&td->cancelled_td_list);
+
+	if (td_index == 0) {
+		ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+		if (unlikely(ret)) {
+			urb_free_priv(xhci, urb_priv);
+			return ret;
+		}
 	}
-	(*td)->urb = urb;
-	urb->hcpriv = (void *) (*td);
+	td->urb = urb;
 	/* Add this TD to the tail of the endpoint ring's TD list */
-	list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
-	(*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
-	(*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
+	list_add_tail(&td->td_list, &xdev->eps[ep_index].ring->td_list);
+	td->start_seg = xdev->eps[ep_index].ring->enq_seg;
+	td->first_trb = xdev->eps[ep_index].ring->enqueue;
+
+	urb_priv->td[td_index] = td;
 
 	return 0;
 }
@@ -1794,6 +1836,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 {
 	struct xhci_ring *ep_ring;
 	unsigned int num_trbs;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	struct scatterlist *sg;
 	int num_sgs;
@@ -1809,9 +1852,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	num_sgs = urb->num_sgs;
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
-			ep_index, num_trbs, urb, &td, mem_flags);
+			ep_index, num_trbs, urb, 0, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
+
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs.  The ring's cycle
@@ -1927,6 +1974,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	int num_trbs;
 	struct xhci_generic_trb *start_trb;
@@ -1968,10 +2016,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			num_trbs);
 	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-			num_trbs, urb, &td, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
 
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs.  The ring's cycle
@@ -2052,6 +2103,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
 	u32 field, length_field;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 
 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
@@ -2076,10 +2128,13 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0)
 		num_trbs++;
 	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
-			urb, &td, mem_flags);
+			urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
 
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs.  The ring's cycle
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 492a61c..f432de4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -681,7 +681,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 	unsigned long flags;
 	int ret = 0;
 	unsigned int slot_id, ep_index;
-
+	struct urb_priv *urb_priv;
+	int size, i;
 
 	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
 		return -EINVAL;
@@ -701,6 +702,30 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		ret = -ESHUTDOWN;
 		goto exit;
 	}
+
+	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
+		size = urb->number_of_packets;
+	else
+		size = 1;
+
+	urb_priv = kzalloc(sizeof(struct urb_priv) +
+				size * sizeof(struct xhci_td *), mem_flags);
+	if (!urb_priv)
+		return -ENOMEM;
+
+	for (i = 0; i < size; i++) {
+		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
+		if (!urb_priv->td[i]) {
+			urb_priv->length = i;
+			urb_free_priv(xhci, urb_priv);
+			return -ENOMEM;
+		}
+	}
+
+	urb_priv->length = size;
+	urb_priv->td_cnt = 0;
+	urb->hcpriv = urb_priv;
+
 	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
 		/* Check to see if the max packet size for the default control
 		 * endpoint changed during FS device enumeration
@@ -741,6 +766,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 exit:
 	return ret;
 dying:
+	urb_free_priv(xhci, urb_priv);
 	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
 			"non-responsive xHCI host.\n",
 			urb->ep->desc.bEndpointAddress, urb);
@@ -782,9 +808,10 @@ dying:
 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
 	unsigned long flags;
-	int ret;
+	int ret, i;
 	u32 temp;
 	struct xhci_hcd *xhci;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
@@ -799,12 +826,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	if (temp == 0xffffffff) {
 		xhci_dbg(xhci, "HW died, freeing TD.\n");
-		td = (struct xhci_td *) urb->hcpriv;
+		urb_priv = urb->hcpriv;
 
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
-		kfree(td);
+		urb_free_priv(xhci, urb_priv);
 		return ret;
 	}
 	if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -827,9 +854,16 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	ep_ring = ep->ring;
 	xhci_dbg(xhci, "Endpoint ring:\n");
 	xhci_debug_ring(xhci, ep_ring);
-	td = (struct xhci_td *) urb->hcpriv;
-	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+	urb_priv = urb->hcpriv;
+
+	for (i = 0; i < urb_priv->length; i++) {
+		td = urb_priv->td[i];
+		if (td)
+			list_add_tail(&td->cancelled_td_list,
+					&ep->cancelled_td_list);
+	}
+
 	/* Queue a stop endpoint command, but only if this is
 	 * the first cancellation to be handled.
 	 */
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e5eb09b..8308d51 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1015,6 +1015,12 @@ struct xhci_scratchpad {
 	dma_addr_t *sp_dma_buffers;
 };
 
+struct urb_priv {
+	u16	length;
+	int	td_cnt;
+	struct xhci_td *td[0];
+};
+
 /*
  * Each segment table entry is 4*32bits long.  1K seems like an ok size:
  * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table,
@@ -1241,6 +1247,7 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 		bool allocate_in_ctx, bool allocate_completion,
 		gfp_t mem_flags);
+void urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv);
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command);
-- 
1.6.0.4