Close inspection shows that xhci_queue_isoc_tx() can only fail if
prepare_transfer() gets an error from usb_hcd_link_urb_to_ep() for the
first TD. The call to prepare_ring() cannot fail because an initial check
is made to ensure the ring has enough space for all the TDs. The check for
'num_tds' being less than one is pointless here, and the 'running_total'
check only guards against a simple coding error; both can be deleted.

Isoc setup is the only code path that calls prepare_transfer() with
td_index != 0, so there is no point in calling prepare_ring() again for
those TDs. prepare_ring() is still called twice for the first TD; however,
since prepare_transfer() doesn't depend on the number of TDs, the initial
prepare_ring() call can be replaced by prepare_transfer(), with the
existing prepare_transfer() call made only for subsequent TDs.

This all means that xhci_queue_isoc_tx() can no longer fail. Delete the
(possibly dubious) 'cleanup' code and change the return type to void.

Signed-off-by: David Laight <david.laight@xxxxxxxxxx>
---
This is compile tested only because I don't have an isoc device; however,
it should be OK. I've just ordered a cheap USB 'sound card' - presumably
that will let me test isochronous transfers.

Note: This patch will only apply cleanly if my earlier patches are applied
first.

A simplified sketch of the reworked call flow is appended after the patch
for reference.
---
 drivers/usb/host/xhci-ring.c | 90 +++++++++++++-------------------------------
 drivers/usb/host/xhci.h      |  1 -
 2 files changed, 27 insertions(+), 64 deletions(-)

diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d0ef8b8..5480215 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2991,7 +2991,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	struct xhci_ring *ep_ring;
-	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+	struct xhci_ep_ctx *ep_ctx;
 
 	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
 	if (!ep_ring) {
@@ -3000,24 +3000,26 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		return -EINVAL;
 	}
 
-	ret = prepare_ring(xhci, ep_ring,
-			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			num_trbs, mem_flags);
-	if (ret)
-		return ret;
-
-	urb_priv = urb->hcpriv;
-	td = &urb_priv->td[td_index];
+	if (td_index == 0) {
+		ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
-	INIT_LIST_HEAD(&td->td_list);
-	INIT_LIST_HEAD(&td->cancelled_td_list);
+		ret = prepare_ring(xhci, ep_ring,
+				le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+				num_trbs, mem_flags);
+		if (unlikely(ret))
+			return ret;
 
-	if (td_index == 0) {
 		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
 		if (unlikely(ret))
 			return ret;
 	}
 
+	urb_priv = urb->hcpriv;
+	td = &urb_priv->td[td_index];
+
+	INIT_LIST_HEAD(&td->td_list);
+	INIT_LIST_HEAD(&td->cancelled_td_list);
+
 	td->urb = urb;
 	/* Add this TD to the tail of the endpoint ring's TD list */
 	list_add_tail(&td->td_list, &ep_ring->td_list);
@@ -3647,7 +3649,7 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
 }
 
 /* This is for isoc transfer */
-static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+static void xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
@@ -3657,7 +3659,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_generic_trb *start_trb;
 	bool first_trb;
 	u32 field, length_field;
-	int running_total, trb_buff_len, td_len, td_remain_len, ret;
+	int running_total, trb_buff_len, td_len, td_remain_len;
 	u64 start_addr, addr;
 	int i, j;
 	bool more_trbs_coming;
@@ -3665,10 +3667,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
 	num_tds = urb->number_of_packets;
-	if (num_tds < 1) {
-		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
-		return -EINVAL;
-	}
 
 	start_addr = (u64) urb->transfer_dma;
 	start_trb = &ep_ring->enqueue->generic;
@@ -3698,13 +3696,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
 
-		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-				urb->stream_id, trbs_per_td, urb, i, mem_flags);
-		if (ret < 0) {
-			if (i == 0)
-				return ret;
-			goto cleanup;
-		}
+		/* prepare_transfer() was called earlier for the first TRB.
+		 * It cannot fail for later TRBs.
+		 */
+		if (i != 0)
+			prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+					urb->stream_id, trbs_per_td, urb, i,
+					mem_flags);
 
 		td = &urb_priv->td[i];
 		for (j = 0; j < trbs_per_td; j++) {
@@ -3782,13 +3780,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			addr += trb_buff_len;
 			td_remain_len -= trb_buff_len;
 		}
-
-		/* Check TD length */
-		if (running_total != td_len) {
-			xhci_err(xhci, "ISOC TD length unmatch\n");
-			ret = -EINVAL;
-			goto cleanup;
-		}
 	}
 
 	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
@@ -3798,31 +3789,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
 
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, start_trb);
-	return 0;
-cleanup:
-	/* Clean up a partially enqueued isoc transfer. */
-
-	for (i--; i >= 0; i--)
-		list_del_init(&urb_priv->td[i].td_list);
-
-	/* Use the first TD as a temporary variable to turn the TDs we've queued
-	 * into No-ops with a software-owned cycle bit. That way the hardware
-	 * won't accidentally start executing bogus TDs when we partially
-	 * overwrite them. td->first_trb and td->start_seg are already set.
-	 */
-	urb_priv->td[0].last_trb = ep_ring->enqueue;
-	/* Every TRB except the first & last will have its cycle bit flipped. */
-	td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
-
-	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
-	ep_ring->enqueue = urb_priv->td[0].first_trb;
-	ep_ring->enq_seg = urb_priv->td[0].start_seg;
-	/* The cycle bit in the first TRB won't be modified, get its inverse. */
-	ep_ring->cycle_state = (ep_ring->enqueue->generic.field[3] &
-				TRB_CYCLE) ^ TRB_CYCLE;
-	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
-	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
-	return ret;
 }
 
 /*
@@ -3836,7 +3802,6 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_virt_device *xdev;
-	struct xhci_ring *ep_ring;
 	struct xhci_ep_ctx *ep_ctx;
 	int start_frame;
 	int xhci_interval;
@@ -3845,7 +3810,6 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	int ret;
 
 	xdev = xhci->devs[slot_id];
-	ep_ring = xdev->eps[ep_index].ring;
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
 	num_trbs = 0;
@@ -3856,8 +3820,8 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* Check the ring to guarantee there is enough room for the whole urb.
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
-	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			num_trbs, mem_flags);
+	ret = prepare_transfer(xhci, xdev, ep_index,
+			urb->stream_id, num_trbs, urb, 0, mem_flags);
 	if (ret)
 		return ret;
 
@@ -3889,9 +3853,9 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 				urb->dev->speed == USB_SPEED_FULL)
 			urb->interval /= 8;
 	}
-	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
 
-	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
+	xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
+	return 0;
 }
 
 /****  Command Ring Operations  ****/
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a7d8fdd..118f90b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1331,7 +1331,6 @@ struct xhci_ring {
 	unsigned int		stream_id;
 	unsigned int		num_segs;
 	unsigned int		num_trbs_free;
-	unsigned int		num_trbs_free_temp;
 	enum xhci_ring_type	type;
 	bool			last_td_was_short;
};
-- 
1.8.1.2
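
Not part of the patch - just for readers following the argument in the
commit message without the xHCI sources to hand, here is a tiny user-space
model of the reworked call ordering. Everything in it (the toy_* names,
types and helpers) is an invented stand-in rather than the kernel's
prepare_ring()/prepare_transfer()/usb_hcd_link_urb_to_ep(), and all real
TRB bookkeeping is elided; it only shows why, once the ring-space check
for the whole URB and the URB linking happen on the td_index == 0 call,
the per-TD calls made from xhci_queue_isoc_tx() have nothing left that
can fail.

/* Toy model of the reworked isoc submission flow - NOT kernel code. */
#include <stdio.h>
#include <stdbool.h>

struct toy_ring { int trbs_free; };
struct toy_urb  { int num_tds; int trbs_per_td; bool linked; };

/* Stand-in for prepare_ring(): fails only if the ring lacks space. */
static int toy_prepare_ring(struct toy_ring *ring, int num_trbs)
{
	return (ring->trbs_free < num_trbs) ? -1 : 0;
}

/* Stand-in for usb_hcd_link_urb_to_ep(): the only real failure point. */
static int toy_link_urb(struct toy_urb *urb)
{
	urb->linked = true;
	return 0;
}

/*
 * Stand-in for prepare_transfer(): only the first TD checks ring state
 * and links the URB; later TDs just book-keep and cannot fail.
 */
static int toy_prepare_transfer(struct toy_ring *ring, struct toy_urb *urb,
				int num_trbs, int td_index)
{
	int ret;

	if (td_index == 0) {
		ret = toy_prepare_ring(ring, num_trbs);
		if (ret)
			return ret;
		ret = toy_link_urb(urb);
		if (ret)
			return ret;
	}
	/* ...init the TD and add it to the ring's TD list... */
	return 0;
}

/* Stand-in for xhci_queue_isoc_tx(): cannot fail, hence void. */
static void toy_queue_isoc_tx(struct toy_ring *ring, struct toy_urb *urb)
{
	int i;

	for (i = 0; i < urb->num_tds; i++) {
		/* TD 0 was prepared by the caller; later TDs cannot fail. */
		if (i != 0)
			toy_prepare_transfer(ring, urb, urb->trbs_per_td, i);
		/* ...queue the TRBs for TD i... */
		ring->trbs_free -= urb->trbs_per_td;
	}
}

/* Stand-in for xhci_queue_isoc_tx_prepare(): the only place that can fail. */
static int toy_queue_isoc_tx_prepare(struct toy_ring *ring, struct toy_urb *urb)
{
	/* Size the single space check for the whole URB up front. */
	int ret = toy_prepare_transfer(ring, urb,
				       urb->num_tds * urb->trbs_per_td, 0);
	if (ret)
		return ret;

	toy_queue_isoc_tx(ring, urb);
	return 0;
}

int main(void)
{
	struct toy_ring ring = { .trbs_free = 64 };
	struct toy_urb urb = { .num_tds = 8, .trbs_per_td = 2 };

	printf("submit: %d (0 means queued)\n",
	       toy_queue_isoc_tx_prepare(&ring, &urb));
	printf("trbs_free now: %d\n", ring.trbs_free);
	return 0;
}

The design point is visible in toy_queue_isoc_tx_prepare(): sizing the one
ring-space check for every TRB of the URB before anything is queued is what
lets the later per-TD calls be treated as infallible, which in turn is what
allows the void return and the removal of the partial-enqueue cleanup path.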