When an endpoint on a device under an xHCI host controller stalls, the
host controller driver must let the hardware know that the USB core has
successfully cleared the halt condition.  The HCD submits a Reset
Endpoint Command and must wait for that command to finish before
processing any queued transfers.  The command makes the xHCI hardware
reset its sequence number for the endpoint, since the device will
expect a sequence number of zero after the halt condition is cleared.
To support drivers that want to clear the halt condition in interrupt
context, the xHCI driver invokes a completion callback when it receives
the command completion event for the Reset Endpoint Command.

Signed-off-by: Sarah Sharp <sarah.a.sharp@xxxxxxxxxxxxxxx>
---
 drivers/usb/host/xhci-hcd.c  |   40 ++++++++++++++++++++++++++++
 drivers/usb/host/xhci-mem.c  |    1 +
 drivers/usb/host/xhci-pci.c  |    1 +
 drivers/usb/host/xhci-ring.c |   60 ++++++++++++++++++++++++++++++++++++++---
 drivers/usb/host/xhci.h      |   21 +++++++++++++-
 5 files changed, 116 insertions(+), 7 deletions(-)
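Note: struct endpoint_reset_callback and the hc_driver
start_endpoint_reset hook are introduced elsewhere in this series, not
in this patch.  As a rough sketch only, a device driver clearing a halt
from interrupt context might use the mechanism as below, assuming the
structure carries the fields implied by the xHCI code in this patch
(dev, ep, and a callback function pointer); example_reset_done() and
example_clear_halt_in_irq() are made-up names for illustration.

static void example_reset_done(struct endpoint_reset_callback *cb)
{
	/* The endpoint's sequence number has been reset; it is now safe
	 * to queue new URBs to cb->ep.  The driver owns the callback
	 * structure and must free it here.
	 */
	kfree(cb);
}

static void example_clear_halt_in_irq(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct endpoint_reset_callback *cb;

	cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	if (!cb)
		return;
	cb->dev = udev;
	cb->ep = ep;
	cb->callback = example_reset_done;
	/* Queues the Reset Endpoint Command; cb->callback runs when the
	 * command completion event arrives on the event ring.
	 */
	hcd->driver->start_endpoint_reset(hcd, cb);
}

The callback is invoked with xhci->lock dropped (see
handle_reset_ep_completion() below), so it may resubmit URBs directly.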
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index dba3e07..98cef9b 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -1026,6 +1026,46 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_zero_in_ctx(virt_dev);
 }
 
+/* Deal with stalled endpoints.  The core should have sent the control message
+ * to clear the halt condition.  However, we need to make the xHCI hardware
+ * reset its sequence number, since a device will expect a sequence number of
+ * zero after the halt condition is cleared.
+ */
+void xhci_start_endpoint_reset(struct usb_hcd *hcd,
+		struct endpoint_reset_callback *callback)
+{
+	struct xhci_hcd *xhci;
+	int slot_id;
+	unsigned int ep_index;
+	struct xhci_command_completion *reset_completion;
+	unsigned long flags;
+	int ret;
+
+	xhci = hcd_to_xhci(hcd);
+	slot_id = callback->dev->slot_id;
+	ep_index = xhci_get_endpoint_index(&callback->ep->desc);
+
+	reset_completion = kzalloc(sizeof(*reset_completion), GFP_ATOMIC);
+	if (!reset_completion) {
+		xhci_warn(xhci, "Could not allocate reset command completion\n");
+		return;
+	}
+	INIT_LIST_HEAD(&reset_completion->cmd_list);
+	reset_completion->data = callback;
+
+	xhci_dbg(xhci, "Queueing reset endpoint command\n");
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_reset_ep(xhci, slot_id, ep_index);
+	if (!ret) {
+		list_add_tail(&reset_completion->cmd_list, &xhci->devs[slot_id]->cmd_list);
+		xhci_ring_cmd_db(xhci);
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	if (ret)
+		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
+}
+
 /*
  * At this point, the struct usb_device is about to go away, the device has
  * disconnected, and all traffic has been stopped and the endpoints have been
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c8a72de..7b9448c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -260,6 +260,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		goto fail;
 
 	init_completion(&dev->cmd_completion);
+	INIT_LIST_HEAD(&dev->cmd_list);
 
 	/*
 	 * Point to output device context in dcbaa; skip the output control
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1462709..ab02918 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -117,6 +117,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	.free_dev =		xhci_free_dev,
 	.add_endpoint =		xhci_add_endpoint,
 	.drop_endpoint =	xhci_drop_endpoint,
+	.start_endpoint_reset =	xhci_start_endpoint_reset,
 	.check_bandwidth =	xhci_check_bandwidth,
 	.reset_bandwidth =	xhci_reset_bandwidth,
 	.address_device =	xhci_address_device,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d5b9529..96a7089 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -603,13 +603,45 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
+static void handle_reset_ep_completion(struct xhci_hcd *xhci,
+		struct xhci_event_cmd *event,
+		union xhci_trb *trb)
+{
+	int slot_id;
+	struct xhci_command_completion *reset_completion;
+	struct endpoint_reset_callback *callback_info;
-static void handle_cmd_completion(struct xhci_hcd *xhci,
+	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+	if (list_empty(&xhci->devs[slot_id]->cmd_list)) {
+		xhci_warn(xhci, "WARN: Spurious reset endpoint command completion\n");
+		return;
+	}
+	reset_completion = list_entry(xhci->devs[slot_id]->cmd_list.next,
+			struct xhci_command_completion, cmd_list);
+	callback_info = (struct endpoint_reset_callback *) reset_completion->data;
+	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
+			(unsigned int) GET_COMP_CODE(event->status));
+	list_del(&reset_completion->cmd_list);
+	/* Update command and event ring dequeue pointer before calling callback */
+	inc_deq(xhci, xhci->cmd_ring, false);
+	inc_deq(xhci, xhci->event_ring, true);
+	xhci_set_hc_event_deq(xhci);
+
+	spin_unlock(&xhci->lock);
+	callback_info->callback(callback_info);
+	/* callback_info is freed by the driver */
+	spin_lock(&xhci->lock);
+	kfree(reset_completion);
+}
+
+/* Returns 1 if the event ring has already been updated */
+static int handle_cmd_completion(struct xhci_hcd *xhci,
 		struct xhci_event_cmd *event)
 {
 	int slot_id = TRB_TO_SLOT_ID(event->flags);
 	u64 cmd_dma;
 	dma_addr_t cmd_dequeue_dma;
+	int ret = 0;
 
 	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
 	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 			xhci->cmd_ring->dequeue);
@@ -617,12 +649,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
 	if (cmd_dequeue_dma == 0) {
 		xhci->error_bitmask |= 1 << 4;
-		return;
+		return ret;
 	}
 	/* Does the DMA address match our internal dequeue pointer address? */
 	if (cmd_dma != (u64) cmd_dequeue_dma) {
 		xhci->error_bitmask |= 1 << 5;
-		return;
+		return ret;
 	}
 	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
 	case TRB_TYPE(TRB_ENABLE_SLOT):
@@ -653,12 +685,19 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	case TRB_TYPE(TRB_CMD_NOOP):
 		++xhci->noops_handled;
 		break;
+	case TRB_TYPE(TRB_RESET_EP):
+		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
+		ret = 1;
+		break;
 	default:
 		/* Skip over unknown commands on the event ring */
 		xhci->error_bitmask |= 1 << 6;
 		break;
 	}
-	inc_deq(xhci, xhci->cmd_ring, false);
+
+	if (!ret)
+		inc_deq(xhci, xhci->cmd_ring, false);
+	return ret;
 }
 
 static void handle_port_status(struct xhci_hcd *xhci,
@@ -1030,7 +1069,8 @@ void xhci_handle_event(struct xhci_hcd *xhci)
 	/* FIXME: Handle more event types. */
 	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
 	case TRB_TYPE(TRB_COMPLETION):
-		handle_cmd_completion(xhci, &event->event_cmd);
+		if (handle_cmd_completion(xhci, &event->event_cmd))
+			update_ptrs = 0;
 		break;
 	case TRB_TYPE(TRB_PORT_STATUS):
 		handle_port_status(xhci, event);
@@ -1656,3 +1696,13 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 	return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
 			trb_slot_id | trb_ep_index | type);
 }
+
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index)
+{
+	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 type = TRB_TYPE(TRB_RESET_EP);
+
+	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
+}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8936eeb..b721e6f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -609,6 +609,11 @@ struct xhci_device_control {
 #define ADD_EP(x)	(0x1 << x)
 
+struct xhci_command_completion {
+	struct list_head	cmd_list;
+	void			*data;
+};
+
 struct xhci_virt_device {
 	/*
 	 * Commands to the hardware are passed an "input context" that
@@ -629,6 +634,14 @@ struct xhci_virt_device {
 	 * have to restore the device state to the previous state
 	 */
 	struct xhci_ring	*new_ep_rings[31];
+	/*
+	 * "The Command Completion Events that result from processing the
+	 * commands shall be ordered with respect to their location in
+	 * the Command Ring."  If we place a new command completion list item
+	 * at the tail of the list, the head of the list is the first command
+	 * submitted to the hardware.
+	 */
+	struct list_head	cmd_list;
 	struct completion	cmd_completion;
 	/* Status of the last command issued for this device */
 	u32			cmd_status;
@@ -848,8 +861,8 @@ union xhci_trb {
 #define TRB_CONFIG_EP		12
 /* Evaluate Context Command */
 #define TRB_EVAL_CONTEXT	13
-/* Reset Transfer Ring Command */
-#define TRB_RESET_RING		14
+/* Reset Endpoint Command */
+#define TRB_RESET_EP		14
 /* Stop Transfer Ring Command */
 #define TRB_STOP_RING		15
 /* Set Transfer Ring Dequeue Pointer Command */
@@ -1128,8 +1141,10 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_start_endpoint_reset(struct usb_hcd *hcd, struct endpoint_reset_callback *callback);
 
 /* xHCI ring, segment, TRB, and TD functions */
 dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
@@ -1148,6 +1163,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id);
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
-- 
1.5.6.5
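P.S.  The cmd_list added to xhci_virt_device above is a plain FIFO:
because the hardware completes commands in ring order (per the spec
quote in the xhci.h comment), each completion event can be matched to
the oldest outstanding command without any per-command tag.  A
standalone userspace toy of that pattern, with illustrative names only
(this is not driver code):

#include <stdio.h>
#include <stdlib.h>

/* Toy model: submissions append at the tail, completions pop the
 * head, so completion N always pairs with the Nth-oldest command.
 */
struct cmd {
	struct cmd *next;
	int id;			/* stand-in for driver private data */
};

static struct cmd *head;
static struct cmd **tail = &head;

static void submit(int id)
{
	struct cmd *c = calloc(1, sizeof(*c));

	if (!c)
		exit(1);
	c->id = id;
	*tail = c;		/* newest goes on the tail */
	tail = &c->next;
}

static void complete(void)
{
	struct cmd *c = head;	/* oldest submission is the head */

	if (!c) {
		printf("spurious completion\n");
		return;
	}
	head = c->next;
	if (!head)
		tail = &head;
	printf("completion matched command %d\n", c->id);
	free(c);
}

int main(void)
{
	submit(1);
	submit(2);
	complete();		/* matches command 1 */
	complete();		/* matches command 2 */
	return 0;
}

Note also that handle_reset_ep_completion() deliberately advances both
the command and event ring dequeue pointers before dropping xhci->lock
and invoking the callback, so a callback that immediately resubmits
URBs neither deadlocks nor stalls event processing.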