Re: [PATCH 4/5] xHCI: Enable multiple interrupters

On Fri, Jun 10, 2011 at 04:51:42PM +0800, Andiry Xu wrote:
> This patch enables all the allocated interrupters, so devices can use
> different interrupters and get interrupts from them. When an interrupt
> is triggered, the irq handler only checks the corresponding interrupter
> and event ring.
> 
> When using MSI-X, the number of interrupters enabled will be equal to
> xhci->msix_count; when using MSI or PCI INTx, the number of interrupters
> enabled will be 1.

Why allocate only one event ring when MSI, not MSI-X, is enabled?
You should still be able to use multiple event rings; all the interrupts
would just be directed to the same CPU, correct?  Even on an MSI-only
host I think you could still see performance gains, since you could
direct different types of USB traffic to different event rings with
different interrupt moderation rates.

I think this was something Matthew wanted for the UAS driver on hosts
that only support MSI.  For those hosts, could you register, say, 5
event rings behind the same interrupt vector?  Matthew, do you think
that's enough, or do you want more?
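
To make that concrete, here's roughly what I'm picturing for the
MSI-only case.  This is only an untested sketch against this patch's
own helpers (xhci_handle_event(), xhci->erst[] and xhci->intr_num),
living next to xhci_msi_irq() in xhci-ring.c, and it assumes the
single-vector handler would get the xhci_hcd itself as dev_id rather
than one erst; the function name is made up:

static irqreturn_t xhci_msi_irq_shared(int irq, void *dev_id)
{
	/* Assumption: with plain MSI we pass the xhci_hcd as dev_id */
	struct xhci_hcd *xhci = dev_id;
	irqreturn_t ret = IRQ_NONE;
	int i;

	spin_lock(&xhci->lock);
	/* One vector, but drain every enabled event ring, not just ring 0 */
	for (i = 0; i < xhci->intr_num; i++)
		while (xhci_handle_event(xhci->erst[i], i) > 0)
			ret = IRQ_HANDLED;
	spin_unlock(&xhci->lock);

	return ret;
}

That way the MSI-only case still keeps per-ring moderation settings,
even though everything lands on one CPU.
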

> During system suspend and resume, the registers of all enabled interrupters
> will be saved and restored.
> 
> Signed-off-by: Andiry Xu <andiry.xu@xxxxxxx>
> ---
>  drivers/usb/host/xhci-mem.c  |    9 +++
>  drivers/usb/host/xhci-ring.c |   78 +++++++++++----------
>  drivers/usb/host/xhci.c      |  160 +++++++++++++++++++++++++-----------------
>  drivers/usb/host/xhci.h      |   10 ++--
>  4 files changed, 151 insertions(+), 106 deletions(-)
> 
> diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
> index a0f960e..f1f64a4 100644
> --- a/drivers/usb/host/xhci-mem.c
> +++ b/drivers/usb/host/xhci-mem.c
> @@ -1515,6 +1515,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
>  		xhci_dbg(xhci, "Freed event ring\n");
>  		kfree(xhci->erst[i]);
>  		xhci_dbg(xhci, "Freed ERST\n");
> +		kfree(xhci->s3.ir_set[i]);
> +		xhci_dbg(xhci, "Freed intr reg %d for suspend\n", i);
>  	}
>  
>  	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
> @@ -2067,6 +2069,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
>  
>  		xhci_dbg(xhci, "// Allocating event ring %d\n", i);
>  		xhci->erst[i]->xhci = xhci;
> +		xhci->erst[i]->erst_num = i;
>  		xhci->erst[i]->ir_set = &xhci->run_regs->ir_set[i];
>  		xhci->erst[i]->ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS,
>  							false, flags);
> @@ -2075,6 +2078,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
>  		if (xhci_check_trb_in_td_math(xhci, flags, i) < 0)
>  			goto fail;
>  
> +		xhci_dbg(xhci, "// Allocating intr reg %d for suspend\n", i);
> +		xhci->s3.ir_set[i] = kzalloc(sizeof(struct xhci_intr_reg),
> +						flags);
> +		if (!xhci->s3.ir_set[i])
> +			goto fail;
> +
>  		xhci->erst[i]->entries = pci_alloc_consistent(to_pci_dev(dev),
>  			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
>  		if (!xhci->erst[i]->entries)
> diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
> index cd3bad9..fc3c9d8 100644
> --- a/drivers/usb/host/xhci-ring.c
> +++ b/drivers/usb/host/xhci-ring.c
> @@ -94,11 +94,11 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
>   * or was the previous TRB the last TRB on the last segment in the ERST?
>   */
>  static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
> -		struct xhci_segment *seg, union xhci_trb *trb)
> +		struct xhci_segment *seg, union xhci_trb *trb, int erst_num)
>  {
> -	if (ring == xhci->erst[0]->ring)
> +	if (ring == xhci->erst[erst_num]->ring)
>  		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
> -			(seg->next == xhci->erst[0]->ring->first_seg);
> +			(seg->next == xhci->erst[erst_num]->ring->first_seg);
>  	else
>  		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
>  }
> @@ -108,9 +108,9 @@ static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
>   * event seg?
>   */
>  static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
> -		struct xhci_segment *seg, union xhci_trb *trb)
> +		struct xhci_segment *seg, union xhci_trb *trb, int erst_num)
>  {
> -	if (ring == xhci->erst[0]->ring)
> +	if (ring == xhci->erst[erst_num]->ring)
>  		return trb == &seg->trbs[TRBS_PER_SEGMENT];
>  	else
>  		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
> @@ -133,7 +133,7 @@ static void next_trb(struct xhci_hcd *xhci,
>  		struct xhci_segment **seg,
>  		union xhci_trb **trb)
>  {
> -	if (last_trb(xhci, ring, *seg, *trb)) {
> +	if (last_trb(xhci, ring, *seg, *trb, 0)) {
>  		*seg = (*seg)->next;
>  		*trb = ((*seg)->trbs);
>  	} else {
> @@ -145,7 +145,8 @@ static void next_trb(struct xhci_hcd *xhci,
>   * See Cycle bit rules. SW is the consumer for the event ring only.
>   * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
>   */
> -static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
> +static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring,
> +			bool consumer, int erst_num)
>  {
>  	union xhci_trb *next = ++(ring->dequeue);
>  	unsigned long long addr;
> @@ -154,8 +155,9 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
>  	/* Update the dequeue pointer further if that was a link TRB or we're at
>  	 * the end of an event ring segment (which doesn't have link TRBS)
>  	 */
> -	while (last_trb(xhci, ring, ring->deq_seg, next)) {
> -		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
> +	while (last_trb(xhci, ring, ring->deq_seg, next, erst_num)) {
> +		if (consumer && last_trb_on_last_seg(xhci, ring,
> +				ring->deq_seg, next, erst_num)) {
>  			ring->cycle_state = (ring->cycle_state ? 0 : 1);
>  			if (!in_interrupt())
>  				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
> @@ -187,7 +189,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
>   *			prepare_transfer()?
>   */
>  static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
> -		bool consumer, bool more_trbs_coming)
> +		bool consumer, bool more_trbs_coming, int erst_num)
>  {
>  	u32 chain;
>  	union xhci_trb *next;
> @@ -200,9 +202,9 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
>  	/* Update the dequeue pointer further if that was a link TRB or we're at
>  	 * the end of an event ring segment (which doesn't have link TRBS)
>  	 */
> -	while (last_trb(xhci, ring, ring->enq_seg, next)) {
> +	while (last_trb(xhci, ring, ring->enq_seg, next, erst_num)) {
>  		if (!consumer) {
> -			if (ring != xhci->erst[0]->ring) {
> +			if (ring != xhci->erst[erst_num]->ring) {
>  				/*
>  				 * If the caller doesn't plan on enqueueing more
>  				 * TDs before ringing the doorbell, then we
> @@ -229,7 +231,8 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
>  				next->link.control ^= cpu_to_le32(TRB_CYCLE);
>  			}
>  			/* Toggle the cycle bit after the last ring segment. */
> -			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
> +			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg,
> +					next, erst_num)) {
>  				ring->cycle_state = (ring->cycle_state ? 0 : 1);
>  				if (!in_interrupt())
>  					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
> @@ -261,7 +264,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
>  
>  	/* If we are currently pointing to a link TRB, advance the
>  	 * enqueue pointer before checking for space */
> -	while (last_trb(xhci, ring, enq_seg, enq)) {
> +	while (last_trb(xhci, ring, enq_seg, enq, 0)) {
>  		enq_seg = enq_seg->next;
>  		enq = enq_seg->trbs;
>  	}
> @@ -289,7 +292,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
>  		if (enq == ring->dequeue)
>  			return 0;
>  		enq++;
> -		while (last_trb(xhci, ring, enq_seg, enq)) {
> +		while (last_trb(xhci, ring, enq_seg, enq, 0)) {
>  			enq_seg = enq_seg->next;
>  			enq = enq_seg->trbs;
>  		}
> @@ -1181,7 +1184,7 @@ bandwidth_change:
>  		xhci->error_bitmask |= 1 << 6;
>  		break;
>  	}
> -	inc_deq(xhci, xhci->cmd_ring, false);
> +	inc_deq(xhci, xhci->cmd_ring, false, 0);
>  }
>  
>  static void handle_vendor_event(struct xhci_hcd *xhci,
> @@ -1233,7 +1236,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
>  }
>  
>  static void handle_port_status(struct xhci_hcd *xhci,
> -		union xhci_trb *event)
> +		union xhci_trb *event, int erst_num)
>  {
>  	struct usb_hcd *hcd;
>  	u32 port_id;
> @@ -1346,7 +1349,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
>  
>  cleanup:
>  	/* Update event ring dequeue pointer before dropping the lock */
> -	inc_deq(xhci, xhci->erst[0]->ring, true);
> +	inc_deq(xhci, xhci->erst[erst_num]->ring, true, erst_num);
>  
>  	/* Don't make the USB core poll the roothub if we got a bad port status
>  	 * change event.  Besides, at that point we can't tell which roothub
> @@ -1540,8 +1543,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
>  		} else {
>  			/* Update ring dequeue pointer */
>  			while (ep_ring->dequeue != td->last_trb)
> -				inc_deq(xhci, ep_ring, false);
> -			inc_deq(xhci, ep_ring, false);
> +				inc_deq(xhci, ep_ring, false, 0);
> +			inc_deq(xhci, ep_ring, false, 0);
>  		}
>  
>  td_cleanup:
> @@ -1595,7 +1598,7 @@ td_cleanup:
>   */
>  static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
>  	union xhci_trb *event_trb, struct xhci_transfer_event *event,
> -	struct xhci_virt_ep *ep, int *status)
> +	struct xhci_virt_ep *ep, int *status, int erst_num)
>  {
>  	struct xhci_virt_device *xdev;
>  	struct xhci_ring *ep_ring;
> @@ -1611,7 +1614,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
>  	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
>  	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
>  
> -	xhci_debug_trb(xhci, xhci->erst[0]->ring->dequeue);
> +	xhci_debug_trb(xhci, xhci->erst[erst_num]->ring->dequeue);
>  	switch (trb_comp_code) {
>  	case COMP_SUCCESS:
>  		if (event_trb == ep_ring->dequeue) {
> @@ -1796,8 +1799,8 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
>  
>  	/* Update ring dequeue pointer */
>  	while (ep_ring->dequeue != td->last_trb)
> -		inc_deq(xhci, ep_ring, false);
> -	inc_deq(xhci, ep_ring, false);
> +		inc_deq(xhci, ep_ring, false, 0);
> +	inc_deq(xhci, ep_ring, false, 0);
>  
>  	return finish_td(xhci, td, NULL, event, ep, status, true);
>  }
> @@ -1913,7 +1916,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
>   * At this point, the host controller is probably hosed and should be reset.
>   */
>  static int handle_tx_event(struct xhci_hcd *xhci,
> -		struct xhci_transfer_event *event)
> +		struct xhci_transfer_event *event, int erst_num)
>  {
>  	struct xhci_virt_device *xdev;
>  	struct xhci_virt_ep *ep;
> @@ -2117,7 +2120,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
>  		 */
>  		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
>  			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
> -						 &status);
> +						 &status, erst_num);
>  		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
>  			ret = process_isoc_td(xhci, td, event_trb, event, ep,
>  						 &status);
> @@ -2131,7 +2134,8 @@ cleanup:
>  		 * Will roll back to continue process missed tds.
>  		 */
>  		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
> -			inc_deq(xhci, xhci->erst[0]->ring, true);
> +			inc_deq(xhci, xhci->erst[erst_num]->ring, true,
> +				erst_num);
>  		}
>  
>  		if (ret) {
> @@ -2179,7 +2183,7 @@ cleanup:
>   * Returns >0 for "possibly more events to process" (caller should call again),
>   * otherwise 0 if done.  In future, <0 returns should indicate error code.
>   */
> -static int xhci_handle_event(struct xhci_erst *erst)
> +static int xhci_handle_event(struct xhci_erst *erst, int erst_num)
>  {
>  	union xhci_trb *event;
>  	int update_ptrs = 1;
> @@ -2209,11 +2213,12 @@ static int xhci_handle_event(struct xhci_erst *erst)
>  		handle_cmd_completion(erst->xhci, &event->event_cmd);
>  		break;
>  	case TRB_TYPE(TRB_PORT_STATUS):
> -		handle_port_status(erst->xhci, event);
> +		handle_port_status(erst->xhci, event, erst_num);
>  		update_ptrs = 0;
>  		break;
>  	case TRB_TYPE(TRB_TRANSFER):
> -		ret = handle_tx_event(erst->xhci, &event->trans_event);
> +		ret = handle_tx_event(erst->xhci, &event->trans_event,
> +					erst_num);
>  		if (ret < 0)
>  			erst->xhci->error_bitmask |= 1 << 9;
>  		else
> @@ -2237,7 +2242,7 @@ static int xhci_handle_event(struct xhci_erst *erst)
>  
>  	if (update_ptrs)
>  		/* Update SW event ring dequeue pointer */
> -		inc_deq(erst->xhci, erst->ring, true);
> +		inc_deq(erst->xhci, erst->ring, true, erst_num);
>  
>  	/* Are there more items on the event ring?  Caller will call us again to
>  	 * check.
> @@ -2315,7 +2320,7 @@ hw_died:
>  	/* FIXME this should be a delayed service routine
>  	 * that clears the EHB.
>  	 */
> -	while (xhci_handle_event(xhci->erst[0]) > 0) {}
> +	while (xhci_handle_event(xhci->erst[0], 0) > 0) {}
>  
>  	temp_64 = xhci_read_64(xhci, &xhci->erst[0]->ir_set->erst_dequeue);
>  	/* If necessary, update the HW's version of the event ring deq ptr. */
> @@ -2351,7 +2356,7 @@ irqreturn_t xhci_msi_irq(int irq, void *dev_id)
>  		set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
>  
>  	spin_lock(&xhci->lock);
> -	while (xhci_handle_event(erst) > 0) {
> +	while (xhci_handle_event(erst, erst->erst_num) > 0) {
>  		ret = IRQ_HANDLED;
>  	}
>  	spin_unlock(&xhci->lock);
> @@ -2386,7 +2391,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
>  	trb->field[1] = cpu_to_le32(field2);
>  	trb->field[2] = cpu_to_le32(field3);
>  	trb->field[3] = cpu_to_le32(field4);
> -	inc_enq(xhci, ring, consumer, more_trbs_coming);
> +	inc_enq(xhci, ring, consumer, more_trbs_coming, 0);
>  }
>  
>  /*
> @@ -2435,7 +2440,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
>  
>  		next = ring->enqueue;
>  
> -		while (last_trb(xhci, ring, ring->enq_seg, next)) {
> +		while (last_trb(xhci, ring, ring->enq_seg, next, 0)) {
>  			/* If we're not dealing with 0.95 hardware,
>  			 * clear the chain bit.
>  			 */
> @@ -2448,7 +2453,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
>  			next->link.control ^= cpu_to_le32((u32) TRB_CYCLE);
>  
>  			/* Toggle the cycle bit after the last ring segment. */
> -			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
> +			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg,
> +					next, 0)) {
>  				ring->cycle_state = (ring->cycle_state ? 0 : 1);
>  				if (!in_interrupt()) {
>  					xhci_dbg(xhci, "queue_trb: Toggle cycle "
> diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
> index eb6e33c..1a63a9c 100644
> --- a/drivers/usb/host/xhci.c
> +++ b/drivers/usb/host/xhci.c
> @@ -192,7 +192,7 @@ static void xhci_free_irq(struct xhci_hcd *xhci)
>  		for (i = 0; i < xhci->msix_count; i++)
>  			if (xhci->msix_entries[i].vector)
>  				free_irq(xhci->msix_entries[i].vector,
> -						xhci->erst[0]);
> +						xhci->erst[i]);
>  	} else if (pdev->irq >= 0)
>  		free_irq(pdev->irq, xhci->erst[0]);
>  
> @@ -220,6 +220,7 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
>  		pci_disable_msi(pdev);
>  	}
>  
> +	xhci->intr_num = 1;
>  	return ret;
>  }
>  
> @@ -253,12 +254,13 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
>  
>  	for (i = 0; i < xhci->msix_count; i++) {
>  		ret = request_irq(xhci->msix_entries[i].vector, xhci_msi_irq,
> -					0, "xhci_hcd", xhci->erst[0]);
> +					0, "xhci_hcd", xhci->erst[i]);
>  		if (ret)
>  			goto disable_msix;
>  	}
>  
>  	hcd->msix_enabled = 1;
> +	xhci->intr_num = xhci->msix_count;
>  	return ret;
>  
>  disable_msix:
> @@ -329,8 +331,6 @@ static void xhci_event_ring_work(unsigned long arg)
>  	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
>  	int i, j;
>  
> -	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
> -
>  	spin_lock_irqsave(&xhci->lock, flags);
>  	temp = xhci_readl(xhci, &xhci->op_regs->status);
>  	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
> @@ -340,16 +340,23 @@ static void xhci_event_ring_work(unsigned long arg)
>  		return;
>  	}
>  
> -	temp = xhci_readl(xhci, &xhci->erst[0]->ir_set->irq_pending);
> -	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
> -	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
> -	xhci->error_bitmask = 0;
> -	xhci_dbg(xhci, "Event ring:\n");
> -	xhci_debug_segment(xhci, xhci->erst[0]->ring->deq_seg);
> -	xhci_dbg_ring_ptrs(xhci, xhci->erst[0]->ring);
> -	temp_64 = xhci_read_64(xhci, &xhci->erst[0]->ir_set->erst_dequeue);
> -	temp_64 &= ~ERST_PTR_MASK;
> -	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
> +	for (i = 0; i < xhci->intr_num; i++) {
> +		xhci_dbg(xhci, "Poll event ring %d: %lu\n", i, jiffies);
> +		temp = xhci_readl(xhci, &xhci->erst[i]->ir_set->irq_pending);
> +		xhci_dbg(xhci, "ir_set %d pending = 0x%x\n", i, temp);
> +		xhci_dbg(xhci, "HC error bitmask = 0x%x\n",
> +				xhci->error_bitmask);
> +		xhci->error_bitmask = 0;
> +		xhci_dbg(xhci, "Event ring:\n");
> +		xhci_debug_segment(xhci, xhci->erst[i]->ring->deq_seg);
> +		xhci_dbg_ring_ptrs(xhci, xhci->erst[i]->ring);
> +		temp_64 = xhci_read_64(xhci,
> +				&xhci->erst[i]->ir_set->erst_dequeue);
> +		temp_64 &= ~ERST_PTR_MASK;
> +		xhci_dbg(xhci, "ERST %d deq = 64'h%0lx\n", i,
> +				(long unsigned int) temp_64);
> +	}
> +
>  	xhci_dbg(xhci, "Command ring:\n");
>  	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
>  	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
> @@ -404,6 +411,7 @@ int xhci_run(struct usb_hcd *hcd)
>  	u32 ret;
>  	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
>  	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
> +	int i;
>  
>  	/* Start the xHCI host controller running only after the USB 2.0 roothub
>  	 * is setup.
> @@ -441,6 +449,7 @@ legacy_irq:
>  			return ret;
>  		}
>  		hcd->irq = pdev->irq;
> +		xhci->intr_num = 1;
>  	}
>  
>  #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
> @@ -459,21 +468,6 @@ legacy_irq:
>  	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
>  	xhci_dbg_cmd_ptrs(xhci);
>  
> -	xhci_dbg(xhci, "ERST memory map follows:\n");
> -	xhci_dbg_erst(xhci, xhci->erst[0]);
> -	xhci_dbg(xhci, "Event ring:\n");
> -	xhci_debug_ring(xhci, xhci->erst[0]->ring);
> -	xhci_dbg_ring_ptrs(xhci, xhci->erst[0]->ring);
> -	temp_64 = xhci_read_64(xhci, &xhci->erst[0]->ir_set->erst_dequeue);
> -	temp_64 &= ~ERST_PTR_MASK;
> -	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
> -
> -	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
> -	temp = xhci_readl(xhci, &xhci->erst[0]->ir_set->irq_control);
> -	temp &= ~ER_IRQ_INTERVAL_MASK;
> -	temp |= (u32) 160;
> -	xhci_writel(xhci, temp, &xhci->erst[0]->ir_set->irq_control);
> -
>  	/* Set the HCD state before we enable the irqs */
>  	temp = xhci_readl(xhci, &xhci->op_regs->command);
>  	temp |= (CMD_EIE);
> @@ -481,13 +475,33 @@ legacy_irq:
>  			temp);
>  	xhci_writel(xhci, temp, &xhci->op_regs->command);
>  
> -	temp = xhci_readl(xhci, &xhci->erst[0]->ir_set->irq_pending);
> -	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
> -			xhci->erst[0]->ir_set,
> -			(unsigned int) ER_IRQ_ENABLE(temp));
> -	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
> -			&xhci->erst[0]->ir_set->irq_pending);
> -	xhci_print_ir_set(xhci, 0);
> +	for (i = 0; i < xhci->intr_num; i++) {
> +		xhci_dbg(xhci, "ERST memory map follows:\n");
> +		xhci_dbg_erst(xhci, xhci->erst[i]);
> +		xhci_dbg(xhci, "Event ring %d:\n", i);
> +		xhci_debug_ring(xhci, xhci->erst[i]->ring);
> +		xhci_dbg_ring_ptrs(xhci, xhci->erst[i]->ring);
> +		temp_64 = xhci_read_64(xhci,
> +				&xhci->erst[i]->ir_set->erst_dequeue);
> +		temp_64 &= ~ERST_PTR_MASK;
> +		xhci_dbg(xhci, "ERST %d deq = 64'h%0lx\n", i,
> +				(long unsigned int) temp_64);
> +
> +		xhci_dbg(xhci, "// Set the interrupt modulation register\n");
> +		temp = xhci_readl(xhci, &xhci->erst[i]->ir_set->irq_control);
> +		temp &= ~ER_IRQ_INTERVAL_MASK;
> +		temp |= (u32) 160;
> +		xhci_writel(xhci, temp, &xhci->erst[i]->ir_set->irq_control);
> +
> +		temp = xhci_readl(xhci, &xhci->erst[i]->ir_set->irq_pending);
> +		xhci_dbg(xhci, "// Enabling event ring interrupter %p "
> +				"by writing 0x%x to irq_pending\n",
> +				xhci->erst[i]->ir_set,
> +				(unsigned int) ER_IRQ_ENABLE(temp));
> +		xhci_writel(xhci, ER_IRQ_ENABLE(temp),
> +				&xhci->erst[i]->ir_set->irq_pending);
> +		xhci_print_ir_set(xhci, i);
> +	}
>  
>  	if (xhci->quirks & XHCI_NEC_HOST)
>  		xhci_queue_vendor_command(xhci, 0, 0, 0,
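
(Tying back to my moderation-rate comment above: if we do end up
steering different traffic types to different rings, this loop would
be the natural place to give each ring its own interval instead of a
flat 160.  Untested sketch, and imod_for_ring() is a made-up helper;
everything else is from this patch:

		temp = xhci_readl(xhci, &xhci->erst[i]->ir_set->irq_control);
		temp &= ~ER_IRQ_INTERVAL_MASK;
		/* hypothetical per-ring value, e.g. 0 for a UAS ring, 160 otherwise */
		temp |= imod_for_ring(xhci, i);
		xhci_writel(xhci, temp, &xhci->erst[i]->ir_set->irq_control);

Not something this patch needs to do, just noting where it would go.)
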
> @@ -525,6 +539,7 @@ void xhci_stop(struct usb_hcd *hcd)
>  {
>  	u32 temp;
>  	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
> +	int i;
>  
>  	if (!usb_hcd_is_primary_hcd(hcd)) {
>  		xhci_only_stop_hcd(xhci->shared_hcd);
> @@ -551,12 +566,14 @@ void xhci_stop(struct usb_hcd *hcd)
>  		usb_amd_dev_put();
>  
>  	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
> -	temp = xhci_readl(xhci, &xhci->op_regs->status);
> -	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
> -	temp = xhci_readl(xhci, &xhci->erst[0]->ir_set->irq_pending);
> -	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
> -			&xhci->erst[0]->ir_set->irq_pending);
> -	xhci_print_ir_set(xhci, 0);
> +	for (i = 0; i < xhci->intr_num; i++) {
> +		temp = xhci_readl(xhci, &xhci->op_regs->status);
> +		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
> +		temp = xhci_readl(xhci, &xhci->erst[i]->ir_set->irq_pending);
> +		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
> +				&xhci->erst[i]->ir_set->irq_pending);
> +		xhci_print_ir_set(xhci, i);
> +	}
>  
>  	xhci_dbg(xhci, "cleaning up memory\n");
>  	xhci_mem_cleanup(xhci);
> @@ -590,36 +607,44 @@ void xhci_shutdown(struct usb_hcd *hcd)
>  #ifdef CONFIG_PM
>  static void xhci_save_registers(struct xhci_hcd *xhci)
>  {
> +	int i;
> +
>  	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
>  	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
>  	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
>  	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
> -	xhci->s3.irq_pending = xhci_readl(xhci,
> -					&xhci->erst[0]->ir_set->irq_pending);
> -	xhci->s3.irq_control = xhci_readl(xhci,
> -					&xhci->erst[0]->ir_set->irq_control);
> -	xhci->s3.erst_size = xhci_readl(xhci,
> -					&xhci->erst[0]->ir_set->erst_size);
> -	xhci->s3.erst_base = xhci_read_64(xhci,
> -					&xhci->erst[0]->ir_set->erst_base);
> -	xhci->s3.erst_dequeue = xhci_read_64(xhci,
> -					&xhci->erst[0]->ir_set->erst_dequeue);
> +	for (i = 0; i < xhci->intr_num; i++) {
> +		xhci->s3.ir_set[i]->irq_pending = xhci_readl(xhci,
> +					&xhci->erst[i]->ir_set->irq_pending);
> +		xhci->s3.ir_set[i]->irq_control = xhci_readl(xhci,
> +					&xhci->erst[i]->ir_set->irq_control);
> +		xhci->s3.ir_set[i]->erst_size = xhci_readl(xhci,
> +					&xhci->erst[i]->ir_set->erst_size);
> +		xhci->s3.ir_set[i]->erst_base = xhci_read_64(xhci,
> +					&xhci->erst[i]->ir_set->erst_base);
> +		xhci->s3.ir_set[i]->erst_dequeue = xhci_read_64(xhci,
> +					&xhci->erst[i]->ir_set->erst_dequeue);
> +	}
>  }
>  
>  static void xhci_restore_registers(struct xhci_hcd *xhci)
>  {
> +	int i;
> +
>  	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
>  	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
>  	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
>  	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
> -	xhci_writel(xhci, xhci->s3.irq_pending,
> -					&xhci->erst[0]->ir_set->irq_pending);
> -	xhci_writel(xhci, xhci->s3.irq_control,
> -					&xhci->erst[0]->ir_set->irq_control);
> -	xhci_writel(xhci, xhci->s3.erst_size,
> -					&xhci->erst[0]->ir_set->erst_size);
> -	xhci_write_64(xhci, xhci->s3.erst_base,
> -					&xhci->erst[0]->ir_set->erst_base);
> +	for (i = 0; i < xhci->intr_num; i++) {
> +		xhci_writel(xhci, xhci->s3.ir_set[i]->irq_pending,
> +					&xhci->erst[i]->ir_set->irq_pending);
> +		xhci_writel(xhci, xhci->s3.ir_set[i]->irq_control,
> +					&xhci->erst[i]->ir_set->irq_control);
> +		xhci_writel(xhci, xhci->s3.ir_set[i]->erst_size,
> +					&xhci->erst[i]->ir_set->erst_size);
> +		xhci_write_64(xhci, xhci->s3.ir_set[i]->erst_base,
> +					&xhci->erst[i]->ir_set->erst_base);
> +	}
>  }
>  
>  static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
> @@ -729,7 +754,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
>  	/* step 5: remove core well power */
>  	/* synchronize irq when using MSI-X */
>  	if (xhci->msix_entries) {
> -		for (i = 0; i < xhci->msix_count; i++)
> +		for (i = 0; i < xhci->intr_num; i++)
>  			synchronize_irq(xhci->msix_entries[i].vector);
>  	}
>  
> @@ -748,6 +773,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
>  	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
>  	struct usb_hcd		*secondary_hcd;
>  	int			retval;
> +	int			i;
>  
>  	/* Wait a bit if either of the roothubs need to settle from the
>  	 * transition into bus suspend.
> @@ -799,10 +825,14 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
>  		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
>  		temp = xhci_readl(xhci, &xhci->op_regs->status);
>  		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
> -		temp = xhci_readl(xhci, &xhci->erst[0]->ir_set->irq_pending);
> -		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
> -				&xhci->erst[0]->ir_set->irq_pending);
> -		xhci_print_ir_set(xhci, 0);
> +
> +		for (i = 0; i < xhci->intr_num; i++) {
> +			temp = xhci_readl(xhci,
> +					&xhci->erst[i]->ir_set->irq_pending);
> +			xhci_writel(xhci, ER_IRQ_DISABLE(temp),
> +					&xhci->erst[i]->ir_set->irq_pending);
> +			xhci_print_ir_set(xhci, i);
> +		}
>  
>  		xhci_dbg(xhci, "cleaning up memory\n");
>  		xhci_mem_cleanup(xhci);
> diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
> index 14b575b..a18cf6b 100644
> --- a/drivers/usb/host/xhci.h
> +++ b/drivers/usb/host/xhci.h
> @@ -1149,6 +1149,8 @@ struct xhci_erst {
>  	dma_addr_t		erst_dma_addr;
>  	/* Num entries the ERST can contain */
>  	unsigned int		erst_size;
> +	/* The ERST number */
> +	unsigned int		erst_num;
>  };
>  
>  struct xhci_scratchpad {
> @@ -1185,11 +1187,7 @@ struct s3_save {
>  	u32	dev_nt;
>  	u64	dcbaa_ptr;
>  	u32	config_reg;
> -	u32	irq_pending;
> -	u32	irq_control;
> -	u32	erst_size;
> -	u64	erst_base;
> -	u64	erst_dequeue;
> +	struct	xhci_intr_reg *ir_set[32];
>  };
>  
>  struct xhci_bus_state {
> @@ -1245,6 +1243,8 @@ struct xhci_hcd {
>  	/* msi-x vectors */
>  	int		msix_count;
>  	struct msix_entry	*msix_entries;
> +	/* number of interrupters enabled */
> +	int		intr_num;
>  	/* data structures */
>  	struct xhci_device_context_array *dcbaa;
>  	struct xhci_ring	*cmd_ring;
> -- 
> 1.7.1
> 
> 