[PATCH 3/5] xHCI: Allocate multiple ERST

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



xHCI can support multiple interrupters, which requires multiple ERSTs and
event rings, so that a device can choose which interrupter it will use and
have its events reported on the corresponding event ring.

This patch modifies the xHCI memory initialization and cleanup code to
allocate multiple ERSTs, while still using only the first ERST and event
ring; later patches in this series will make use of the others.

The number of ERSTs allocated equals the number of MSI-X vectors, up to a
maximum of 32.

Signed-off-by: Andiry Xu <andiry.xu@xxxxxxx>
---
 drivers/usb/host/xhci-mem.c  |  256 +++++++++++++++++++++++------------------
 drivers/usb/host/xhci-ring.c |   37 +++---
 drivers/usb/host/xhci.c      |   77 ++++++-------
 drivers/usb/host/xhci.h      |    2 +-
 4 files changed, 200 insertions(+), 172 deletions(-)

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 89fb1dc..a0f960e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1495,22 +1495,27 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	int i;
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
-	if (xhci->erst->ir_set) {
-		xhci_writel(xhci, 0, &xhci->erst->ir_set->erst_size);
-		xhci_write_64(xhci, 0, &xhci->erst->ir_set->erst_base);
-		xhci_write_64(xhci, 0, &xhci->erst->ir_set->erst_dequeue);
+	for (i = 0; i < xhci->msix_count; i++) {
+		if (xhci->erst[i]->ir_set) {
+			xhci_writel(xhci, 0, &xhci->erst[i]->ir_set->erst_size);
+			xhci_write_64(xhci, 0,
+					&xhci->erst[i]->ir_set->erst_base);
+			xhci_write_64(xhci, 0,
+					&xhci->erst[i]->ir_set->erst_dequeue);
+		}
+		size = sizeof(struct xhci_erst_entry)*
+				(xhci->erst[i]->num_entries);
+		if (xhci->erst[i]->entries)
+			pci_free_consistent(pdev, size,	xhci->erst[i]->entries,
+					xhci->erst[i]->erst_dma_addr);
+		xhci->erst[i]->entries = NULL;
+		if (xhci->erst[i]->ring)
+			xhci_ring_free(xhci, xhci->erst[i]->ring);
+		xhci->erst[i]->ring = NULL;
+		xhci_dbg(xhci, "Freed event ring\n");
+		kfree(xhci->erst[i]);
+		xhci_dbg(xhci, "Freed ERST\n");
 	}
-	size = sizeof(struct xhci_erst_entry)*(xhci->erst->num_entries);
-	if (xhci->erst->entries)
-		pci_free_consistent(pdev, size,
-				xhci->erst->entries, xhci->erst->erst_dma_addr);
-	xhci->erst->entries = NULL;
-	if (xhci->erst->ring)
-		xhci_ring_free(xhci, xhci->erst->ring);
-	xhci->erst->ring = NULL;
-	xhci_dbg(xhci, "Freed event ring\n");
-	kfree(xhci->erst);
-	xhci_dbg(xhci, "Freed ERST\n");
 
 	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 	if (xhci->cmd_ring)
@@ -1596,7 +1601,8 @@ static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
 }
 
 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
-static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
+static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci,
+					gfp_t mem_flags, int index)
 {
 	struct {
 		dma_addr_t		input_dma;
@@ -1605,20 +1611,21 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 		/* A zeroed DMA field should fail */
 		{ 0, NULL },
 		/* One TRB before the ring start should fail */
-		{ xhci->erst->ring->first_seg->dma - 16, NULL },
+		{ xhci->erst[index]->ring->first_seg->dma - 16, NULL },
 		/* One byte before the ring start should fail */
-		{ xhci->erst->ring->first_seg->dma - 1, NULL },
+		{ xhci->erst[index]->ring->first_seg->dma - 1, NULL },
 		/* Starting TRB should succeed */
-		{ xhci->erst->ring->first_seg->dma,
-			xhci->erst->ring->first_seg },
+		{ xhci->erst[index]->ring->first_seg->dma,
+			xhci->erst[index]->ring->first_seg },
 		/* Ending TRB should succeed */
-		{ xhci->erst->ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
-			xhci->erst->ring->first_seg },
+		{ xhci->erst[index]->ring->first_seg->dma +
+				(TRBS_PER_SEGMENT - 1)*16,
+			xhci->erst[index]->ring->first_seg },
 		/* One byte after the ring end should fail */
-		{ xhci->erst->ring->first_seg->dma +
+		{ xhci->erst[index]->ring->first_seg->dma +
 			(TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
 		/* One TRB after the ring end should fail */
-		{ xhci->erst->ring->first_seg->dma +
+		{ xhci->erst[index]->ring->first_seg->dma +
 			(TRBS_PER_SEGMENT)*16, NULL },
 		/* An address of all ones should fail */
 		{ (dma_addr_t) (~0), NULL },
@@ -1631,59 +1638,62 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 		struct xhci_segment	*result_seg;
 	} complex_test_vector [] = {
 		/* Test feeding a valid DMA address from a different ring */
-		{	.input_seg = xhci->erst->ring->first_seg,
-			.start_trb = xhci->erst->ring->first_seg->trbs,
-			.end_trb = &xhci->erst->ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+		{	.input_seg = xhci->erst[index]->ring->first_seg,
+			.start_trb = xhci->erst[index]->ring->first_seg->trbs,
+			.end_trb = &xhci->erst[index]->ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 			.input_dma = xhci->cmd_ring->first_seg->dma,
 			.result_seg = NULL,
 		},
 		/* Test feeding a valid end TRB from a different ring */
-		{	.input_seg = xhci->erst->ring->first_seg,
-			.start_trb = xhci->erst->ring->first_seg->trbs,
+		{	.input_seg = xhci->erst[index]->ring->first_seg,
+			.start_trb = xhci->erst[index]->ring->first_seg->trbs,
 			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 			.input_dma = xhci->cmd_ring->first_seg->dma,
 			.result_seg = NULL,
 		},
 		/* Test feeding a valid start and end TRB from a different ring */
-		{	.input_seg = xhci->erst->ring->first_seg,
+		{	.input_seg = xhci->erst[index]->ring->first_seg,
 			.start_trb = xhci->cmd_ring->first_seg->trbs,
 			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 			.input_dma = xhci->cmd_ring->first_seg->dma,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but after this TD */
-		{	.input_seg = xhci->erst->ring->first_seg,
-			.start_trb = &xhci->erst->ring->first_seg->trbs[0],
-			.end_trb = &xhci->erst->ring->first_seg->trbs[3],
-			.input_dma = xhci->erst->ring->first_seg->dma + 4*16,
+		{	.input_seg = xhci->erst[index]->ring->first_seg,
+			.start_trb = &xhci->erst[index]->ring->first_seg->trbs[0],
+			.end_trb = &xhci->erst[index]->ring->first_seg->trbs[3],
+			.input_dma = xhci->erst[index]->ring->first_seg->dma +
+					4*16,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but before this TD */
-		{	.input_seg = xhci->erst->ring->first_seg,
-			.start_trb = &xhci->erst->ring->first_seg->trbs[3],
-			.end_trb = &xhci->erst->ring->first_seg->trbs[6],
-			.input_dma = xhci->erst->ring->first_seg->dma + 2*16,
+		{	.input_seg = xhci->erst[index]->ring->first_seg,
+			.start_trb = &xhci->erst[index]->ring->first_seg->trbs[3],
+			.end_trb = &xhci->erst[index]->ring->first_seg->trbs[6],
+			.input_dma = xhci->erst[index]->ring->first_seg->dma +
+					2*16,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but after this wrapped TD */
-		{	.input_seg = xhci->erst->ring->first_seg,
-			.start_trb = &xhci->erst->ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
-			.end_trb = &xhci->erst->ring->first_seg->trbs[1],
-			.input_dma = xhci->erst->ring->first_seg->dma + 2*16,
+		{	.input_seg = xhci->erst[index]->ring->first_seg,
+			.start_trb = &xhci->erst[index]->ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->erst[index]->ring->first_seg->trbs[1],
+			.input_dma = xhci->erst[index]->ring->first_seg->dma +
+					2*16,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but before this wrapped TD */
-		{	.input_seg = xhci->erst->ring->first_seg,
-			.start_trb = &xhci->erst->ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
-			.end_trb = &xhci->erst->ring->first_seg->trbs[1],
-			.input_dma = xhci->erst->ring->first_seg->dma +
+		{	.input_seg = xhci->erst[index]->ring->first_seg,
+			.start_trb = &xhci->erst[index]->ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->erst[index]->ring->first_seg->trbs[1],
+			.input_dma = xhci->erst[index]->ring->first_seg->dma +
 					(TRBS_PER_SEGMENT - 4)*16,
 			.result_seg = NULL,
 		},
 		/* TRB not in this ring, and we have a wrapped TD */
-		{	.input_seg = xhci->erst->ring->first_seg,
-			.start_trb = &xhci->erst->ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
-			.end_trb = &xhci->erst->ring->first_seg->trbs[1],
+		{	.input_seg = xhci->erst[index]->ring->first_seg,
+			.start_trb = &xhci->erst[index]->ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->erst[index]->ring->first_seg->trbs[1],
 			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
 			.result_seg = NULL,
 		},
@@ -1695,9 +1705,9 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 	num_tests = ARRAY_SIZE(simple_test_vector);
 	for (i = 0; i < num_tests; i++) {
 		ret = xhci_test_trb_in_td(xhci,
-				xhci->erst->ring->first_seg,
-				xhci->erst->ring->first_seg->trbs,
-				&xhci->erst->ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+				xhci->erst[index]->ring->first_seg,
+				xhci->erst[index]->ring->first_seg->trbs,
+				&xhci->erst[index]->ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 				simple_test_vector[i].input_dma,
 				simple_test_vector[i].result_seg,
 				"Simple", i);
@@ -1721,18 +1731,18 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 	return 0;
 }
 
-static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, int i)
 {
 	u64 temp;
 	dma_addr_t deq;
 
-	deq = xhci_trb_virt_to_dma(xhci->erst->ring->deq_seg,
-			xhci->erst->ring->dequeue);
+	deq = xhci_trb_virt_to_dma(xhci->erst[i]->ring->deq_seg,
+			xhci->erst[i]->ring->dequeue);
 	if (deq == 0 && !in_interrupt())
 		xhci_warn(xhci, "WARN something wrong with SW event ring "
 				"dequeue ptr.\n");
 	/* Update HC event ring dequeue pointer */
-	temp = xhci_read_64(xhci, &xhci->erst->ir_set->erst_dequeue);
+	temp = xhci_read_64(xhci, &xhci->erst[i]->ir_set->erst_dequeue);
 	temp &= ERST_PTR_MASK;
 	/* Don't clear the EHB bit (which is RW1C) because
 	 * there might be more events to service.
@@ -1741,7 +1751,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 	xhci_dbg(xhci, "// Write event ring dequeue pointer, "
 			"preserving EHB bit\n");
 	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
-			&xhci->erst->ir_set->erst_dequeue);
+			&xhci->erst[i]->ir_set->erst_dequeue);
 }
 
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
@@ -2033,70 +2043,92 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci_print_run_regs(xhci);
 
 	/*
+	 * calculate number of msi-x vectors supported.
+	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
+	 *   with max number of interrupters based on the xhci HCSPARAMS1.
+	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
+	 *   Add additional 1 vector to ensure always available interrupt.
+	 */
+	xhci->msix_count = min(num_online_cpus() + 1,
+				HCS_MAX_INTRS(xhci->hcs_params1));
+	 /* Support up to 32 interrupters and event rings */
+	if (xhci->msix_count > 32)
+		xhci->msix_count = 32;
+
+	/*
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
-	xhci_dbg(xhci, "// Allocating erst\n");
-	xhci->erst = kzalloc(sizeof(struct xhci_erst), flags);
-	if (!xhci->erst)
-		goto fail;
+	for (i = 0; i < xhci->msix_count; i++) {
+		xhci_dbg(xhci, "// Allocating erst %d\n", i);
+		xhci->erst[i] = kzalloc(sizeof(struct xhci_erst), flags);
+		if (!xhci->erst[i])
+			goto fail;
 
-	xhci_dbg(xhci, "// Allocating event ring\n");
-	xhci->erst->xhci = xhci;
-	xhci->erst->ir_set = &xhci->run_regs->ir_set[0];
-	xhci->erst->ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
-	if (!xhci->erst->ring)
-		goto fail;
-	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
-		goto fail;
+		xhci_dbg(xhci, "// Allocating event ring %d\n", i);
+		xhci->erst[i]->xhci = xhci;
+		xhci->erst[i]->ir_set = &xhci->run_regs->ir_set[i];
+		xhci->erst[i]->ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS,
+							false, flags);
+		if (!xhci->erst[i]->ring)
+			goto fail;
+		if (xhci_check_trb_in_td_math(xhci, flags, i) < 0)
+			goto fail;
 
-	xhci->erst->entries = pci_alloc_consistent(to_pci_dev(dev),
+		xhci->erst[i]->entries = pci_alloc_consistent(to_pci_dev(dev),
 			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
-	if (!xhci->erst->entries)
-		goto fail;
-	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
-			(unsigned long long)dma);
+		if (!xhci->erst[i]->entries)
+			goto fail;
+		xhci_dbg(xhci, "// Allocated event ring segment table "
+				"at 0x%llx\n", (unsigned long long)dma);
 
-	memset(xhci->erst->entries, 0,
+		memset(xhci->erst[i]->entries, 0,
 			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
-	xhci->erst->num_entries = ERST_NUM_SEGS;
-	xhci->erst->erst_dma_addr = dma;
-	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
-			xhci->erst->num_entries,
-			xhci->erst->entries,
-			(unsigned long long)xhci->erst->erst_dma_addr);
-
-	/* set ring base address and size for each segment table entry */
-	for (val = 0, seg = xhci->erst->ring->first_seg;
-			val < ERST_NUM_SEGS; val++) {
-		struct xhci_erst_entry *entry = &xhci->erst->entries[val];
-		entry->seg_addr = cpu_to_le64(seg->dma);
-		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
-		entry->rsvd = 0;
-		seg = seg->next;
-	}
+		xhci->erst[i]->num_entries = ERST_NUM_SEGS;
+		xhci->erst[i]->erst_dma_addr = dma;
+		xhci_dbg(xhci, "Set ERST to %d; private num segs = %i, "
+				"virt addr = %p, dma addr = 0x%llx\n", i,
+			xhci->erst[i]->num_entries,
+			xhci->erst[i]->entries,
+			(unsigned long long)xhci->erst[i]->erst_dma_addr);
+
+		/* set ring base address and size for each segment table entry
+		 */
+		for (val = 0, seg = xhci->erst[i]->ring->first_seg;
+				val < ERST_NUM_SEGS; val++) {
+			struct xhci_erst_entry *entry =
+				&xhci->erst[i]->entries[val];
+			entry->seg_addr = cpu_to_le64(seg->dma);
+			entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+			entry->rsvd = 0;
+			seg = seg->next;
+		}
 
-	/* set ERST count with the number of entries in the segment table */
-	val = xhci_readl(xhci, &xhci->erst->ir_set->erst_size);
-	val &= ERST_SIZE_MASK;
-	val |= ERST_NUM_SEGS;
-	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
-			val);
-	xhci_writel(xhci, val, &xhci->erst->ir_set->erst_size);
-
-	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
-	/* set the segment table base address */
-	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
-			(unsigned long long)xhci->erst->erst_dma_addr);
-	val_64 = xhci_read_64(xhci, &xhci->erst->ir_set->erst_base);
-	val_64 &= ERST_PTR_MASK;
-	val_64 |= (xhci->erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
-	xhci_write_64(xhci, val_64, &xhci->erst->ir_set->erst_base);
-
-	/* Set the event ring dequeue address */
-	xhci_set_hc_event_deq(xhci);
-	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
-	xhci_print_ir_set(xhci, 0);
+		/* set ERST count with the number of entries in the segment
+		 * table
+		 */
+		val = xhci_readl(xhci, &xhci->erst[i]->ir_set->erst_size);
+		val &= ERST_SIZE_MASK;
+		val |= ERST_NUM_SEGS;
+		xhci_dbg(xhci, "// Write ERST size = %i to ir_set %d "
+				"(some bits preserved)\n", val, i);
+		xhci_writel(xhci, val, &xhci->erst[i]->ir_set->erst_size);
+
+		xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+		/* set the segment table base address */
+		xhci_dbg(xhci, "// Set ERST base address for ir_set %d "
+				"= 0x%llx\n", i,
+			(unsigned long long)xhci->erst[i]->erst_dma_addr);
+		val_64 = xhci_read_64(xhci, &xhci->erst[i]->ir_set->erst_base);
+		val_64 &= ERST_PTR_MASK;
+		val_64 |= (xhci->erst[i]->erst_dma_addr & (u64) ~ERST_PTR_MASK);
+		xhci_write_64(xhci, val_64, &xhci->erst[i]->ir_set->erst_base);
+
+		/* Set the event ring dequeue address */
+		xhci_set_hc_event_deq(xhci, i);
+		xhci_dbg(xhci, "Wrote ERST address to ir_set %d.\n", i);
+		xhci_print_ir_set(xhci, i);
+	}
 
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 519543c..cd3bad9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -96,9 +96,9 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		struct xhci_segment *seg, union xhci_trb *trb)
 {
-	if (ring == xhci->erst->ring)
+	if (ring == xhci->erst[0]->ring)
 		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
-			(seg->next == xhci->erst->ring->first_seg);
+			(seg->next == xhci->erst[0]->ring->first_seg);
 	else
 		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 }
@@ -110,7 +110,7 @@ static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		struct xhci_segment *seg, union xhci_trb *trb)
 {
-	if (ring == xhci->erst->ring)
+	if (ring == xhci->erst[0]->ring)
 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
 	else
 		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
@@ -202,7 +202,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	 */
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
 		if (!consumer) {
-			if (ring != xhci->erst->ring) {
+			if (ring != xhci->erst[0]->ring) {
 				/*
 				 * If the caller doesn't plan on enqueueing more
 				 * TDs before ringing the doorbell, then we
@@ -1346,7 +1346,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
 cleanup:
 	/* Update event ring dequeue pointer before dropping the lock */
-	inc_deq(xhci, xhci->erst->ring, true);
+	inc_deq(xhci, xhci->erst[0]->ring, true);
 
 	/* Don't make the USB core poll the roothub if we got a bad port status
 	 * change event.  Besides, at that point we can't tell which roothub
@@ -1611,7 +1611,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 
-	xhci_debug_trb(xhci, xhci->erst->ring->dequeue);
+	xhci_debug_trb(xhci, xhci->erst[0]->ring->dequeue);
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
 		if (event_trb == ep_ring->dequeue) {
@@ -2131,7 +2131,7 @@ cleanup:
 		 * Will roll back to continue process missed tds.
 		 */
 		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
-			inc_deq(xhci, xhci->erst->ring, true);
+			inc_deq(xhci, xhci->erst[0]->ring, true);
 		}
 
 		if (ret) {
@@ -2290,10 +2290,10 @@ hw_died:
 		u32 irq_pending;
 		/* Acknowledge the PCI interrupt */
 		irq_pending = xhci_readl(xhci,
-				&xhci->erst->ir_set->irq_pending);
+				&xhci->erst[0]->ir_set->irq_pending);
 		irq_pending |= 0x3;
 		xhci_writel(xhci, irq_pending,
-				&xhci->erst->ir_set->irq_pending);
+				&xhci->erst[0]->ir_set->irq_pending);
 	}
 
 	if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -2302,25 +2302,26 @@ hw_died:
 		/* Clear the event handler busy flag (RW1C);
 		 * the event ring should be empty.
 		 */
-		temp_64 = xhci_read_64(xhci, &xhci->erst->ir_set->erst_dequeue);
+		temp_64 = xhci_read_64(xhci,
+					&xhci->erst[0]->ir_set->erst_dequeue);
 		xhci_write_64(xhci, temp_64 | ERST_EHB,
-				&xhci->erst->ir_set->erst_dequeue);
+				&xhci->erst[0]->ir_set->erst_dequeue);
 		spin_unlock(&xhci->lock);
 
 		return IRQ_HANDLED;
 	}
 
-	event_ring_deq = xhci->erst->ring->dequeue;
+	event_ring_deq = xhci->erst[0]->ring->dequeue;
 	/* FIXME this should be a delayed service routine
 	 * that clears the EHB.
 	 */
-	while (xhci_handle_event(xhci->erst) > 0) {}
+	while (xhci_handle_event(xhci->erst[0]) > 0) {}
 
-	temp_64 = xhci_read_64(xhci, &xhci->erst->ir_set->erst_dequeue);
+	temp_64 = xhci_read_64(xhci, &xhci->erst[0]->ir_set->erst_dequeue);
 	/* If necessary, update the HW's version of the event ring deq ptr. */
-	if (event_ring_deq != xhci->erst->ring->dequeue) {
-		deq = xhci_trb_virt_to_dma(xhci->erst->ring->deq_seg,
-				xhci->erst->ring->dequeue);
+	if (event_ring_deq != xhci->erst[0]->ring->dequeue) {
+		deq = xhci_trb_virt_to_dma(xhci->erst[0]->ring->deq_seg,
+				xhci->erst[0]->ring->dequeue);
 		if (deq == 0)
 			xhci_warn(xhci, "WARN something wrong with SW event "
 					"ring dequeue ptr.\n");
@@ -2331,7 +2332,7 @@ hw_died:
 
 	/* Clear the event handler busy flag (RW1C); event ring is empty. */
 	temp_64 |= ERST_EHB;
-	xhci_write_64(xhci, temp_64, &xhci->erst->ir_set->erst_dequeue);
+	xhci_write_64(xhci, temp_64, &xhci->erst[0]->ir_set->erst_dequeue);
 
 	spin_unlock(&xhci->lock);
 
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 703f73b..eb6e33c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -192,9 +192,9 @@ static void xhci_free_irq(struct xhci_hcd *xhci)
 		for (i = 0; i < xhci->msix_count; i++)
 			if (xhci->msix_entries[i].vector)
 				free_irq(xhci->msix_entries[i].vector,
-						xhci->erst);
+						xhci->erst[0]);
 	} else if (pdev->irq >= 0)
-		free_irq(pdev->irq, xhci->erst);
+		free_irq(pdev->irq, xhci->erst[0]);
 
 	return;
 }
@@ -214,7 +214,7 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
 	}
 
 	ret = request_irq(pdev->irq, xhci_msi_irq, 0, "xhci_hcd",
-							xhci->erst);
+							xhci->erst[0]);
 	if (ret) {
 		xhci_err(xhci, "disable MSI interrupt\n");
 		pci_disable_msi(pdev);
@@ -232,16 +232,6 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
 	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
-	/*
-	 * calculate number of msi-x vectors supported.
-	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
-	 *   with max number of interrupters based on the xhci HCSPARAMS1.
-	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
-	 *   Add additional 1 vector to ensure always available interrupt.
-	 */
-	xhci->msix_count = min(num_online_cpus() + 1,
-				HCS_MAX_INTRS(xhci->hcs_params1));
-
 	xhci->msix_entries =
 		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
 				GFP_KERNEL);
@@ -263,7 +253,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 
 	for (i = 0; i < xhci->msix_count; i++) {
 		ret = request_irq(xhci->msix_entries[i].vector, xhci_msi_irq,
-					0, "xhci_hcd", xhci->erst);
+					0, "xhci_hcd", xhci->erst[0]);
 		if (ret)
 			goto disable_msix;
 	}
@@ -350,14 +340,14 @@ static void xhci_event_ring_work(unsigned long arg)
 		return;
 	}
 
-	temp = xhci_readl(xhci, &xhci->erst->ir_set->irq_pending);
+	temp = xhci_readl(xhci, &xhci->erst[0]->ir_set->irq_pending);
 	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
 	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
 	xhci->error_bitmask = 0;
 	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_segment(xhci, xhci->erst->ring->deq_seg);
-	xhci_dbg_ring_ptrs(xhci, xhci->erst->ring);
-	temp_64 = xhci_read_64(xhci, &xhci->erst->ir_set->erst_dequeue);
+	xhci_debug_segment(xhci, xhci->erst[0]->ring->deq_seg);
+	xhci_dbg_ring_ptrs(xhci, xhci->erst[0]->ring);
+	temp_64 = xhci_read_64(xhci, &xhci->erst[0]->ir_set->erst_dequeue);
 	temp_64 &= ~ERST_PTR_MASK;
 	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
 	xhci_dbg(xhci, "Command ring:\n");
@@ -470,19 +460,19 @@ legacy_irq:
 	xhci_dbg_cmd_ptrs(xhci);
 
 	xhci_dbg(xhci, "ERST memory map follows:\n");
-	xhci_dbg_erst(xhci, xhci->erst);
+	xhci_dbg_erst(xhci, xhci->erst[0]);
 	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_ring(xhci, xhci->erst->ring);
-	xhci_dbg_ring_ptrs(xhci, xhci->erst->ring);
-	temp_64 = xhci_read_64(xhci, &xhci->erst->ir_set->erst_dequeue);
+	xhci_debug_ring(xhci, xhci->erst[0]->ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->erst[0]->ring);
+	temp_64 = xhci_read_64(xhci, &xhci->erst[0]->ir_set->erst_dequeue);
 	temp_64 &= ~ERST_PTR_MASK;
 	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
 
 	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
-	temp = xhci_readl(xhci, &xhci->erst->ir_set->irq_control);
+	temp = xhci_readl(xhci, &xhci->erst[0]->ir_set->irq_control);
 	temp &= ~ER_IRQ_INTERVAL_MASK;
 	temp |= (u32) 160;
-	xhci_writel(xhci, temp, &xhci->erst->ir_set->irq_control);
+	xhci_writel(xhci, temp, &xhci->erst[0]->ir_set->irq_control);
 
 	/* Set the HCD state before we enable the irqs */
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
@@ -491,11 +481,12 @@ legacy_irq:
 			temp);
 	xhci_writel(xhci, temp, &xhci->op_regs->command);
 
-	temp = xhci_readl(xhci, &xhci->erst->ir_set->irq_pending);
+	temp = xhci_readl(xhci, &xhci->erst[0]->ir_set->irq_pending);
 	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
-			xhci->erst->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
+			xhci->erst[0]->ir_set,
+			(unsigned int) ER_IRQ_ENABLE(temp));
 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
-			&xhci->erst->ir_set->irq_pending);
+			&xhci->erst[0]->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, 0);
 
 	if (xhci->quirks & XHCI_NEC_HOST)
@@ -562,9 +553,9 @@ void xhci_stop(struct usb_hcd *hcd)
 	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
-	temp = xhci_readl(xhci, &xhci->erst->ir_set->irq_pending);
+	temp = xhci_readl(xhci, &xhci->erst[0]->ir_set->irq_pending);
 	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
-			&xhci->erst->ir_set->irq_pending);
+			&xhci->erst[0]->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, 0);
 
 	xhci_dbg(xhci, "cleaning up memory\n");
@@ -604,13 +595,15 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
 	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
 	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
 	xhci->s3.irq_pending = xhci_readl(xhci,
-					&xhci->erst->ir_set->irq_pending);
+					&xhci->erst[0]->ir_set->irq_pending);
 	xhci->s3.irq_control = xhci_readl(xhci,
-					&xhci->erst->ir_set->irq_control);
-	xhci->s3.erst_size = xhci_readl(xhci, &xhci->erst->ir_set->erst_size);
-	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->erst->ir_set->erst_base);
+					&xhci->erst[0]->ir_set->irq_control);
+	xhci->s3.erst_size = xhci_readl(xhci,
+					&xhci->erst[0]->ir_set->erst_size);
+	xhci->s3.erst_base = xhci_read_64(xhci,
+					&xhci->erst[0]->ir_set->erst_base);
 	xhci->s3.erst_dequeue = xhci_read_64(xhci,
-					&xhci->erst->ir_set->erst_dequeue);
+					&xhci->erst[0]->ir_set->erst_dequeue);
 }
 
 static void xhci_restore_registers(struct xhci_hcd *xhci)
@@ -620,11 +613,13 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
 	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
 	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
 	xhci_writel(xhci, xhci->s3.irq_pending,
-					&xhci->erst->ir_set->irq_pending);
+					&xhci->erst[0]->ir_set->irq_pending);
 	xhci_writel(xhci, xhci->s3.irq_control,
-					&xhci->erst->ir_set->irq_control);
-	xhci_writel(xhci, xhci->s3.erst_size, &xhci->erst->ir_set->erst_size);
-	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->erst->ir_set->erst_base);
+					&xhci->erst[0]->ir_set->irq_control);
+	xhci_writel(xhci, xhci->s3.erst_size,
+					&xhci->erst[0]->ir_set->erst_size);
+	xhci_write_64(xhci, xhci->s3.erst_base,
+					&xhci->erst[0]->ir_set->erst_base);
 }
 
 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
@@ -804,9 +799,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 		temp = xhci_readl(xhci, &xhci->op_regs->status);
 		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
-		temp = xhci_readl(xhci, &xhci->erst->ir_set->irq_pending);
+		temp = xhci_readl(xhci, &xhci->erst[0]->ir_set->irq_pending);
 		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
-				&xhci->erst->ir_set->irq_pending);
+				&xhci->erst[0]->ir_set->irq_pending);
 		xhci_print_ir_set(xhci, 0);
 
 		xhci_dbg(xhci, "cleaning up memory\n");
@@ -1258,7 +1253,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
 	xhci_dbg(xhci, "Cancel URB %p\n", urb);
 	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_ring(xhci, xhci->erst->ring);
+	xhci_debug_ring(xhci, xhci->erst[0]->ring);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index c549a32..14b575b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1249,7 +1249,7 @@ struct xhci_hcd {
 	struct xhci_device_context_array *dcbaa;
 	struct xhci_ring	*cmd_ring;
 	unsigned int		cmd_ring_reserved_trbs;
-	struct xhci_erst	*erst;
+	struct xhci_erst	*erst[32];
 	/* Scratchpad */
 	struct xhci_scratchpad  *scratchpad;
 
-- 
1.7.1


--
To unsubscribe from this list: send the line "unsubscribe linux-usb" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[Index of Archives]     [Linux Media]     [Linux Input]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]     [Old Linux USB Devel Archive]

  Powered by Linux