[RFC 2/4] xHCI: Allocate multiple ERST and event rings

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



xHCI can support multiple interrupters, which need multiple ERSTs and
event rings, so a device can decide which interrupter it will use and
have its events placed on the corresponding event ring.

This patch modifies the xHCI memory initialization and cleanup parts to
allocate multiple ERSTs and event rings, but still only uses the first
ERST and event ring; this will be changed by later patches.

The number of ERSTs and event rings allocated is equal to the number of
MSI-X vectors, up to a maximum of 32.

Signed-off-by: Andiry Xu <andiry.xu@xxxxxxx>
---
 drivers/usb/host/xhci-mem.c  |  261 ++++++++++++++++++++++++------------------
 drivers/usb/host/xhci-ring.c |   40 +++---
 drivers/usb/host/xhci.c      |   22 +---
 drivers/usb/host/xhci.h      |    5 +-
 4 files changed, 180 insertions(+), 148 deletions(-)

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ad098dd..4355fa1 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1391,23 +1391,28 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	int i;
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
-	if (xhci->ir_set) {
-		xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-		xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
-		xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
+	for (i = 0; i < xhci->msix_count; i++) {
+		xhci->ir_set = (void *) &xhci->run_regs->ir_set[i];
+		if (xhci->ir_set) {
+			xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
+			xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+			xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
+		}
+		size = sizeof(struct xhci_erst_entry)*
+				(xhci->erst[i]->num_entries);
+		if (xhci->erst[i]->entries)
+			pci_free_consistent(pdev, size,
+				xhci->erst[i]->entries,
+				xhci->erst[i]->erst_dma_addr);
+		xhci->erst[i]->entries = NULL;
+		if (xhci->erst[i])
+			kfree(xhci->erst[i]);
+		xhci_dbg(xhci, "Freed ERST %d\n", i);
+		if (xhci->event_ring[i])
+			xhci_ring_free(xhci, xhci->event_ring[i]);
+		xhci->event_ring[i] = NULL;
+		xhci_dbg(xhci, "Freed event ring %d\n", i);
 	}
-	size = sizeof(struct xhci_erst_entry)*(xhci->erst->num_entries);
-	if (xhci->erst->entries)
-		pci_free_consistent(pdev, size,
-				xhci->erst->entries, xhci->erst->erst_dma_addr);
-	xhci->erst->entries = NULL;
-	if (xhci->erst)
-		kfree(xhci->erst);
-	xhci_dbg(xhci, "Freed ERST\n");
-	if (xhci->event_ring)
-		xhci_ring_free(xhci, xhci->event_ring);
-	xhci->event_ring = NULL;
-	xhci_dbg(xhci, "Freed event ring\n");
 
 	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 	if (xhci->cmd_ring)
@@ -1492,7 +1497,8 @@ static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
 }
 
 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
-static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
+static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, int index,
+				gfp_t mem_flags)
 {
 	struct {
 		dma_addr_t		input_dma;
@@ -1501,18 +1507,22 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 		/* A zeroed DMA field should fail */
 		{ 0, NULL },
 		/* One TRB before the ring start should fail */
-		{ xhci->event_ring->first_seg->dma - 16, NULL },
+		{ xhci->event_ring[index]->first_seg->dma - 16, NULL },
 		/* One byte before the ring start should fail */
-		{ xhci->event_ring->first_seg->dma - 1, NULL },
+		{ xhci->event_ring[index]->first_seg->dma - 1, NULL },
 		/* Starting TRB should succeed */
-		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
+		{ xhci->event_ring[index]->first_seg->dma,
+			xhci->event_ring[index]->first_seg },
 		/* Ending TRB should succeed */
-		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
-			xhci->event_ring->first_seg },
+		{ xhci->event_ring[index]->first_seg->dma +
+			(TRBS_PER_SEGMENT - 1)*16,
+			xhci->event_ring[index]->first_seg },
 		/* One byte after the ring end should fail */
-		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
+		{ xhci->event_ring[index]->first_seg->dma +
+			(TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
 		/* One TRB after the ring end should fail */
-		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
+		{ xhci->event_ring[index]->first_seg->dma +
+			(TRBS_PER_SEGMENT)*16, NULL },
 		/* An address of all ones should fail */
 		{ (dma_addr_t) (~0), NULL },
 	};
@@ -1524,58 +1534,62 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 		struct xhci_segment	*result_seg;
 	} complex_test_vector [] = {
 		/* Test feeding a valid DMA address from a different ring */
-		{	.input_seg = xhci->event_ring->first_seg,
-			.start_trb = xhci->event_ring->first_seg->trbs,
-			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+		{	.input_seg = xhci->event_ring[index]->first_seg,
+			.start_trb = xhci->event_ring[index]->first_seg->trbs,
+			.end_trb = &xhci->event_ring[index]->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 			.input_dma = xhci->cmd_ring->first_seg->dma,
 			.result_seg = NULL,
 		},
 		/* Test feeding a valid end TRB from a different ring */
-		{	.input_seg = xhci->event_ring->first_seg,
-			.start_trb = xhci->event_ring->first_seg->trbs,
+		{	.input_seg = xhci->event_ring[index]->first_seg,
+			.start_trb = xhci->event_ring[index]->first_seg->trbs,
 			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 			.input_dma = xhci->cmd_ring->first_seg->dma,
 			.result_seg = NULL,
 		},
 		/* Test feeding a valid start and end TRB from a different ring */
-		{	.input_seg = xhci->event_ring->first_seg,
+		{	.input_seg = xhci->event_ring[index]->first_seg,
 			.start_trb = xhci->cmd_ring->first_seg->trbs,
 			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 			.input_dma = xhci->cmd_ring->first_seg->dma,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but after this TD */
-		{	.input_seg = xhci->event_ring->first_seg,
-			.start_trb = &xhci->event_ring->first_seg->trbs[0],
-			.end_trb = &xhci->event_ring->first_seg->trbs[3],
-			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
+		{	.input_seg = xhci->event_ring[index]->first_seg,
+			.start_trb = &xhci->event_ring[index]->first_seg->trbs[0],
+			.end_trb = &xhci->event_ring[index]->first_seg->trbs[3],
+			.input_dma = xhci->event_ring[index]->first_seg->dma +
+					4*16,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but before this TD */
-		{	.input_seg = xhci->event_ring->first_seg,
-			.start_trb = &xhci->event_ring->first_seg->trbs[3],
-			.end_trb = &xhci->event_ring->first_seg->trbs[6],
-			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
+		{	.input_seg = xhci->event_ring[index]->first_seg,
+			.start_trb = &xhci->event_ring[index]->first_seg->trbs[3],
+			.end_trb = &xhci->event_ring[index]->first_seg->trbs[6],
+			.input_dma = xhci->event_ring[index]->first_seg->dma +
+					2*16,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but after this wrapped TD */
-		{	.input_seg = xhci->event_ring->first_seg,
-			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
-			.end_trb = &xhci->event_ring->first_seg->trbs[1],
-			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
+		{	.input_seg = xhci->event_ring[index]->first_seg,
+			.start_trb = &xhci->event_ring[index]->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring[index]->first_seg->trbs[1],
+			.input_dma = xhci->event_ring[index]->first_seg->dma +
+					2*16,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but before this wrapped TD */
-		{	.input_seg = xhci->event_ring->first_seg,
-			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
-			.end_trb = &xhci->event_ring->first_seg->trbs[1],
-			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
+		{	.input_seg = xhci->event_ring[index]->first_seg,
+			.start_trb = &xhci->event_ring[index]->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring[index]->first_seg->trbs[1],
+			.input_dma = xhci->event_ring[index]->first_seg->dma +
+					(TRBS_PER_SEGMENT - 4)*16,
 			.result_seg = NULL,
 		},
 		/* TRB not in this ring, and we have a wrapped TD */
-		{	.input_seg = xhci->event_ring->first_seg,
-			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
-			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+		{	.input_seg = xhci->event_ring[index]->first_seg,
+			.start_trb = &xhci->event_ring[index]->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring[index]->first_seg->trbs[1],
 			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
 			.result_seg = NULL,
 		},
@@ -1587,9 +1601,9 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 	num_tests = ARRAY_SIZE(simple_test_vector);
 	for (i = 0; i < num_tests; i++) {
 		ret = xhci_test_trb_in_td(xhci,
-				xhci->event_ring->first_seg,
-				xhci->event_ring->first_seg->trbs,
-				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+				xhci->event_ring[index]->first_seg,
+				xhci->event_ring[index]->first_seg->trbs,
+				&xhci->event_ring[index]->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 				simple_test_vector[i].input_dma,
 				simple_test_vector[i].result_seg,
 				"Simple", i);
@@ -1613,13 +1627,14 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 	return 0;
 }
 
-static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, int index)
 {
 	u64 temp;
 	dma_addr_t deq;
 
-	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
-			xhci->event_ring->dequeue);
+	xhci->ir_set = (void *) &xhci->run_regs->ir_set[index];
+	deq = xhci_trb_virt_to_dma(xhci->event_ring[index]->deq_seg,
+			xhci->event_ring[index]->dequeue);
 	if (deq == 0 && !in_interrupt())
 		xhci_warn(xhci, "WARN something wrong with SW event ring "
 				"dequeue ptr.\n");
@@ -1905,69 +1920,95 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->dba = (void *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
-	/* Set ir_set to interrupt register set 0 */
-	xhci->ir_set = (void *) xhci->run_regs->ir_set;
+
+	/*
+	 * calculate number of msi-x vectors supported.
+	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
+	 *   with max number of interrupters based on the xhci HCSPARAMS1.
+	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
+	 *   Add additional 1 vector to ensure always available interrupt.
+	 */
+	xhci->msix_count = min(num_online_cpus() + 1,
+				HCS_MAX_INTRS(xhci->hcs_params1));
+	/* Support up to 32 interrupters and event rings */
+	if (xhci->msix_count > 32)
+		xhci->msix_count = 32;
 
 	/*
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
-	xhci_dbg(xhci, "// Allocating erst\n");
-	xhci->erst = kzalloc(sizeof(struct xhci_erst), flags);
-	if (!xhci->erst)
-		goto fail;
-	xhci_dbg(xhci, "// Allocating event ring\n");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
-	if (!xhci->event_ring)
-		goto fail;
-	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
-		goto fail;
+	for (i = 0; i < xhci->msix_count; i++) {
+		/* Set ir_set to interrupt register set i */
+		xhci->ir_set = (void *) &xhci->run_regs->ir_set[i];
 
-	xhci->erst->entries = pci_alloc_consistent(to_pci_dev(dev),
+		xhci_dbg(xhci, "// Allocating erst %d\n", i);
+		xhci->erst[i] = kzalloc(sizeof(struct xhci_erst), flags);
+		if (!xhci->erst[i])
+			goto fail;
+		xhci_dbg(xhci, "// Allocating event ring %d\n", i);
+		xhci->event_ring[i] = xhci_ring_alloc(xhci, ERST_NUM_SEGS,
+					false, flags);
+		if (!xhci->event_ring[i])
+			goto fail;
+		if (xhci_check_trb_in_td_math(xhci, i, flags) < 0)
+			goto fail;
+
+		xhci->erst[i]->entries = pci_alloc_consistent(to_pci_dev(dev),
 			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
-	if (!xhci->erst->entries)
-		goto fail;
-	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
-			(unsigned long long)dma);
-
-	memset(xhci->erst->entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
-	xhci->erst->num_entries = ERST_NUM_SEGS;
-	xhci->erst->erst_dma_addr = dma;
-	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
-			xhci->erst->num_entries,
-			xhci->erst->entries,
-			(unsigned long long)xhci->erst->erst_dma_addr);
-
-	/* set ring base address and size for each segment table entry */
-	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
-		struct xhci_erst_entry *entry = &xhci->erst->entries[val];
-		entry->seg_addr = seg->dma;
-		entry->seg_size = TRBS_PER_SEGMENT;
-		entry->rsvd = 0;
-		seg = seg->next;
-	}
+		if (!xhci->erst[i]->entries)
+			goto fail;
+		xhci_dbg(xhci, "// Allocated event ring segment "
+				"table at 0x%llx\n", (unsigned long long)dma);
+
+		memset(xhci->erst[i]->entries, 0,
+			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+		xhci->erst[i]->num_entries = ERST_NUM_SEGS;
+		xhci->erst[i]->erst_dma_addr = dma;
+		xhci_dbg(xhci, "Set ERST to %d; private num segs = %i, "
+				"virt addr = %p, dma addr = 0x%llx\n",
+			i, xhci->erst[i]->num_entries,
+			xhci->erst[i]->entries,
+			(unsigned long long)xhci->erst[i]->erst_dma_addr);
+
+		/* set ring base address and size for each segment table entry
+		 */
+		for (val = 0, seg = xhci->event_ring[i]->first_seg;
+			val < ERST_NUM_SEGS; val++) {
+			struct xhci_erst_entry *entry =
+				&xhci->erst[i]->entries[val];
+			entry->seg_addr = seg->dma;
+			entry->seg_size = TRBS_PER_SEGMENT;
+			entry->rsvd = 0;
+			seg = seg->next;
+		}
 
-	/* set ERST count with the number of entries in the segment table */
-	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
-	val &= ERST_SIZE_MASK;
-	val |= ERST_NUM_SEGS;
-	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
-			val);
-	xhci_writel(xhci, val, &xhci->ir_set->erst_size);
-
-	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
-	/* set the segment table base address */
-	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
-			(unsigned long long)xhci->erst->erst_dma_addr);
-	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
-	val_64 &= ERST_PTR_MASK;
-	val_64 |= (xhci->erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
-	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
-
-	/* Set the event ring dequeue address */
-	xhci_set_hc_event_deq(xhci);
-	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+		/* set ERST count with the number of entries in the segment
+		 * table
+		 */
+		val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+		val &= ERST_SIZE_MASK;
+		val |= ERST_NUM_SEGS;
+		xhci_dbg(xhci, "// Write ERST size = %i to ir_set %d "
+				"(some bits preserved)\n", val, i);
+		xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+
+		xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+		/* set the segment table base address */
+		xhci_dbg(xhci, "// Set ERST base address for ir_set %d "
+				"= 0x%llx\n",
+			i, (unsigned long long)xhci->erst[i]->erst_dma_addr);
+		val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+		val_64 &= ERST_PTR_MASK;
+		val_64 |= (xhci->erst[i]->erst_dma_addr & (u64) ~ERST_PTR_MASK);
+		xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
+
+		/* Set the event ring dequeue address */
+		xhci_set_hc_event_deq(xhci, i);
+		xhci_dbg(xhci, "Wrote ERST address to ir_set %d.\n", i);
+		xhci_print_ir_set(xhci, xhci->ir_set, i);
+	}
+	xhci->ir_set = (void *) &xhci->run_regs->ir_set[0];
 
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c..864850e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -96,9 +96,9 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		struct xhci_segment *seg, union xhci_trb *trb)
 {
-	if (ring == xhci->event_ring)
+	if (ring == xhci->event_ring[0])
 		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
-			(seg->next == xhci->event_ring->first_seg);
+			(seg->next == xhci->event_ring[0]->first_seg);
 	else
 		return trb->link.control & LINK_TOGGLE;
 }
@@ -110,7 +110,7 @@ static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring
 static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		struct xhci_segment *seg, union xhci_trb *trb)
 {
-	if (ring == xhci->event_ring)
+	if (ring == xhci->event_ring[0])
 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
 	else
 		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
@@ -165,8 +165,8 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 		next = ring->dequeue;
 	}
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
-	if (ring == xhci->event_ring)
-		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
+	if (ring == xhci->event_ring[0])
+		xhci_dbg(xhci, "Event ring 0 deq = 0x%llx (DMA)\n", addr);
 	else if (ring == xhci->cmd_ring)
 		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
 	else
@@ -206,7 +206,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	 */
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
 		if (!consumer) {
-			if (ring != xhci->event_ring) {
+			if (ring != xhci->event_ring[0]) {
 				/*
 				 * If the caller doesn't plan on enqueueing more
 				 * TDs before ringing the doorbell, then we
@@ -244,7 +244,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		next = ring->enqueue;
 	}
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
-	if (ring == xhci->event_ring)
+	if (ring == xhci->event_ring[0])
 		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
 	else if (ring == xhci->cmd_ring)
 		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
@@ -1227,7 +1227,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
 cleanup:
 	/* Update event ring dequeue pointer before dropping the lock */
-	inc_deq(xhci, xhci->event_ring, true);
+	inc_deq(xhci, xhci->event_ring[0], true);
 
 	spin_unlock(&xhci->lock);
 	/* Pass this up to the core */
@@ -1476,7 +1476,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	trb_comp_code = GET_COMP_CODE(event->transfer_len);
 
-	xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+	xhci_debug_trb(xhci, xhci->event_ring[0]->dequeue);
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
 		if (event_trb == ep_ring->dequeue) {
@@ -1969,7 +1969,7 @@ cleanup:
 		 * Will roll back to continue process missed tds.
 		 */
 		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
-			inc_deq(xhci, xhci->event_ring, true);
+			inc_deq(xhci, xhci->event_ring[0], true);
 		}
 
 		if (ret) {
@@ -2016,15 +2016,15 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
 	int ret;
 
 	xhci_dbg(xhci, "In %s\n", __func__);
-	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
+	if (!xhci->event_ring[0] || !xhci->event_ring[0]->dequeue) {
 		xhci->error_bitmask |= 1 << 1;
 		return;
 	}
 
-	event = xhci->event_ring->dequeue;
+	event = xhci->event_ring[0]->dequeue;
 	/* Does the HC or OS own the TRB? */
 	if ((event->event_cmd.flags & TRB_CYCLE) !=
-			xhci->event_ring->cycle_state) {
+			xhci->event_ring[0]->cycle_state) {
 		xhci->error_bitmask |= 1 << 2;
 		return;
 	}
@@ -2069,7 +2069,7 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
 
 	if (update_ptrs)
 		/* Update SW event ring dequeue pointer */
-		inc_deq(xhci, xhci->event_ring, true);
+		inc_deq(xhci, xhci->event_ring[0], true);
 
 	/* Are there more items on the event ring? */
 	xhci_handle_event(xhci);
@@ -2090,7 +2090,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 	dma_addr_t deq;
 
 	spin_lock(&xhci->lock);
-	trb = xhci->event_ring->dequeue;
+	trb = xhci->event_ring[0]->dequeue;
 	/* Check if the xHC generated the interrupt, or the irq is shared */
 	status = xhci_readl(xhci, &xhci->op_regs->status);
 	if (status == 0xffffffff)
@@ -2104,7 +2104,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
 	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
 			(unsigned long long)
-			xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
+			xhci_trb_virt_to_dma(xhci->event_ring[0]->deq_seg, trb),
 			lower_32_bits(trb->link.segment_ptr),
 			upper_32_bits(trb->link.segment_ptr),
 			(unsigned int) trb->link.intr_target,
@@ -2151,7 +2151,7 @@ hw_died:
 		return IRQ_HANDLED;
 	}
 
-	event_ring_deq = xhci->event_ring->dequeue;
+	event_ring_deq = xhci->event_ring[0]->dequeue;
 	/* FIXME this should be a delayed service routine
 	 * that clears the EHB.
 	 */
@@ -2159,9 +2159,9 @@ hw_died:
 
 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	/* If necessary, update the HW's version of the event ring deq ptr. */
-	if (event_ring_deq != xhci->event_ring->dequeue) {
-		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
-				xhci->event_ring->dequeue);
+	if (event_ring_deq != xhci->event_ring[0]->dequeue) {
+		deq = xhci_trb_virt_to_dma(xhci->event_ring[0]->deq_seg,
+				xhci->event_ring[0]->dequeue);
 		if (deq == 0)
 			xhci_warn(xhci, "WARN something wrong with SW event "
 					"ring dequeue ptr.\n");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d77b1b0..26ff10e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -229,16 +229,6 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
 	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
-	/*
-	 * calculate number of msi-x vectors supported.
-	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
-	 *   with max number of interrupters based on the xhci HCSPARAMS1.
-	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
-	 *   Add additional 1 vector to ensure always available interrupt.
-	 */
-	xhci->msix_count = min(num_online_cpus() + 1,
-				HCS_MAX_INTRS(xhci->hcs_params1));
-
 	xhci->msix_entries =
 		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
 				GFP_KERNEL);
@@ -354,8 +344,8 @@ void xhci_event_ring_work(unsigned long arg)
 	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
 	xhci->error_bitmask = 0;
 	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
-	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	xhci_debug_segment(xhci, xhci->event_ring[0]->deq_seg);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring[0]);
 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	temp_64 &= ~ERST_PTR_MASK;
 	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
@@ -446,10 +436,10 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_dbg_cmd_ptrs(xhci);
 
 	xhci_dbg(xhci, "ERST memory map follows:\n");
-	xhci_dbg_erst(xhci, xhci->erst);
+	xhci_dbg_erst(xhci, xhci->erst[0]);
 	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_ring(xhci, xhci->event_ring);
-	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	xhci_debug_ring(xhci, xhci->event_ring[0]);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring[0]);
 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	temp_64 &= ~ERST_PTR_MASK;
 	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
@@ -1191,7 +1181,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
 	xhci_dbg(xhci, "Cancel URB %p\n", urb);
 	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_ring(xhci, xhci->event_ring);
+	xhci_debug_ring(xhci, xhci->event_ring[0]);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8883ce0..4187bf4 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1199,8 +1199,9 @@ struct xhci_hcd {
 	struct xhci_device_context_array *dcbaa;
 	struct xhci_ring	*cmd_ring;
 	unsigned int		cmd_ring_reserved_trbs;
-	struct xhci_ring	*event_ring;
-	struct xhci_erst	*erst;
+	/* Up to 32 interrupters and event rings */
+	struct xhci_ring	*event_ring[32];
+	struct xhci_erst	*erst[32];
 	/* Scratchpad */
 	struct xhci_scratchpad  *scratchpad;
 
-- 
1.7.0.4



--
To unsubscribe from this list: send the line "unsubscribe linux-usb" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[Index of Archives]     [Linux Media]     [Linux Input]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]     [Old Linux USB Devel Archive]

  Powered by Linux