This patch changes xhci->erst to a pointer, because the upcoming
interrupters implementation will give xhci multiple ERSTs. xhci->erst will
then be extended to a pointer array, and allocating the memory only when it
is needed avoids unnecessary memory cost.

Signed-off-by: Andiry Xu <andiry.xu@xxxxxxx>
---
A rough sketch of how this could later be extended to a per-interrupter
ERST array is appended after the patch.

 drivers/usb/host/xhci-mem.c  | 154 ++++++++++++++++++++++-------------------
 drivers/usb/host/xhci-ring.c |  38 ++++++-----
 drivers/usb/host/xhci.c      |  68 ++++++++++---------
 drivers/usb/host/xhci.h      |   2 +-
 4 files changed, 141 insertions(+), 121 deletions(-)

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f8bedc3..89fb1dc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1495,21 +1495,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	int i;

 	/* Free the Event Ring Segment Table and the actual Event Ring */
-	if (xhci->erst.ir_set) {
-		xhci_writel(xhci, 0, &xhci->erst.ir_set->erst_size);
-		xhci_write_64(xhci, 0, &xhci->erst.ir_set->erst_base);
-		xhci_write_64(xhci, 0, &xhci->erst.ir_set->erst_dequeue);
+	if (xhci->erst->ir_set) {
+		xhci_writel(xhci, 0, &xhci->erst->ir_set->erst_size);
+		xhci_write_64(xhci, 0, &xhci->erst->ir_set->erst_base);
+		xhci_write_64(xhci, 0, &xhci->erst->ir_set->erst_dequeue);
 	}
-	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
-	if (xhci->erst.entries)
+	size = sizeof(struct xhci_erst_entry)*(xhci->erst->num_entries);
+	if (xhci->erst->entries)
 		pci_free_consistent(pdev, size,
-				xhci->erst.entries, xhci->erst.erst_dma_addr);
-	xhci->erst.entries = NULL;
-	xhci_dbg(xhci, "Freed ERST\n");
-	if (xhci->erst.ring)
-		xhci_ring_free(xhci, xhci->erst.ring);
-	xhci->erst.ring = NULL;
+				xhci->erst->entries, xhci->erst->erst_dma_addr);
+	xhci->erst->entries = NULL;
+	if (xhci->erst->ring)
+		xhci_ring_free(xhci, xhci->erst->ring);
+	xhci->erst->ring = NULL;
 	xhci_dbg(xhci, "Freed event ring\n");
+	kfree(xhci->erst);
+	xhci_dbg(xhci, "Freed ERST\n");

 	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 	if (xhci->cmd_ring)
@@ -1604,18 +1605,21 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 		/* A zeroed DMA field should fail */
 		{ 0, NULL },
 		/* One TRB before the ring start should fail */
-		{ xhci->erst.ring->first_seg->dma - 16, NULL },
+		{ xhci->erst->ring->first_seg->dma - 16, NULL },
 		/* One byte before the ring start should fail */
-		{ xhci->erst.ring->first_seg->dma - 1, NULL },
+		{ xhci->erst->ring->first_seg->dma - 1, NULL },
 		/* Starting TRB should succeed */
-		{ xhci->erst.ring->first_seg->dma, xhci->erst.ring->first_seg },
+		{ xhci->erst->ring->first_seg->dma,
+			xhci->erst->ring->first_seg },
 		/* Ending TRB should succeed */
-		{ xhci->erst.ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
-			xhci->erst.ring->first_seg },
+		{ xhci->erst->ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
+			xhci->erst->ring->first_seg },
 		/* One byte after the ring end should fail */
-		{ xhci->erst.ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
+		{ xhci->erst->ring->first_seg->dma +
+			(TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
 		/* One TRB after the ring end should fail */
-		{ xhci->erst.ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
+		{ xhci->erst->ring->first_seg->dma +
+			(TRBS_PER_SEGMENT)*16, NULL },
 		/* An address of all ones should fail */
 		{ (dma_addr_t) (~0), NULL },
 	};
@@ -1627,58 +1631,59 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 		struct xhci_segment	*result_seg;
 	} complex_test_vector [] = {
 		/* Test feeding a valid DMA address from a different ring */
-		{ .input_seg = xhci->erst.ring->first_seg,
-			.start_trb = xhci->erst.ring->first_seg->trbs,
-			.end_trb = &xhci->erst.ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+		{ .input_seg = xhci->erst->ring->first_seg,
+			.start_trb = xhci->erst->ring->first_seg->trbs,
+			.end_trb = &xhci->erst->ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 			.input_dma = xhci->cmd_ring->first_seg->dma,
 			.result_seg = NULL,
 		},
 		/* Test feeding a valid end TRB from a different ring */
-		{ .input_seg = xhci->erst.ring->first_seg,
-			.start_trb = xhci->erst.ring->first_seg->trbs,
+		{ .input_seg = xhci->erst->ring->first_seg,
+			.start_trb = xhci->erst->ring->first_seg->trbs,
 			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 			.input_dma = xhci->cmd_ring->first_seg->dma,
 			.result_seg = NULL,
 		},
 		/* Test feeding a valid start and end TRB from a different ring */
-		{ .input_seg = xhci->erst.ring->first_seg,
+		{ .input_seg = xhci->erst->ring->first_seg,
 			.start_trb = xhci->cmd_ring->first_seg->trbs,
 			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 			.input_dma = xhci->cmd_ring->first_seg->dma,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but after this TD */
-		{ .input_seg = xhci->erst.ring->first_seg,
-			.start_trb = &xhci->erst.ring->first_seg->trbs[0],
-			.end_trb = &xhci->erst.ring->first_seg->trbs[3],
-			.input_dma = xhci->erst.ring->first_seg->dma + 4*16,
+		{ .input_seg = xhci->erst->ring->first_seg,
+			.start_trb = &xhci->erst->ring->first_seg->trbs[0],
+			.end_trb = &xhci->erst->ring->first_seg->trbs[3],
+			.input_dma = xhci->erst->ring->first_seg->dma + 4*16,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but before this TD */
-		{ .input_seg = xhci->erst.ring->first_seg,
-			.start_trb = &xhci->erst.ring->first_seg->trbs[3],
-			.end_trb = &xhci->erst.ring->first_seg->trbs[6],
-			.input_dma = xhci->erst.ring->first_seg->dma + 2*16,
+		{ .input_seg = xhci->erst->ring->first_seg,
+			.start_trb = &xhci->erst->ring->first_seg->trbs[3],
+			.end_trb = &xhci->erst->ring->first_seg->trbs[6],
+			.input_dma = xhci->erst->ring->first_seg->dma + 2*16,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but after this wrapped TD */
-		{ .input_seg = xhci->erst.ring->first_seg,
-			.start_trb = &xhci->erst.ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
-			.end_trb = &xhci->erst.ring->first_seg->trbs[1],
-			.input_dma = xhci->erst.ring->first_seg->dma + 2*16,
+		{ .input_seg = xhci->erst->ring->first_seg,
+			.start_trb = &xhci->erst->ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->erst->ring->first_seg->trbs[1],
+			.input_dma = xhci->erst->ring->first_seg->dma + 2*16,
 			.result_seg = NULL,
 		},
 		/* TRB in this ring, but before this wrapped TD */
-		{ .input_seg = xhci->erst.ring->first_seg,
-			.start_trb = &xhci->erst.ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
-			.end_trb = &xhci->erst.ring->first_seg->trbs[1],
-			.input_dma = xhci->erst.ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
+		{ .input_seg = xhci->erst->ring->first_seg,
+			.start_trb = &xhci->erst->ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->erst->ring->first_seg->trbs[1],
+			.input_dma = xhci->erst->ring->first_seg->dma +
+				(TRBS_PER_SEGMENT - 4)*16,
 			.result_seg = NULL,
 		},
 		/* TRB not in this ring, and we have a wrapped TD */
-		{ .input_seg = xhci->erst.ring->first_seg,
-			.start_trb = &xhci->erst.ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
-			.end_trb = &xhci->erst.ring->first_seg->trbs[1],
+		{ .input_seg = xhci->erst->ring->first_seg,
+			.start_trb = &xhci->erst->ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->erst->ring->first_seg->trbs[1],
 			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
 			.result_seg = NULL,
 		},
@@ -1690,9 +1695,9 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
 	num_tests = ARRAY_SIZE(simple_test_vector);
 	for (i = 0; i < num_tests; i++) {
 		ret = xhci_test_trb_in_td(xhci,
-				xhci->erst.ring->first_seg,
-				xhci->erst.ring->first_seg->trbs,
-				&xhci->erst.ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+				xhci->erst->ring->first_seg,
+				xhci->erst->ring->first_seg->trbs,
+				&xhci->erst->ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
 				simple_test_vector[i].input_dma,
 				simple_test_vector[i].result_seg,
 				"Simple", i);
@@ -1721,13 +1726,13 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 	u64 temp;
 	dma_addr_t deq;

-	deq = xhci_trb_virt_to_dma(xhci->erst.ring->deq_seg,
-			xhci->erst.ring->dequeue);
+	deq = xhci_trb_virt_to_dma(xhci->erst->ring->deq_seg,
+			xhci->erst->ring->dequeue);
 	if (deq == 0 && !in_interrupt())
 		xhci_warn(xhci, "WARN something wrong with SW event ring "
 				"dequeue ptr.\n");
 	/* Update HC event ring dequeue pointer */
-	temp = xhci_read_64(xhci, &xhci->erst.ir_set->erst_dequeue);
+	temp = xhci_read_64(xhci, &xhci->erst->ir_set->erst_dequeue);
 	temp &= ERST_PTR_MASK;
 	/* Don't clear the EHB bit (which is RW1C) because
 	 * there might be more events to service.
@@ -1736,7 +1741,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 	xhci_dbg(xhci, "// Write event ring dequeue pointer, "
 			"preserving EHB bit\n");
 	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
-			&xhci->erst.ir_set->erst_dequeue);
+			&xhci->erst->ir_set->erst_dequeue);
 }

 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
@@ -2031,33 +2036,40 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST). Section 4.9.3.
 	 */
+	xhci_dbg(xhci, "// Allocating erst\n");
+	xhci->erst = kzalloc(sizeof(struct xhci_erst), flags);
+	if (!xhci->erst)
+		goto fail;
+
 	xhci_dbg(xhci, "// Allocating event ring\n");
-	xhci->erst.xhci = xhci;
-	xhci->erst.ir_set = &xhci->run_regs->ir_set[0];
-	xhci->erst.ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
-	if (!xhci->erst.ring)
+	xhci->erst->xhci = xhci;
+	xhci->erst->ir_set = &xhci->run_regs->ir_set[0];
+	xhci->erst->ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+	if (!xhci->erst->ring)
 		goto fail;
 	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
 		goto fail;

-	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
+	xhci->erst->entries = pci_alloc_consistent(to_pci_dev(dev),
 			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
-	if (!xhci->erst.entries)
+	if (!xhci->erst->entries)
 		goto fail;
 	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
 			(unsigned long long)dma);
-	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
-	xhci->erst.num_entries = ERST_NUM_SEGS;
-	xhci->erst.erst_dma_addr = dma;
+	memset(xhci->erst->entries, 0,
+			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+	xhci->erst->num_entries = ERST_NUM_SEGS;
+	xhci->erst->erst_dma_addr = dma;
 	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
-			xhci->erst.num_entries,
-			xhci->erst.entries,
-			(unsigned long long)xhci->erst.erst_dma_addr);
+			xhci->erst->num_entries,
+			xhci->erst->entries,
+			(unsigned long long)xhci->erst->erst_dma_addr);

 	/* set ring base address and size for each segment table entry */
-	for (val = 0, seg = xhci->erst.ring->first_seg; val < ERST_NUM_SEGS; val++) {
-		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
+	for (val = 0, seg = xhci->erst->ring->first_seg;
+			val < ERST_NUM_SEGS; val++) {
+		struct xhci_erst_entry *entry = &xhci->erst->entries[val];
 		entry->seg_addr = cpu_to_le64(seg->dma);
 		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
 		entry->rsvd = 0;
@@ -2065,21 +2077,21 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	}

 	/* set ERST count with the number of entries in the segment table */
-	val = xhci_readl(xhci, &xhci->erst.ir_set->erst_size);
+	val = xhci_readl(xhci, &xhci->erst->ir_set->erst_size);
 	val &= ERST_SIZE_MASK;
 	val |= ERST_NUM_SEGS;
 	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
 			val);
-	xhci_writel(xhci, val, &xhci->erst.ir_set->erst_size);
+	xhci_writel(xhci, val, &xhci->erst->ir_set->erst_size);

 	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
 	/* set the segment table base address */
 	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
-			(unsigned long long)xhci->erst.erst_dma_addr);
-	val_64 = xhci_read_64(xhci, &xhci->erst.ir_set->erst_base);
+			(unsigned long long)xhci->erst->erst_dma_addr);
+	val_64 = xhci_read_64(xhci, &xhci->erst->ir_set->erst_base);
 	val_64 &= ERST_PTR_MASK;
-	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
-	xhci_write_64(xhci, val_64, &xhci->erst.ir_set->erst_base);
+	val_64 |= (xhci->erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &xhci->erst->ir_set->erst_base);

 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index b85ae40..519543c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -96,9 +96,9 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 static bool last_trb_on_last_seg(struct xhci_hcd *xhci,
 		struct xhci_ring *ring, struct xhci_segment *seg, union xhci_trb *trb)
 {
-	if (ring == xhci->erst.ring)
+	if (ring == xhci->erst->ring)
 		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
-			(seg->next == xhci->erst.ring->first_seg);
+			(seg->next == xhci->erst->ring->first_seg);
 	else
 		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 }
@@ -110,7 +110,7 @@ static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		struct xhci_segment *seg, union xhci_trb *trb)
 {
-	if (ring == xhci->erst.ring)
+	if (ring == xhci->erst->ring)
 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
 	else
 		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
@@ -202,7 +202,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	 */
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
 		if (!consumer) {
-			if (ring != xhci->erst.ring) {
+			if (ring != xhci->erst->ring) {
 				/*
 				 * If the caller doesn't plan on enqueueing more
 				 * TDs before ringing the doorbell, then we
@@ -1346,7 +1346,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 cleanup:
 	/* Update event ring dequeue pointer before dropping the lock */
-	inc_deq(xhci, xhci->erst.ring, true);
+	inc_deq(xhci, xhci->erst->ring, true);

 	/* Don't make the USB core poll the roothub if we got a bad port status
 	 * change event. Besides, at that point we can't tell which roothub
@@ -1611,7 +1611,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

-	xhci_debug_trb(xhci, xhci->erst.ring->dequeue);
+	xhci_debug_trb(xhci, xhci->erst->ring->dequeue);
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
 		if (event_trb == ep_ring->dequeue) {
@@ -2131,7 +2131,7 @@ cleanup:
 		 * Will roll back to continue process missed tds.
 		 */
 		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
-			inc_deq(xhci, xhci->erst.ring, true);
+			inc_deq(xhci, xhci->erst->ring, true);
 		}

 		if (ret) {
@@ -2289,9 +2289,11 @@ hw_died:
 	if (hcd->irq != -1) {
 		u32 irq_pending;
 		/* Acknowledge the PCI interrupt */
-		irq_pending = xhci_readl(xhci, &xhci->erst.ir_set->irq_pending);
+		irq_pending = xhci_readl(xhci,
+				&xhci->erst->ir_set->irq_pending);
 		irq_pending |= 0x3;
-		xhci_writel(xhci, irq_pending, &xhci->erst.ir_set->irq_pending);
+		xhci_writel(xhci, irq_pending,
+				&xhci->erst->ir_set->irq_pending);
 	}

 	if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -2300,25 +2302,25 @@ hw_died:
 		/* Clear the event handler busy flag (RW1C);
 		 * the event ring should be empty.
 		 */
-		temp_64 = xhci_read_64(xhci, &xhci->erst.ir_set->erst_dequeue);
+		temp_64 = xhci_read_64(xhci, &xhci->erst->ir_set->erst_dequeue);
 		xhci_write_64(xhci, temp_64 | ERST_EHB,
-				&xhci->erst.ir_set->erst_dequeue);
+				&xhci->erst->ir_set->erst_dequeue);
 		spin_unlock(&xhci->lock);
 		return IRQ_HANDLED;
 	}

-	event_ring_deq = xhci->erst.ring->dequeue;
+	event_ring_deq = xhci->erst->ring->dequeue;
 	/* FIXME this should be a delayed service routine
 	 * that clears the EHB.
 	 */
-	while (xhci_handle_event(&xhci->erst) > 0) {}
+	while (xhci_handle_event(xhci->erst) > 0) {}

-	temp_64 = xhci_read_64(xhci, &xhci->erst.ir_set->erst_dequeue);
+	temp_64 = xhci_read_64(xhci, &xhci->erst->ir_set->erst_dequeue);
 	/* If necessary, update the HW's version of the event ring deq ptr.
 	 */
-	if (event_ring_deq != xhci->erst.ring->dequeue) {
-		deq = xhci_trb_virt_to_dma(xhci->erst.ring->deq_seg,
-				xhci->erst.ring->dequeue);
+	if (event_ring_deq != xhci->erst->ring->dequeue) {
+		deq = xhci_trb_virt_to_dma(xhci->erst->ring->deq_seg,
+				xhci->erst->ring->dequeue);
 		if (deq == 0)
 			xhci_warn(xhci, "WARN something wrong with SW event "
 					"ring dequeue ptr.\n");
@@ -2329,7 +2331,7 @@ hw_died:
 	/* Clear the event handler busy flag (RW1C); event ring is empty. */
 	temp_64 |= ERST_EHB;
-	xhci_write_64(xhci, temp_64, &xhci->erst.ir_set->erst_dequeue);
+	xhci_write_64(xhci, temp_64, &xhci->erst->ir_set->erst_dequeue);

 	spin_unlock(&xhci->lock);

diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c0a16df..703f73b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -191,9 +191,10 @@ static void xhci_free_irq(struct xhci_hcd *xhci)
 	if (xhci->msix_entries) {
 		for (i = 0; i < xhci->msix_count; i++)
 			if (xhci->msix_entries[i].vector)
-				free_irq(xhci->msix_entries[i].vector, &xhci->erst);
+				free_irq(xhci->msix_entries[i].vector,
+						xhci->erst);
 	} else if (pdev->irq >= 0)
-		free_irq(pdev->irq, &xhci->erst);
+		free_irq(pdev->irq, xhci->erst);

 	return;
 }
@@ -213,7 +214,7 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
 	}

 	ret = request_irq(pdev->irq, xhci_msi_irq, 0, "xhci_hcd",
-				&xhci->erst);
+				xhci->erst);
 	if (ret) {
 		xhci_err(xhci, "disable MSI interrupt\n");
 		pci_disable_msi(pdev);
@@ -262,7 +263,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 	for (i = 0; i < xhci->msix_count; i++) {
 		ret = request_irq(xhci->msix_entries[i].vector, xhci_msi_irq,
-				0, "xhci_hcd", &xhci->erst);
+				0, "xhci_hcd", xhci->erst);
 		if (ret)
 			goto disable_msix;
 	}
@@ -349,14 +350,14 @@ static void xhci_event_ring_work(unsigned long arg)
 		return;
 	}

-	temp = xhci_readl(xhci, &xhci->erst.ir_set->irq_pending);
+	temp = xhci_readl(xhci, &xhci->erst->ir_set->irq_pending);
 	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
 	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
 	xhci->error_bitmask = 0;
 	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_segment(xhci, xhci->erst.ring->deq_seg);
-	xhci_dbg_ring_ptrs(xhci, xhci->erst.ring);
-	temp_64 = xhci_read_64(xhci, &xhci->erst.ir_set->erst_dequeue);
+	xhci_debug_segment(xhci, xhci->erst->ring->deq_seg);
+	xhci_dbg_ring_ptrs(xhci, xhci->erst->ring);
+	temp_64 = xhci_read_64(xhci, &xhci->erst->ir_set->erst_dequeue);
 	temp_64 &= ~ERST_PTR_MASK;
 	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
 	xhci_dbg(xhci, "Command ring:\n");
@@ -469,19 +470,19 @@ legacy_irq:
 	xhci_dbg_cmd_ptrs(xhci);
 	xhci_dbg(xhci, "ERST memory map follows:\n");
-	xhci_dbg_erst(xhci, &xhci->erst);
+	xhci_dbg_erst(xhci, xhci->erst);
 	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_ring(xhci, xhci->erst.ring);
-	xhci_dbg_ring_ptrs(xhci, xhci->erst.ring);
-	temp_64 = xhci_read_64(xhci, &xhci->erst.ir_set->erst_dequeue);
+	xhci_debug_ring(xhci, xhci->erst->ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->erst->ring);
+	temp_64 = xhci_read_64(xhci, &xhci->erst->ir_set->erst_dequeue);
 	temp_64 &= ~ERST_PTR_MASK;
 	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

 	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
-	temp = xhci_readl(xhci, &xhci->erst.ir_set->irq_control);
+	temp = xhci_readl(xhci, &xhci->erst->ir_set->irq_control);
 	temp &= ~ER_IRQ_INTERVAL_MASK;
 	temp |= (u32) 160;
-	xhci_writel(xhci, temp, &xhci->erst.ir_set->irq_control);
+	xhci_writel(xhci, temp, &xhci->erst->ir_set->irq_control);

 	/* Set the HCD state before we enable the irqs
 	 */
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
@@ -490,11 +491,11 @@ legacy_irq:
 			temp);
 	xhci_writel(xhci, temp, &xhci->op_regs->command);

-	temp = xhci_readl(xhci, &xhci->erst.ir_set->irq_pending);
+	temp = xhci_readl(xhci, &xhci->erst->ir_set->irq_pending);
 	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
-			xhci->erst.ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
+			xhci->erst->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
-			&xhci->erst.ir_set->irq_pending);
+			&xhci->erst->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, 0);

 	if (xhci->quirks & XHCI_NEC_HOST)
@@ -561,9 +562,9 @@ void xhci_stop(struct usb_hcd *hcd)
 	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
-	temp = xhci_readl(xhci, &xhci->erst.ir_set->irq_pending);
+	temp = xhci_readl(xhci, &xhci->erst->ir_set->irq_pending);
 	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
-			&xhci->erst.ir_set->irq_pending);
+			&xhci->erst->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, 0);

 	xhci_dbg(xhci, "cleaning up memory\n");
@@ -602,11 +603,14 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
 	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
 	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
 	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
-	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->erst.ir_set->irq_pending);
-	xhci->s3.irq_control = xhci_readl(xhci, &xhci->erst.ir_set->irq_control);
-	xhci->s3.erst_size = xhci_readl(xhci, &xhci->erst.ir_set->erst_size);
-	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->erst.ir_set->erst_base);
-	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->erst.ir_set->erst_dequeue);
+	xhci->s3.irq_pending = xhci_readl(xhci,
+			&xhci->erst->ir_set->irq_pending);
+	xhci->s3.irq_control = xhci_readl(xhci,
+			&xhci->erst->ir_set->irq_control);
+	xhci->s3.erst_size = xhci_readl(xhci, &xhci->erst->ir_set->erst_size);
+	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->erst->ir_set->erst_base);
+	xhci->s3.erst_dequeue = xhci_read_64(xhci,
+			&xhci->erst->ir_set->erst_dequeue);
 }

 static void xhci_restore_registers(struct xhci_hcd *xhci)
@@ -615,10 +619,12 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
 	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
 	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
 	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
-	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->erst.ir_set->irq_pending);
-	xhci_writel(xhci, xhci->s3.irq_control, &xhci->erst.ir_set->irq_control);
-	xhci_writel(xhci, xhci->s3.erst_size, &xhci->erst.ir_set->erst_size);
-	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->erst.ir_set->erst_base);
+	xhci_writel(xhci, xhci->s3.irq_pending,
+			&xhci->erst->ir_set->irq_pending);
+	xhci_writel(xhci, xhci->s3.irq_control,
+			&xhci->erst->ir_set->irq_control);
+	xhci_writel(xhci, xhci->s3.erst_size, &xhci->erst->ir_set->erst_size);
+	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->erst->ir_set->erst_base);
 }

 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
@@ -798,9 +804,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
-	temp = xhci_readl(xhci, &xhci->erst.ir_set->irq_pending);
+	temp = xhci_readl(xhci, &xhci->erst->ir_set->irq_pending);
 	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
-			&xhci->erst.ir_set->irq_pending);
+			&xhci->erst->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, 0);

 	xhci_dbg(xhci, "cleaning up memory\n");
@@ -1252,7 +1258,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	xhci_dbg(xhci, "Cancel URB %p\n", urb);
 	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_ring(xhci, xhci->erst.ring);
+	xhci_debug_ring(xhci, xhci->erst->ring);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a1fd814..c549a32 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1249,7 +1249,7 @@ struct xhci_hcd {
 	struct xhci_device_context_array *dcbaa;
 	struct xhci_ring *cmd_ring;
 	unsigned int cmd_ring_reserved_trbs;
-	struct xhci_erst erst;
+	struct xhci_erst *erst;
 	/* Scratchpad */
 	struct xhci_scratchpad *scratchpad;
--
1.7.1
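
Not part of the patch: the following is a rough, hypothetical sketch of what
the per-interrupter allocation mentioned in the commit message could look
like once xhci->erst grows into a pointer array. The helper name
xhci_alloc_one_erst() and the intr_num parameter are invented for
illustration; only kzalloc(), xhci_ring_alloc(), ERST_NUM_SEGS,
run_regs->ir_set[] and the xhci_erst fields already used by this patch are
assumed to exist.

static struct xhci_erst *xhci_alloc_one_erst(struct xhci_hcd *xhci,
		unsigned int intr_num, gfp_t flags)
{
	struct xhci_erst *erst;

	/* One xhci_erst per interrupter, allocated only when needed. */
	erst = kzalloc(sizeof(struct xhci_erst), flags);
	if (!erst)
		return NULL;

	/* Each interrupter gets its own register set and event ring. */
	erst->xhci = xhci;
	erst->ir_set = &xhci->run_regs->ir_set[intr_num];
	erst->ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!erst->ring) {
		kfree(erst);
		return NULL;
	}

	return erst;
}

With a helper along these lines, interrupter 0 would keep the behaviour set
up in xhci_mem_init() above, while additional interrupters could reuse the
same allocation path instead of embedding a single ERST in struct xhci_hcd.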