From: Niklas Neronin <niklas.neronin@xxxxxxxxxxxxxxx>

Prepare for splitting ring segment allocation and initialization by
reworking the xhci_link_segments() function. Segment linking and the
ring type check are moved out of xhci_link_segments(), and the function
is renamed to xhci_set_link_trb(). The goal is to keep ring linking
within xhci_alloc_segments_for_ring() and to move initialization into a
separate function.

Additionally, reorder and simplify xhci_set_link_trb() for better
readability.

Signed-off-by: Niklas Neronin <niklas.neronin@xxxxxxxxxxxxxxx>
Signed-off-by: Mathias Nyman <mathias.nyman@xxxxxxxxxxxxxxx>
---
 drivers/usb/host/xhci-mem.c | 54 ++++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 24 deletions(-)

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index feeaafc59a39..41a5e67e1c4f 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -86,33 +86,31 @@ static void xhci_ring_segments_free(struct xhci_hcd *xhci, struct xhci_ring *rin
 }
 
 /*
- * Make the prev segment point to the next segment.
+ * Only for transfer and command rings where driver is the producer, not for
+ * event rings.
  *
- * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * Change the last TRB in the segment to be a Link TRB which points to the
  * DMA address of the next segment. The caller needs to set any Link TRB
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
-static void xhci_link_segments(struct xhci_segment *prev,
-                               struct xhci_segment *next,
-                               enum xhci_ring_type type, bool chain_links)
+static void xhci_set_link_trb(struct xhci_segment *seg, bool chain_links)
 {
+        union xhci_trb *trb;
         u32 val;
 
-        if (!prev || !next)
+        if (!seg || !seg->next)
                 return;
 
-        prev->next = next;
-        if (type != TYPE_EVENT) {
-                prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
-                        cpu_to_le64(next->dma);
-                /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
-                val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
-                val &= ~TRB_TYPE_BITMASK;
-                val |= TRB_TYPE(TRB_LINK);
-                if (chain_links)
-                        val |= TRB_CHAIN;
-                prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
-        }
+        trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+
+        /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+        val = le32_to_cpu(trb->link.control);
+        val &= ~TRB_TYPE_BITMASK;
+        val |= TRB_TYPE(TRB_LINK);
+        if (chain_links)
+                val |= TRB_CHAIN;
+        trb->link.control = cpu_to_le32(val);
+        trb->link.segment_ptr = cpu_to_le64(seg->next->dma);
 }
 
 /*
@@ -127,8 +125,6 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *src, struct
         if (!src || !dst)
                 return;
 
-        chain_links = xhci_link_chain_quirk(xhci, dst->type);
-
         /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
         if (dst->cycle_state == 0) {
                 xhci_for_each_ring_seg(src->first_seg, seg) {
@@ -137,8 +133,13 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *src, struct
                 }
         }
 
-        xhci_link_segments(src->last_seg, dst->enq_seg->next, dst->type, chain_links);
-        xhci_link_segments(dst->enq_seg, src->first_seg, dst->type, chain_links);
+        src->last_seg->next = dst->enq_seg->next;
+        dst->enq_seg->next = src->first_seg;
+        if (dst->type != TYPE_EVENT) {
+                chain_links = xhci_link_chain_quirk(xhci, dst->type);
+                xhci_set_link_trb(dst->enq_seg, chain_links);
+                xhci_set_link_trb(src->last_seg, chain_links);
+        }
 
         dst->num_segs += src->num_segs;
         if (dst->enq_seg == dst->last_seg) {
@@ -331,13 +332,18 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_ring
                 if (!next)
                         goto free_segments;
 
-                xhci_link_segments(prev, next, ring->type, chain_links);
+                prev->next = next;
+                if (ring->type != TYPE_EVENT)
+                        xhci_set_link_trb(prev, chain_links);
                 prev = next;
                 num++;
         }
 
-        xhci_link_segments(prev, ring->first_seg, ring->type, chain_links);
         ring->last_seg = prev;
+        ring->last_seg->next = ring->first_seg;
+        if (ring->type != TYPE_EVENT)
+                xhci_set_link_trb(prev, chain_links);
+
         return 0;
 
 free_segments:
-- 
2.25.1
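
P.S. For readers new to this code: the control-word update that
xhci_set_link_trb() performs can be tried outside the kernel. The
standalone user-space sketch below (not part of the patch) mimics the
same bit manipulation; the macro values are copied from
drivers/usb/host/xhci.h, while the cpu_to_le32()/le32_to_cpu()
endianness accessors are omitted for simplicity.

#include <stdint.h>
#include <stdio.h>

/* Values copied from drivers/usb/host/xhci.h */
#define TRB_TYPE_BITMASK        0xfc00          /* TRB type field, bits 15:10 */
#define TRB_TYPE(p)             ((p) << 10)
#define TRB_LINK                6               /* Link TRB type ID */
#define TRB_CHAIN               (1 << 4)        /* chain bit */

int main(void)
{
        uint32_t val = 0x00002c01;      /* arbitrary stale control word */

        /*
         * Same sequence as xhci_set_link_trb(): clear the old TRB type,
         * mark the TRB as a Link TRB, and set the chain bit when the
         * xhci_link_chain_quirk() case applies.
         */
        val &= ~TRB_TYPE_BITMASK;
        val |= TRB_TYPE(TRB_LINK);
        val |= TRB_CHAIN;

        printf("link.control = 0x%08x\n", val); /* prints 0x00001811 */
        return 0;
}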