Lay the groundwork for future handle_tx_event() rework, which will require a function which checks if a DMA address is in the queue. To its core, trb_in_td() checks if a TRB falls within the specified start and end TRB/segment range, a common requirement. For instance, a ring has pointers to the queue's first and last TRB/segment, which means that with slight modifications and renaming trb_in_td() could work for other structures not only TDs. Modify trb_in_td() to accept pointers to start and end TRB/segment, and introduce a new function that takes an 'xhci_td' struct pointer, forwarding its elements to dma_in_range(), previously trb_in_td(). Signed-off-by: Niklas Neronin <niklas.neronin@xxxxxxxxxxxxxxx> --- drivers/usb/host/xhci-ring.c | 41 ++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 23337c9d34c1..34699038b7f2 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -278,24 +278,28 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, } /* - * If the suspect DMA address is a TRB in this TD, this function returns that - * TRB's segment. Otherwise it returns 0. + * Check if the DMA address of a TRB falls within the specified range. + * The range is defined by 'start_trb' in 'start_seg' and 'end_trb' in 'end_seg'. + * If the TRB's DMA address is within this range, return the segment containing the TRB. + * Otherwise, return 'NULL'. 
*/ -static struct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t dma) +static struct xhci_segment *dma_in_range(struct xhci_segment *start_seg, union xhci_trb *start_trb, + struct xhci_segment *end_seg, union xhci_trb *end_trb, + dma_addr_t dma) { - struct xhci_segment *seg = td->start_seg; + struct xhci_segment *seg = start_seg; - if (td->start_seg == td->end_seg) { - if (td->start_trb <= td->end_trb) { - if (xhci_trb_virt_to_dma(td->start_seg, td->start_trb) <= dma && - dma <= xhci_trb_virt_to_dma(td->end_seg, td->end_trb)) + if (start_seg == end_seg) { + if (start_trb <= end_trb) { + if (xhci_trb_virt_to_dma(start_seg, start_trb) <= dma && + dma <= xhci_trb_virt_to_dma(end_seg, end_trb)) return seg; return NULL; } /* Edge case, the TD wrapped around to the start segment. */ - if (xhci_trb_virt_to_dma(td->end_seg, td->end_trb) < dma && - dma < xhci_trb_virt_to_dma(td->start_seg, td->start_trb)) + if (xhci_trb_virt_to_dma(end_seg, end_trb) < dma && + dma < xhci_trb_virt_to_dma(start_seg, start_trb)) return NULL; if (seg->dma <= dma && dma <= (seg->dma + TRB_SEGMENT_SIZE)) return seg; @@ -304,24 +308,29 @@ static struct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t dma) /* Loop through segment which don't contain the DMA address. 
*/ while (dma < seg->dma || (seg->dma + TRB_SEGMENT_SIZE) <= dma) { - if (seg == td->end_seg) + if (seg == end_seg) return NULL; seg = seg->next; - if (seg == td->start_seg) + if (seg == start_seg) return NULL; } - if (seg == td->start_seg) { - if (dma < xhci_trb_virt_to_dma(td->start_seg, td->start_trb)) + if (seg == start_seg) { + if (dma < xhci_trb_virt_to_dma(start_seg, start_trb)) return NULL; - } else if (seg == td->end_seg) { - if (xhci_trb_virt_to_dma(td->end_seg, td->end_trb) < dma) + } else if (seg == end_seg) { + if (xhci_trb_virt_to_dma(end_seg, end_trb) < dma) return NULL; } return seg; } +static struct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t dma) +{ + return dma_in_range(td->start_seg, td->start_trb, td->end_seg, td->end_trb, dma); +} + /* * Return number of free normal TRBs from enqueue to dequeue pointer on ring. * Not counting an assumed link TRB at end of each TRBS_PER_SEGMENT sized segment. -- 2.47.2