From: Wesley Cheng <quic_wcheng@xxxxxxxxxxx>

As mentioned in commit 474ed23a6257 ("xhci: align the last trb before
link if it is easily splittable."), a bounce buffer is utilized to
ensure that transfers spanning ring segments are aligned to the EP's
max packet size.  However, the device used to map the DMA buffer is
currently the xHCI HCD, which does not carry any DMA operations in
certain configurations.  Migration to the sysdev entry was introduced
for DWC3 based implementations, where the IOMMU operations are present
on sysdev.

Replace the reference to the controller device with sysdev instead.
This allows the bounce buffer to be properly mapped on implementations
that have an IOMMU involved.

cc: <stable@xxxxxxxxxxxxxxx>
Fixes: 4c39d4b949d3 ("usb: xhci: use bus->sysdev for DMA configuration")
Signed-off-by: Wesley Cheng <quic_wcheng@xxxxxxxxxxx>
Signed-off-by: Mathias Nyman <mathias.nyman@xxxxxxxxxxxxxxx>
---
 drivers/usb/host/xhci-ring.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1dde53f6eb31..98389b568633 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -798,7 +798,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
 					struct xhci_ring *ring, struct xhci_td *td)
 {
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	struct xhci_segment *seg = td->bounce_seg;
 	struct urb *urb = td->urb;
 	size_t len;
@@ -3469,7 +3469,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 			 u32 *trb_buff_len, struct xhci_segment *seg)
 {
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	unsigned int unalign;
	unsigned int max_pkt;
	u32 new_buff_len;
--
2.25.1
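
For context on why sysdev is the right device for this mapping: on
glue-layer setups such as DWC3, bus->sysdev points at the parent device
that actually carries the DMA mask, dma_ops and IOMMU attachment, while
bus->controller may carry no DMA configuration at all.  The sketch below
is illustrative only and not part of the patch; the helper name
map_bounce_buf() is made up, and the real mapping happens inside
xhci_align_td() / xhci_unmap_td_bounce_buffer() shown in the hunks
above.

	#include <linux/dma-mapping.h>
	#include <linux/usb.h>
	#include <linux/usb/hcd.h>
	#include "xhci.h"	/* driver-local: struct xhci_hcd, xhci_to_hcd() */

	/* Hypothetical helper: map a bounce buffer the way the patch now does. */
	static dma_addr_t map_bounce_buf(struct xhci_hcd *xhci, void *buf, size_t len)
	{
		/*
		 * bus->sysdev inherited the DMA configuration (mask, ops,
		 * IOMMU) from the parent device; bus->controller may not
		 * have any of that on DWC3-style implementations, so mapping
		 * against it can bypass or fail the IOMMU setup.
		 */
		struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

		return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	}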