On Fri, 15 Sep 2023 17:09:56 -0700 Wesley Cheng <quic_wcheng@xxxxxxxxxxx> > +static int > +xhci_ring_to_sgtable(struct xhci_sideband *sb, struct xhci_ring *ring, struct device *dev) > +{ > + struct sg_table *sgt; > + struct xhci_segment *seg; > + struct page **pages; > + unsigned int n_pages; > + size_t sz; > + int i; > + > + sz = ring->num_segs * TRB_SEGMENT_SIZE; > + n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT; > + pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL); > + if (!pages) > + return 0; > + > + sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); > + if (!sgt) { > + kvfree(pages); > + return 0; > + } > + > + seg = ring->first_seg; > + > + /* > + * Rings can potentially have multiple segments, create an array that > + * carries page references to allocated segments. Utilize the > + * sg_alloc_table_from_pages() to create the sg table, and to ensure > + * that page links are created. > + */ > + for (i = 0; i < ring->num_segs; i++) { > + pages[i] = vmalloc_to_page(seg->trbs); > + seg = seg->next; > + } Given that xhci_segment_alloc() allocates the segment TRBs with dma_pool_zalloc(), which is backed by dma_alloc_coherent() in pool_alloc_page(), it is incorrect to derive a struct page from the CPU address returned by the DMA allocation routine: dma_alloc_coherent() memory is not guaranteed to come from the vmalloc area, so vmalloc_to_page() cannot validly be applied to seg->trbs here.