On 3/14/22 3:31 AM, Christoph Hellwig wrote:
>  static void __init pci_xen_swiotlb_init(void)
>  {
>          if (!xen_initial_domain() && !x86_swiotlb_enable)
>                  return;
>          x86_swiotlb_enable = true;
> -        xen_swiotlb = true;
> -        xen_swiotlb_init_early();
> +        swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
I think we need to have SWIOTLB_ANY set in x86_swiotlb_flags here.
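Something along these lines is what I have in mind. This is only a rough,
untested sketch of where the flag could be ORed in; the comment about why the
Xen buffer does not have to come from low memory is my reading of what
xen_swiotlb_fixup() does, not wording from the patch:

static void __init pci_xen_swiotlb_init(void)
{
        if (!xen_initial_domain() && !x86_swiotlb_enable)
                return;
        x86_swiotlb_enable = true;
        /*
         * Sketch only: the Xen fixup remaps the buffer to machine-contiguous
         * memory anyway, so the initial allocation does not need to be
         * restricted to low memory.
         */
        x86_swiotlb_flags |= SWIOTLB_ANY;
        swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
        dma_ops = &xen_swiotlb_dma_ops;
        if (IS_ENABLED(CONFIG_PCI))
                pci_request_acs();
}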
>          dma_ops = &xen_swiotlb_dma_ops;
>          if (IS_ENABLED(CONFIG_PCI))
>                  pci_request_acs();
> @@ -88,14 +85,16 @@ static void __init pci_xen_swiotlb_init(void)
>  
>  int pci_xen_swiotlb_init_late(void)
>  {
> -        int rc;
> -
> -        if (xen_swiotlb)
> +        if (dma_ops == &xen_swiotlb_dma_ops)
>                  return 0;
>  
> -        rc = xen_swiotlb_init();
> -        if (rc)
> -                return rc;
> +        /* we can work with the default swiotlb */
> +        if (!io_tlb_default_mem.nslabs) {
> +                int rc = swiotlb_init_late(swiotlb_size_or_default(),
> +                                           GFP_KERNEL, xen_swiotlb_fixup);
This may be a comment for the previous patch, but looking at swiotlb_init_late():

retry:
        order = get_order(nslabs << IO_TLB_SHIFT);
        nslabs = SLABS_PER_PAGE << order;
        bytes = nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
                                                  order);
                if (vstart)
                        break;
                order--;
        }

        if (!vstart)
                return -ENOMEM;

        if (remap)
                rc = remap(vstart, nslabs);
        if (rc) {
                free_pages((unsigned long)vstart, order);

                /* Min is 2MB */
                if (nslabs <= 1024)
                        return rc;
                nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
                goto retry;
        }

        if (order != get_order(bytes)) {
                pr_warn("only able to allocate %ld MB\n",
                        (PAGE_SIZE << order) >> 20);
                nslabs = SLABS_PER_PAGE << order;        <=======
        }

        rc = swiotlb_late_init_with_tbl(vstart, nslabs);

Notice that we don't do remap() after the final update to nslabs. We should.
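One way this could be handled (just a rough, untested sketch against the code
quoted above, not something from the posted series) is to keep nslabs in step
with order inside the allocation loop, so that remap() always runs on the
final slab count and the warning block no longer has to adjust it:

retry:
        order = get_order(nslabs << IO_TLB_SHIFT);
        nslabs = SLABS_PER_PAGE << order;
        bytes = nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
                                                  order);
                if (vstart)
                        break;
                order--;
                nslabs = SLABS_PER_PAGE << order;       /* track the shrink */
        }

        if (!vstart)
                return -ENOMEM;

        if (remap)
                rc = remap(vstart, nslabs);             /* sees final nslabs */
        if (rc) {
                free_pages((unsigned long)vstart, order);

                /* Min is 2MB */
                if (nslabs <= 1024)
                        return rc;
                nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
                goto retry;
        }

        if (order != get_order(bytes))
                pr_warn("only able to allocate %ld MB\n",
                        (PAGE_SIZE << order) >> 20);

        rc = swiotlb_late_init_with_tbl(vstart, nslabs);

-boris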