On Mon, Apr 17, 2023 at 11:27:50AM +0800, Xuan Zhuo wrote:
> @@ -532,9 +545,9 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
>  	xskb->xdp.data_meta = xskb->xdp.data;
>  
>  	if (pool->dma_need_sync) {
> -		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
> -						 pool->frame_len,
> -						 DMA_BIDIRECTIONAL);
> +		pool->dma_ops.sync_single_range_for_device(pool->dev, xskb->dma, 0,
> +							   pool->frame_len,
> +							   DMA_BIDIRECTIONAL);
>  	}
>  	return &xskb->xdp;
>  }
> @@ -670,15 +683,15 @@ EXPORT_SYMBOL(xp_raw_get_dma);
>  
>  void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
>  {
> -	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
> -				      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
> +	xskb->pool->dma_ops.sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
> +						      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
>  }
>  EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);
>  
>  void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
>  				 size_t size)
>  {
> -	dma_sync_single_range_for_device(pool->dev, dma, 0,
> -					 size, DMA_BIDIRECTIONAL);
> +	pool->dma_ops.sync_single_range_for_device(pool->dev, dma, 0,
> +						   size, DMA_BIDIRECTIONAL);
>  }
>  EXPORT_SYMBOL(xp_dma_sync_for_device_slow);

So you add an indirect function call on the data path?  Won't this be costly?
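
One way to keep the common case cheap would be to take the indirect call only when a driver has actually installed its own sync ops, and keep the direct dma_sync_* call otherwise. A rough sketch along those lines is below; the dma_ops_valid flag and the xp_sync_for_device() helper are hypothetical names for illustration, not something taken from the posted patch:

static inline void xp_sync_for_device(struct xsk_buff_pool *pool,
				      dma_addr_t dma, size_t size)
{
	/* Fast path: no driver-specific ops installed (hypothetical
	 * dma_ops_valid flag), so call the generic DMA API directly
	 * and avoid the indirect call entirely.
	 */
	if (likely(!pool->dma_ops_valid)) {
		dma_sync_single_range_for_device(pool->dev, dma, 0,
						 size, DMA_BIDIRECTIONAL);
		return;
	}

	/* Slow path: a driver (e.g. virtio) supplied its own sync op,
	 * so take the indirect call only here.
	 */
	pool->dma_ops.sync_single_range_for_device(pool->dev, dma, 0,
						   size, DMA_BIDIRECTIONAL);
}

Alternatively, INDIRECT_CALL_1() from include/linux/indirect_call_wrapper.h could be used to devirtualize the common target when retpolines are enabled.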