Some drivers, such as virtio-net, do not configure DMA when binding an xsk; they get the page when sending instead.

This patch introduces a field need_dma that is passed to the driver during pool setup. If the device does not use DMA, the driver should set this value to false. A function xsk_buff_raw_get_page() is also added so the driver can get the page for a given addr.

Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
---
 include/linux/netdevice.h   |  1 +
 include/net/xdp_sock_drv.h  | 10 ++++++++++
 include/net/xsk_buff_pool.h |  1 +
 net/xdp/xsk_buff_pool.c     | 10 +++++++++-
 4 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5b94907..b452ade 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -914,6 +914,7 @@ struct netdev_bpf {
 		struct {
 			struct xsk_buff_pool *pool;
 			u16 queue_id;
+			bool need_dma;
 		} xsk;
 	};
 };
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 4e295541..e9c7e25 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -100,6 +100,11 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
 	return xp_raw_get_data(pool, addr);
 }
 
+static inline struct page *xsk_buff_raw_get_page(struct xsk_buff_pool *pool, u64 addr)
+{
+	return xp_raw_get_page(pool, addr);
+}
+
 static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
 {
 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
@@ -232,6 +237,11 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
 	return NULL;
 }
 
+static inline struct page *xsk_buff_raw_get_page(struct xsk_buff_pool *pool, u64 addr)
+{
+	return NULL;
+}
+
 static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
 {
 }
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index eaa8386..2dcfa54 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -108,6 +108,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
 void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
 dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
+struct page *xp_raw_get_page(struct xsk_buff_pool *pool, u64 addr);
 static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
 {
 	return xskb->dma;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 20598ee..6d0cc9f 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -166,12 +166,13 @@ static int __xp_assign_dev(struct xsk_buff_pool *pool,
 	bpf.command = XDP_SETUP_XSK_POOL;
 	bpf.xsk.pool = pool;
 	bpf.xsk.queue_id = queue_id;
+	bpf.xsk.need_dma = true;
 
 	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
 	if (err)
 		goto err_unreg_pool;
 
-	if (!pool->dma_pages) {
+	if (bpf.xsk.need_dma && !pool->dma_pages) {
 		WARN(1, "Driver did not DMA map zero-copy buffers");
 		err = -EINVAL;
 		goto err_unreg_xsk;
@@ -535,6 +536,13 @@ void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
 }
 EXPORT_SYMBOL(xp_raw_get_data);
 
+struct page *xp_raw_get_page(struct xsk_buff_pool *pool, u64 addr)
+{
+	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
+	return pool->umem->pgs[addr >> PAGE_SHIFT];
+}
+EXPORT_SYMBOL(xp_raw_get_page);
+
 dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
 {
 	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
--
1.8.3.1
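
For illustration only, not part of the patch: a minimal driver-side sketch of how a device that hands pages to the hardware without DMA-mapping the umem (e.g. virtio-net) might use the new need_dma flag and xsk_buff_raw_get_page(). The function names xxx_xsk_pool_setup() and xxx_xsk_xmit_one() are hypothetical placeholders; only struct netdev_bpf, xsk_buff_raw_get_page() and the existing xsk_buff_raw_get_data() come from the kernel and this patch.

#include <linux/netdevice.h>
#include <net/xdp_sock_drv.h>

/* ndo_bpf() path for XDP_SETUP_XSK_POOL: the core pre-sets need_dma to
 * true; a driver that does not DMA-map the umem clears it so the
 * "Driver did not DMA map zero-copy buffers" check is skipped.
 */
static int xxx_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
{
	xdp->xsk.need_dma = false;

	/* ... bind xdp->xsk.pool to queue xdp->xsk.queue_id ... */
	return 0;
}

/* TX path: resolve a descriptor to a page plus offset instead of a DMA
 * address, e.g. to build a scatterlist entry for the device.
 */
static int xxx_xsk_xmit_one(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct page *page = xsk_buff_raw_get_page(pool, desc->addr);
	void *data = xsk_buff_raw_get_data(pool, desc->addr);
	unsigned int offset = offset_in_page(data);

	/* ... hand (page, offset, desc->len) to the device ... */
	return 0;
}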