On Mon, 1 Jul 2024 11:25:37 +0800, Jason Wang <jasowang@xxxxxxxxxx> wrote:
> On Mon, Jul 1, 2024 at 11:20 AM Jason Wang <jasowang@xxxxxxxxxx> wrote:
> >
> > On Fri, Jun 28, 2024 at 1:48 PM Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> wrote:
> > >
> > > On Fri, 28 Jun 2024 10:19:41 +0800, Jason Wang <jasowang@xxxxxxxxxx> wrote:
> > > > On Tue, Jun 18, 2024 at 3:57 PM Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> wrote:
> > > > >
> > > > > In the process:
> > > > > 1. We may need to copy data to create an skb for XDP_PASS.
> > > > > 2. We may need to call xsk_buff_free() to release the buffer.
> > > > > 3. The handle for the xdp_buff is different from the normal buffer.
> > > > >
> > > > > If we pushed this logic into the existing receive handlers (merge and small),
> > > > > we would have to maintain code scattered across merge and small (and big).
> > > > > So I think it is a good choice for us to put the xsk code into an
> > > > > independent function.
> > > >
> > > > I think it's better to try to reuse the existing functions.
> > > >
> > > > More below:
> > > >
> > > > >
> > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
> > > > > ---
> > > > >  drivers/net/virtio_net.c | 135 ++++++++++++++++++++++++++++++++++++++-
> > > > >  1 file changed, 133 insertions(+), 2 deletions(-)
> > > > >
> > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > index 2ac5668a94ce..06608d696e2e 100644
> > > > > --- a/drivers/net/virtio_net.c
> > > > > +++ b/drivers/net/virtio_net.c
> > > > > @@ -500,6 +500,10 @@ struct virtio_net_common_hdr {
> > > > >  };
> > > > >
> > > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > > > +                               struct net_device *dev,
> > > > > +                               unsigned int *xdp_xmit,
> > > > > +                               struct virtnet_rq_stats *stats);
> > > > >
> > > > >  static bool is_xdp_frame(void *ptr)
> > > > >  {
> > > > > @@ -1040,6 +1044,120 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > > > >          sg->length = len;
> > > > >  }
> > > > >
> > > > > +static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> > > > > +                                   struct receive_queue *rq, void *buf, u32 len)
> > > > > +{
> > > > > +        struct xdp_buff *xdp;
> > > > > +        u32 bufsize;
> > > > > +
> > > > > +        xdp = (struct xdp_buff *)buf;
> > > > > +
> > > > > +        bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
> > > > > +
> > > > > +        if (unlikely(len > bufsize)) {
> > > > > +                pr_debug("%s: rx error: len %u exceeds truesize %u\n",
> > > > > +                         vi->dev->name, len, bufsize);
> > > > > +                DEV_STATS_INC(vi->dev, rx_length_errors);
> > > > > +                xsk_buff_free(xdp);
> > > > > +                return NULL;
> > > > > +        }
> > > > > +
> > > > > +        xsk_buff_set_size(xdp, len);
> > > > > +        xsk_buff_dma_sync_for_cpu(xdp);
> > > > > +
> > > > > +        return xdp;
> > > > > +}
> > > > > +
> > > > > +static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
> > > > > +                                         struct xdp_buff *xdp)
> > > > > +{
> > > >
> > > > So we have a similar caller which is receive_small_build_skb(). Any
> > > > chance to reuse that?
> > >
> > > receive_small_build_skb() works with build_skb().
> >
> > Right.
> >
> > > Here we need to copy the packet from the xsk buffer to the skb buffer,
> > > so I do not think we can reuse it.
> >
> > Let's rename this to xsk_construct_skb()?
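To expand on why receive_small_build_skb() cannot be reused here: the
normal small-mode path hands the receive buffer itself to the stack via
build_skb(), so no copy is needed. Below is a rough sketch of that shape
(simplified; small_mode_skb_sketch() is a made-up name, and the real
headroom/length handling in virtio_net.c is more involved):

/* Sketch only: the skb wraps the original buffer, which is freed
 * when the skb is consumed -- no memcpy() of the payload.
 */
static struct sk_buff *small_mode_skb_sketch(void *buf, unsigned int buflen,
                                             unsigned int headroom,
                                             unsigned int len)
{
        struct sk_buff *skb = build_skb(buf, buflen);

        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, headroom);     /* skip virtio header + XDP headroom */
        skb_put(skb, len);              /* payload already sits in the buffer */

        return skb;
}

An xsk frame cannot be handed over like this: its memory belongs to the
XSK pool and has to go back via xsk_buff_free(), so for XDP_PASS we must
copy the data into a freshly allocated skb, which is what
xdp_construct_skb() below does.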
> >
> > >
> > > >
> > > > > +        unsigned int metasize = xdp->data - xdp->data_meta;
> > > > > +        struct sk_buff *skb;
> > > > > +        unsigned int size;
> > > > > +
> > > > > +        size = xdp->data_end - xdp->data_hard_start;
> > > > > +        skb = napi_alloc_skb(&rq->napi, size);
> > > > > +        if (unlikely(!skb)) {
> > > > > +                xsk_buff_free(xdp);
> > > > > +                return NULL;
> > > > > +        }
> > > > > +
> > > > > +        skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
> > > > > +
> > > > > +        size = xdp->data_end - xdp->data_meta;
> > > > > +        memcpy(__skb_put(skb, size), xdp->data_meta, size);
> > > > > +
> > > > > +        if (metasize) {
> > > > > +                __skb_pull(skb, metasize);
> > > > > +                skb_metadata_set(skb, metasize);
> > > > > +        }
> > > > > +
> > > > > +        xsk_buff_free(xdp);
> > > > > +
> > > > > +        return skb;
> > > > > +}
> > > > > +
> > > > > +static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
> > > > > +                                                 struct receive_queue *rq, struct xdp_buff *xdp,
> > > > > +                                                 unsigned int *xdp_xmit,
> > > > > +                                                 struct virtnet_rq_stats *stats)
> > > > > +{
> > > > > +        struct bpf_prog *prog;
> > > > > +        u32 ret;
> > > > > +
> > > > > +        ret = XDP_PASS;
> > > > > +        rcu_read_lock();
> > > > > +        prog = rcu_dereference(rq->xdp_prog);
> > > > > +        if (prog)
> > > > > +                ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
> > > > > +        rcu_read_unlock();
> > > > > +
> > > > > +        switch (ret) {
> > > > > +        case XDP_PASS:
> > > > > +                return xdp_construct_skb(rq, xdp);
> > > > > +
> > > > > +        case XDP_TX:
> > > > > +        case XDP_REDIRECT:
> > > > > +                return NULL;
> > > > > +
> > > > > +        default:
> > > > > +                /* drop packet */
> > > > > +                xsk_buff_free(xdp);
> > > > > +                u64_stats_inc(&stats->drops);
> > > > > +                return NULL;
> > > > > +        }
> > > > > +}
> > > > > +
> > > > > +static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
> > > > > +                                               void *buf, u32 len,
> > > > > +                                               unsigned int *xdp_xmit,
> > > > > +                                               struct virtnet_rq_stats *stats)
> > > > > +{
> > > > > +        struct net_device *dev = vi->dev;
> > > > > +        struct sk_buff *skb = NULL;
> > > > > +        struct xdp_buff *xdp;
> > > > > +
> > > > > +        len -= vi->hdr_len;
> > > > > +
> > > > > +        u64_stats_add(&stats->bytes, len);
> > > > > +
> > > > > +        xdp = buf_to_xdp(vi, rq, buf, len);
> > > > > +        if (!xdp)
> > > > > +                return NULL;
> > > > > +
> > > > > +        if (unlikely(len < ETH_HLEN)) {
> > > > > +                pr_debug("%s: short packet %i\n", dev->name, len);
> > > > > +                DEV_STATS_INC(dev, rx_length_errors);
> > > > > +                xsk_buff_free(xdp);
> > > > > +                return NULL;
> > > > > +        }
> > > > > +
> > > > > +        if (!vi->mergeable_rx_bufs)
> > > > > +                skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
> > > > > +
> > > > > +        return skb;
> > > > > +}
> > > > > +
> > > > >  static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> > > > >                                     struct xsk_buff_pool *pool, gfp_t gfp)
> > > > >  {
> > > > > @@ -2363,9 +2481,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
> > > > >          void *buf;
> > > > >          int i;
> > > > >
> > > > > -        if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > > > > -                void *ctx;
> > > > > +        if (rq->xsk.pool) {
> > > > > +                struct sk_buff *skb;
> > > > > +
> > > > > +                while (packets < budget) {
> > > > > +                        buf = virtqueue_get_buf(rq->vq, &len);
> > > > > +                        if (!buf)
> > > > > +                                break;
> > > > > +
> > > > > +                        skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);
> > > > > +                        if (skb)
> > > > > +                                virtnet_receive_done(vi, rq, skb);
> > > > > +
> > > > > +                        packets++;
> > > > > +                }
> > > >
> > > > If reusing turns out to be hard, I'd rather add new paths in
> > > > receive_small().
> > >
> > > The existing functions are called after virtnet_rq_get_buf(), which does
> > > the DMA unmap. But for xsk, the DMA unmap is not needed, so the xsk
> > > receive handler should use virtqueue_get_buf() directly.
> >
> > Probably, but if it's just virtnet_rq_get_buf() we can simply do:
> >
> > static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
> > {
> >         void *buf;
> >
> >         buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
> >         if (buf && !rq->xsk.pool)
> >                 virtnet_rq_unmap(rq, buf, *len);
> >
> >         return buf;
> > }
>
> Or maybe it would be much clearer if we did:
>
> static int virtnet_receive()
> {
>
>         if (rq->xsk.pool)
>                 virtnet_receive_xsk()
>         else
>                 virtnet_receive_xxx()
>         ...
> }

I like this. Thanks. (A rough sketch of that split is appended at the
bottom of this mail.)

> Thanks
>
> >
> > Thanks
> >
> > >
> > > Thanks.
> > >
> > > > > +        } else if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > > > > +                void *ctx;
> > > > >
> > > > >                  while (packets < budget &&
> > > > >                         (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
> > > > >                          receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
> > > > > --
> > > > > 2.32.0.3.g01195cf9f
> > > > >
> > > >
> > > > Thanks
> > > >
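Concretely, the dispatch could look something like the sketch below. This
is only a sketch: virtnet_receive_xsk_bufs() and virtnet_receive_packets()
are placeholder names for the xsk path and the existing paths, and the
stats/NAPI plumbing of the real virtnet_receive() is omitted:

static int virtnet_receive(struct receive_queue *rq, int budget,
                           unsigned int *xdp_xmit)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct virtnet_rq_stats stats = {};
        int packets;

        if (rq->xsk.pool)
                /* xsk buffers come straight from virtqueue_get_buf();
                 * no per-buffer DMA unmap is needed.
                 */
                packets = virtnet_receive_xsk_bufs(vi, rq, budget,
                                                   xdp_xmit, &stats);
        else
                /* the existing paths keep using virtnet_rq_get_buf()
                 * and receive_buf().
                 */
                packets = virtnet_receive_packets(vi, rq, budget,
                                                  xdp_xmit, &stats);

        return packets;
}

This keeps all of the xsk completion handling in one function instead of
scattering it through receive_small()/receive_mergeable().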