We are now re-probing the csum related fields after XDP processing, so that
the XDP and RX hw checksum capabilities can coexist on the XDP path. This
brings two benefits:

1. The RX hw checksum capability can still be used when XDP is loaded.
2. Packet loss is avoided when loading XDP in the vm-vm scenario.

Signed-off-by: Heng Qi <hengqi@xxxxxxxxxxxxxxxxx>
Reviewed-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
---
v3->v4:
- Rewrite some comments.

v2->v3:
- Use skb_checksum_setup() instead of virtnet_flow_dissect_udp_tcp().
  Essentially equivalent.

 drivers/net/virtio_net.c | 82 +++++++++++++++++++++++++++++++++-------
 1 file changed, 69 insertions(+), 13 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5a7f7a76b920..a47342f972b5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1568,6 +1568,41 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
 	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
 }
 
+static int virtnet_set_csum_after_xdp(struct virtnet_info *vi,
+				      struct sk_buff *skb,
+				      __u8 flags)
+{
+	int err = 0;
+
+	/* When an XDP program is loaded, in the vm-vm scenario on the same
+	 * host, packets marked VIRTIO_NET_HDR_F_NEEDS_CSUM travel without a
+	 * complete checksum. Although these packets are safe from the point
+	 * of view of the vm, in order to be successfully forwarded by the
+	 * upper layer and to avoid packet loss caused by XDP modification,
+	 * we re-probe the necessary checksum related information:
+	 * skb->csum_{start, offset}, pseudo-header checksum.
+	 *
+	 * If the received packet is marked VIRTIO_NET_HDR_F_DATA_VALID:
+	 * when _F_GUEST_CSUM is negotiated, the device validates the checksum
+	 * and virtio-net sets skb->ip_summed to CHECKSUM_UNNECESSARY;
+	 * otherwise, virtio-net hands the packet to the stack to validate it.
+	 */
+	if (flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+		/* No need to care about SCTP because virtio-net currently doesn't
+		 * support SCTP CRC checksum offloading, that is, SCTP packets have
+		 * complete checksums.
+		 */
+		err = skb_checksum_setup(skb, true);
+	} else if (flags & VIRTIO_NET_HDR_F_DATA_VALID) {
+		/* XDP guarantees that packets marked VIRTIO_NET_HDR_F_DATA_VALID
+		 * still have a correct checksum after they are processed.
+		 */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+
+	return err;
+}
+
 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 			void *buf, unsigned int len, void **ctx,
 			unsigned int *xdp_xmit,
@@ -1576,6 +1611,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 	struct net_device *dev = vi->dev;
 	struct sk_buff *skb;
 	struct virtio_net_hdr_mrg_rxbuf *hdr;
+	__u8 flags;
 
 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
@@ -1584,6 +1620,12 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 		return;
 	}
 
+	/* XDP may modify/overwrite the packet, including the virtnet hdr,
+	 * so save the flags of the virtnet hdr before XDP processing.
+	 */
+	if (unlikely(vi->xdp_enabled))
+		flags = ((struct virtio_net_hdr_mrg_rxbuf *)buf)->hdr.flags;
+
 	if (vi->mergeable_rx_bufs)
 		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
 					stats);
@@ -1595,23 +1637,37 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 	if (unlikely(!skb))
 		return;
 
-	hdr = skb_vnet_hdr(skb);
-	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
-		virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
-
-	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	if (unlikely(vi->xdp_enabled)) {
+		/* Required to do this before re-probing and calculating
+		 * the pseudo-header checksum.
+		 */
+		skb->protocol = eth_type_trans(skb, dev);
+		skb_reset_network_header(skb);
+		if (virtnet_set_csum_after_xdp(vi, skb, flags) < 0) {
+			pr_debug("%s: errors occurred in setting partial csum",
+				 dev->name);
+			goto frame_err;
+		}
+	} else {
+		hdr = skb_vnet_hdr(skb);
+		if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
+			virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
+
+		if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
+					  virtio_is_little_endian(vi->vdev))) {
+			net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
+					     dev->name, hdr->hdr.gso_type,
+					     hdr->hdr.gso_size);
+			goto frame_err;
+		}
 
-	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
-				  virtio_is_little_endian(vi->vdev))) {
-		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
-				     dev->name, hdr->hdr.gso_type,
-				     hdr->hdr.gso_size);
-		goto frame_err;
+		skb->protocol = eth_type_trans(skb, dev);
 	}
 
 	skb_record_rx_queue(skb, vq2rxq(rq->vq));
-	skb->protocol = eth_type_trans(skb, dev);
 
 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
-- 
2.19.1.6.gb485710b
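
For readers who want to see concretely what "re-probing skb->csum_{start,
offset} and the pseudo-header checksum" means, here is a minimal standalone
sketch (plain user-space C, not kernel code; the addresses and lengths are
made-up examples, and fold32()/tcp_pseudo_hdr_sum() are ad-hoc helpers, not
kernel APIs) of the CHECKSUM_PARTIAL state that skb_checksum_setup(skb, true)
re-establishes for a TCP/IPv4 packet: csum_start points at the transport
header, csum_offset at the TCP checksum field, and that field is seeded with
the folded, non-inverted pseudo-header sum so the device (or the software
fallback) can finish the checksum over the TCP header and payload.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

/* Fold a 32-bit ones'-complement sum into 16 bits. Note the kernel's
 * csum_fold() additionally inverts the result; we deliberately do not,
 * because the seed stored in the TCP checksum field is the non-inverted sum.
 */
static uint16_t fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* IPv4/TCP pseudo-header sum over saddr, daddr, protocol and L4 length. */
static uint16_t tcp_pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
				   uint16_t l4_len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_TCP;
	sum += l4_len;

	return fold32(sum);
}

int main(void)
{
	/* Hypothetical example values, converted to host byte order. */
	uint32_t saddr = ntohl(inet_addr("192.0.2.1"));
	uint32_t daddr = ntohl(inet_addr("192.0.2.2"));
	uint16_t l4_len = 20 + 100;	/* TCP header + payload bytes */

	printf("csum_offset for TCP: %zu\n", offsetof(struct tcphdr, check));
	printf("pseudo-header seed : 0x%04x (stored big-endian in the packet)\n",
	       tcp_pseudo_hdr_sum(saddr, daddr, l4_len));
	return 0;
}

This is also why the NEEDS_CSUM branch above passes recalculate=true to
skb_checksum_setup(): the XDP program may have modified the headers, so the
pseudo-header seed has to be recomputed along with csum_start and csum_offset.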