In the (rare) case where new descriptors are used while virtio_net has enabled
the callback for the TX vq, virtio_net uses the number of sg entries in each
skb it frees to calculate how many descriptors in the ring have just become
available. But this value is an overestimate: with indirect buffers each skb
only uses one descriptor entry, so we may wake the queue only to find that we
are still out of space and have to stop the ring again almost at once.

Using the new virtqueue_min_capacity() call, we can query the ring's remaining
capacity directly, so use that instead. This estimate is worst-case, which is
consistent with the value returned by virtqueue_add_buf().

Signed-off-by: Michael S. Tsirkin <mst@xxxxxxxxxx>
---
 drivers/net/virtio_net.c |    9 ++++-----
 1 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f685324..a0ee78d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -509,19 +509,17 @@ again:
 	return received;
 }
 
-static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
+static void free_old_xmit_skbs(struct virtnet_info *vi)
 {
 	struct sk_buff *skb;
-	unsigned int len, tot_sgs = 0;
+	unsigned int len;
 
 	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
-		tot_sgs += skb_vnet_hdr(skb)->num_sg;
 		dev_kfree_skb_any(skb);
 	}
-	return tot_sgs;
 }
 
 static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
@@ -611,7 +609,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
 			/* More just got used, free them then recheck. */
-			capacity += free_old_xmit_skbs(vi);
+			free_old_xmit_skbs(vi);
+			capacity = virtqueue_min_capacity(vi->svq);
 			if (capacity >= 2+MAX_SKB_FRAGS) {
 				netif_start_queue(dev);
 				virtqueue_disable_cb(vi->svq);
-- 
1.7.5.53.gc233e
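
To make the overestimate concrete, here is a minimal standalone sketch of the
accounting problem the patch fixes. This is plain userspace C, not kernel
code: toy_ring, toy_add_skb, toy_free_skb and the constants are invented for
illustration, and only the arithmetic mirrors the commit message (with
indirect buffers an skb occupies a single descriptor, so summing
skb_vnet_hdr(skb)->num_sg over freed skbs overstates how many descriptors
were actually released).

/*
 * Toy model of TX ring capacity accounting; names are hypothetical,
 * only the arithmetic follows the patch description.
 */
#include <stdio.h>
#include <stdbool.h>

#define TOY_RING_SIZE	256
#define TOY_MAX_FRAGS	17	/* stand-in for MAX_SKB_FRAGS */

struct toy_ring {
	unsigned int free_descs;	/* descriptors currently unused */
	bool indirect;			/* indirect buffers in use? */
};

/* Queue an skb with num_sg scatter-gather entries. */
static void toy_add_skb(struct toy_ring *r, unsigned int num_sg)
{
	/* Indirect: the whole sg list lives in one ring descriptor. */
	r->free_descs -= r->indirect ? 1 : num_sg;
}

/* Device completed that skb; its descriptors come back. */
static void toy_free_skb(struct toy_ring *r, unsigned int num_sg)
{
	r->free_descs += r->indirect ? 1 : num_sg;
}

int main(void)
{
	struct toy_ring r = { .free_descs = TOY_RING_SIZE, .indirect = true };
	unsigned int num_sg = 2 + TOY_MAX_FRAGS;	/* worst-case skb */
	unsigned int estimated;

	/* Fill the ring completely with worst-case skbs. */
	while (r.free_descs > 0)
		toy_add_skb(&r, num_sg);

	/* Device now completes one skb. */
	toy_free_skb(&r, num_sg);
	estimated = num_sg;	/* old code: capacity += tot_sgs */

	printf("old estimate of freed capacity: %u\n", estimated);    /* 19 */
	printf("actual free descriptors:        %u\n", r.free_descs); /* 1 */

	/*
	 * The old estimate (19) satisfies "capacity >= 2 + MAX_SKB_FRAGS"
	 * and would restart the queue, even though only one descriptor is
	 * really free.  A worst-case reading of the ring itself, which is
	 * what virtqueue_min_capacity() provides, keeps the queue stopped
	 * until enough descriptors have genuinely been returned.
	 */
	return 0;
}

Running the sketch prints an estimate of 19 against an actual worst-case
capacity of 1, which is the wake-then-stop-again pattern the commit message
describes.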