This is an attempt to improve the latency of virtio-net while not hurting throughput. I wanted to try moving packet TX into a different thread so we can quickly return to the guest after it kicks us to send packets out. I also switched the order of when the tx_timer comes into play, so we can get an initial burst of packets out, then wait for the timer to fire and notify us if there's more to do. Here's what it does for me (average of 5 runs each, testing to a remote system on a 1Gb network): netperf TCP_STREAM: 939.22Mb/s -> 935.24Mb/s = 99.58% netperf TCP_RR: 2028.72/s -> 3927.99/s = 193.62% tbench: 92.99MB/s -> 99.97MB/s = 107.51% I'd be interested to hear if it helps or hurts anyone else. Thanks, Alex Signed-off-by: Alex Williamson <alex.williamson@xxxxxx> --- qemu/hw/virtio-net.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 57 insertions(+), 0 deletions(-) diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c index 8f3c41d..26508bd 100644 --- a/qemu/hw/virtio-net.c +++ b/qemu/hw/virtio-net.c @@ -17,6 +17,8 @@ #include "virtio-net.h" #ifdef USE_KVM #include "qemu-kvm.h" +#include "compatfd.h" +#include "qemu-char.h" #endif #define TAP_VNET_HDR @@ -49,6 +51,9 @@ typedef struct VirtIONet int enabled; uint32_t *vlans; } vlan_table; +#ifdef USE_KVM + int fds[2]; +#endif } VirtIONet; /* TODO @@ -570,10 +575,50 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq) } } +#ifdef USE_KVM +static void virtio_net_do_tx(void *opaque) +{ + VirtIONet *n = opaque; + + virtio_net_flush_tx(n, n->tx_vq); + qemu_mod_timer(n->tx_timer, + qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL); +} + +static void virtio_net_notify_tx(int fd) +{ + uint64_t value = 1; + char buffer[8]; + size_t offset = 0; + + memcpy(buffer, &value, sizeof(value)); + + while (offset < 8) { + ssize_t len; + + len = write(fd, buffer + offset, 8 - offset); + if (len == -1 && errno == EINTR) + continue; + + if (len <= 0) + break; + + offset += len; + } + + if (offset != 8) + 
fprintf(stderr, "virtio-net: failed to notify tx thread\n"); +} +#endif + static void virtio_net_handle_tx(VirtIODevice *vdev, VirtQueue *vq) { VirtIONet *n = to_virtio_net(vdev); +#ifdef USE_KVM + virtio_queue_set_notification(n->tx_vq, 0); + virtio_net_notify_tx(n->fds[0]); +#else if (n->tx_timer_active) { virtio_queue_set_notification(vq, 1); qemu_del_timer(n->tx_timer); @@ -585,6 +630,7 @@ static void virtio_net_handle_tx(VirtIODevice *vdev, VirtQueue *vq) n->tx_timer_active = 1; virtio_queue_set_notification(vq, 0); } +#endif } static void virtio_net_tx_timer(void *opaque) @@ -598,7 +644,9 @@ static void virtio_net_tx_timer(void *opaque) return; virtio_queue_set_notification(n->tx_vq, 1); +#ifndef USE_KVM virtio_net_flush_tx(n, n->tx_vq); +#endif } /* @@ -712,6 +760,15 @@ PCIDevice *virtio_net_init(PCIBus *bus, NICInfo *nd, int devfn) virtio_net_receive, virtio_net_can_receive, n); n->vc->link_status_changed = virtio_net_set_link_status; +#ifdef USE_KVM + if (qemu_eventfd(n->fds) == -1) { + fprintf(stderr, "failed to setup virtio-net eventfd\n"); + return NULL; + } + + qemu_set_fd_handler2(n->fds[0], NULL, virtio_net_do_tx, NULL, (void *)n); +#endif + qemu_format_nic_info_str(n->vc, n->mac); n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n); -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html