From: Jason Wang <jasowang@xxxxxxxxxx>

commit e2412c07f8f3040593dfb88207865a3cd58680c0 upstream.

When the rx buffer is too small for a packet, we will discard the vq
descriptor and retry it for the next packet:

while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
					      &busyloop_intr))) {
...
	/* On overrun, truncate and discard */
	if (unlikely(headcount > UIO_MAXIOV)) {
		iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
		err = sock->ops->recvmsg(sock, &msg,
					 1, MSG_DONTWAIT | MSG_TRUNC);
		pr_debug("Discarded rx packet: len %zd\n", sock_len);
		continue;
	}
...
}

This makes it possible to trigger an infinite while..continue loop
through the cooperation of two VMs:

1) Malicious VM1 allocates a 1-byte rx buffer and tries to slow down
   the vhost process as much as possible, e.g. by using indirect
   descriptors.
2) Malicious VM2 generates packets to VM1 as fast as possible.

Fix this by checking against the weight at the end of the RX and TX
loops. This also eliminates other similar cases where:

- userspace is consuming the packets in the meantime
- a theoretical TOCTOU attack has the guest move the avail index back
  and forth to hit the continue path right after vhost finds that the
  guest has just added new buffers

This addresses CVE-2019-3900.

Fixes: d8316f3991d20 ("vhost: fix total length when packets are too short")
Fixes: 3a4d5c94e9593 ("vhost_net: a kernel-level virtio server")

Signed-off-by: Jason Wang <jasowang@xxxxxxxxxx>
Reviewed-by: Stefan Hajnoczi <stefanha@xxxxxxxxxx>
Signed-off-by: Michael S. Tsirkin <mst@xxxxxxxxxx>
[jwang: backport to 4.19]
Signed-off-by: Jack Wang <jinpu.wang@xxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/vhost/net.c | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -551,7 +551,7 @@ static void handle_tx_copy(struct vhost_
 	int err;
 	int sent_pkts = 0;
 
-	for (;;) {
+	do {
 		bool busyloop_intr = false;
 
 		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
@@ -592,9 +592,7 @@ static void handle_tx_copy(struct vhost_
 			 err, len);
 		if (++nvq->done_idx >= VHOST_NET_BATCH)
 			vhost_net_signal_used(nvq);
-		if (vhost_exceeds_weight(vq, ++sent_pkts, total_len))
-			break;
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 
 	vhost_net_signal_used(nvq);
 }
@@ -618,7 +616,7 @@ static void handle_tx_zerocopy(struct vh
 	bool zcopy_used;
 	int sent_pkts = 0;
 
-	for (;;) {
+	do {
 		bool busyloop_intr;
 
 		/* Release DMAs done buffers first */
@@ -693,10 +691,7 @@ static void handle_tx_zerocopy(struct vh
 		else
 			vhost_zerocopy_signal_used(net, vq);
 		vhost_net_tx_packet(net);
-		if (unlikely(vhost_exceeds_weight(vq, ++sent_pkts,
-						  total_len)))
-			break;
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -932,8 +927,11 @@ static void handle_rx(struct vhost_net *
 		vq->log : NULL;
 	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
-	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
-						      &busyloop_intr))) {
+	do {
+		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+						      &busyloop_intr);
+		if (!sock_len)
+			break;
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
 		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -1018,12 +1016,11 @@ static void handle_rx(struct vhost_net *
 			vhost_log_write(vq, vq_log, log, vhost_len,
 					vq->iov, in);
 		total_len += vhost_len;
-		if (unlikely(vhost_exceeds_weight(vq, ++recv_pkts, total_len)))
-			goto out;
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
 	if (unlikely(busyloop_intr))
 		vhost_poll_queue(&vq->poll);
-	else
+	else if (!sock_len)
 		vhost_net_enable_vq(net, vq);
 out:
 	vhost_net_signal_used(nvq);
@@ -1105,7 +1102,7 @@ static int vhost_net_open(struct inode *
 	}
 	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
 		       UIO_MAXIOV + VHOST_NET_BATCH,
-		       VHOST_NET_WEIGHT, VHOST_NET_PKT_WEIGHT);
+		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
 
 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
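
For readers without the tree at hand, below is a minimal userspace
sketch of the pattern the patch applies. The names (PKT_BUDGET,
BYTE_BUDGET, pull_packet, requeue_work) are hypothetical stand-ins, not
the kernel's symbols; the real limits are the VHOST_NET_PKT_WEIGHT and
VHOST_NET_WEIGHT arguments visible in the last hunk. The point is that
once the weight check becomes the do/while condition itself, every path
through the loop body, including a discard-and-retry 'continue', still
reaches the check, so a cooperating flood can no longer pin the worker.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PKT_BUDGET  256		/* hypothetical per-run packet budget */
#define BYTE_BUDGET (1 << 19)	/* hypothetical per-run byte budget */

/* Plays the role of vhost_exceeds_weight(): true once a budget is spent. */
static bool exceeds_weight(int pkts, size_t total_len)
{
	return pkts >= PKT_BUDGET || total_len >= BYTE_BUDGET;
}

/* Hypothetical packet source; returns 0 when the queue is empty. */
static size_t pull_packet(void)
{
	return 64;	/* pretend a peer floods us with 64-byte packets */
}

/* Hypothetical deferral; in vhost this is requeueing the work item. */
static void requeue_work(void)
{
	puts("budget spent, rescheduling instead of looping forever");
}

static void handle_queue(void)
{
	size_t total_len = 0;
	int pkts = 0;
	size_t len;

	/*
	 * The only exits are an empty queue or an exhausted budget.
	 * A 'continue' inside this body jumps to the loop condition,
	 * i.e. to the weight check, unlike the original bottom-of-body
	 * check in a while loop, which 'continue' skipped entirely.
	 */
	do {
		len = pull_packet();
		if (!len)
			return;		/* idle: nothing left to do */
		total_len += len;
		/* ...process or discard the packet here... */
	} while (!exceeds_weight(++pkts, total_len));

	requeue_work();		/* yield so other work gets CPU time */
}

int main(void)
{
	handle_queue();
	return 0;
}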