linux-next: manual merge of the virtio tree with the net-next tree

Hi Rusty,

Today's linux-next merge of the virtio tree got a conflict in
drivers/net/virtio_net.c between commits e9d7417b97f4 ("virtio-net:
separate fields of sending/receiving queue from virtnet_info") and
986a4f4d452d ("virtio_net: multiqueue support") from the net-next tree
and commits a89f05573fa2 ("virtio-net: remove unused skb_vnet_hdr->num_sg
field"), 2c6d439a7316 ("virtio-net: correct capacity math on ring full"),
e794093a52cd ("virtio_net: don't rely on virtqueue_add_buf() returning
capacity") and 7dc5f95d9b6c ("virtio: net: make it clear that
virtqueue_add_buf() no longer returns > 0") from the virtio tree.

I fixed it up (I think - see below) and can carry the fix as necessary
(no action is required).
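
For reference, the substance of the conflict: net-next moves the virtqueue
state out of virtnet_info into per-queue send_queue/receive_queue structures,
while the virtio tree stops virtqueue_add_buf() from returning the remaining
ring capacity, so callers now check its return value only for errors and read
vq->num_free for free slots. The resolution below just applies that new
convention to the per-queue structures (sq->vq->num_free, rq->vq->num_free).
The fragment that follows is only a minimal userspace sketch of that calling
convention; the mock_* names and the MAX_SKB_FRAGS value are stand-ins for
illustration, not the actual driver code.

/*
 * Illustrative userspace sketch only -- mock types, not the kernel driver.
 * It mirrors the convention the resolution settles on: the add-buffer call
 * returns 0 or a negative errno (never a capacity), and the caller checks
 * the per-queue free-descriptor count (num_free) itself.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS 17	/* typical value on 4K pages; an assumption here */

struct mock_vq {
	int num_free;		/* free descriptors left in the ring */
};

struct mock_send_queue {
	struct mock_vq vq;	/* per-queue, as in net-next's send_queue */
};

/* Stand-in for virtqueue_add_buf(): 0 on success, -ENOSPC when full. */
static int mock_add_buf(struct mock_vq *vq, int need)
{
	if (vq->num_free < need)
		return -ENOSPC;
	vq->num_free -= need;
	return 0;
}

/* Stand-in for the start_xmit() flow after the merge. */
static bool mock_xmit(struct mock_send_queue *sq, int qnum)
{
	int err = mock_add_buf(&sq->vq, 1);

	if (err) {		/* the "This should not happen!" path */
		fprintf(stderr, "Unexpected TXQ (%d) failure: %d\n", qnum, err);
		return false;	/* drop the packet */
	}
	if (sq->vq.num_free < 2 + MAX_SKB_FRAGS)
		printf("TXQ (%d): stopping subqueue, %d slots left\n",
		       qnum, sq->vq.num_free);
	return true;
}

int main(void)
{
	struct mock_send_queue sq = { .vq = { .num_free = 20 } };

	while (mock_xmit(&sq, 0))
		;
	return 0;
}

The 2 + MAX_SKB_FRAGS threshold in the sketch mirrors the driver's worst case
for a single skb (one descriptor for the virtio-net header, one for the linear
data, plus up to MAX_SKB_FRAGS page fragments), which is why start_xmit()
stops the subqueue early rather than returning TX_BUSY.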

-- 
Cheers,
Stephen Rothwell                    sfr@xxxxxxxxxxxxxxxx

diff --cc drivers/net/virtio_net.c
index a644eeb,6289891..0000000
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@@ -523,20 -464,21 +522,21 @@@ static bool try_fill_recv(struct receiv
  
  	do {
  		if (vi->mergeable_rx_bufs)
 -			err = add_recvbuf_mergeable(vi, gfp);
 +			err = add_recvbuf_mergeable(rq, gfp);
  		else if (vi->big_packets)
 -			err = add_recvbuf_big(vi, gfp);
 +			err = add_recvbuf_big(rq, gfp);
  		else
 -			err = add_recvbuf_small(vi, gfp);
 +			err = add_recvbuf_small(rq, gfp);
  
  		oom = err == -ENOMEM;
- 		if (err < 0)
+ 		if (err)
  			break;
 -		++vi->num;
 -	} while (vi->rvq->num_free);
 +		++rq->num;
- 	} while (err > 0);
++	} while (rq->vq->num_free);
+ 
 -	if (unlikely(vi->num > vi->max))
 -		vi->max = vi->num;
 -	virtqueue_kick(vi->rvq);
 +	if (unlikely(rq->num > rq->max))
 +		rq->max = rq->num;
 +	virtqueue_kick(rq->vq);
  	return !oom;
  }
  
@@@ -625,29 -557,13 +625,29 @@@ again
  	return received;
  }
  
 -static void free_old_xmit_skbs(struct virtnet_info *vi)
 +static int virtnet_open(struct net_device *dev)
 +{
 +	struct virtnet_info *vi = netdev_priv(dev);
 +	int i;
 +
 +	for (i = 0; i < vi->max_queue_pairs; i++) {
 +		/* Make sure we have some buffers: if oom use wq. */
 +		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 +			schedule_delayed_work(&vi->refill, 0);
 +		virtnet_napi_enable(&vi->rq[i]);
 +	}
 +
 +	return 0;
 +}
 +
- static unsigned int free_old_xmit_skbs(struct send_queue *sq)
++static void free_old_xmit_skbs(struct send_queue *sq)
  {
  	struct sk_buff *skb;
- 	unsigned int len, tot_sgs = 0;
+ 	unsigned int len;
 +	struct virtnet_info *vi = sq->vq->vdev->priv;
  	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
  
 -	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
 +	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
  		pr_debug("Sent skb %p\n", skb);
  
  		u64_stats_update_begin(&stats->tx_syncp);
@@@ -655,17 -571,15 +655,16 @@@
  		stats->tx_packets++;
  		u64_stats_update_end(&stats->tx_syncp);
  
- 		tot_sgs += skb_vnet_hdr(skb)->num_sg;
  		dev_kfree_skb_any(skb);
  	}
- 	return tot_sgs;
  }
  
 -static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 +static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
  {
  	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
  	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
+ 	unsigned num_sg;
 +	struct virtnet_info *vi = sq->vq->vdev->priv;
  
  	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
  
@@@ -700,42 -614,32 +699,35 @@@
  
  	/* Encode metadata header at front. */
  	if (vi->mergeable_rx_bufs)
 -		sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
 +		sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
  	else
 -		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
 +		sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
  
- 	hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
- 	return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg,
 -	num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
 -	return virtqueue_add_buf(vi->svq, vi->tx_sg, num_sg,
++	num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
++	return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
  				 0, skb, GFP_ATOMIC);
  }
  
  static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
  {
  	struct virtnet_info *vi = netdev_priv(dev);
 +	int qnum = skb_get_queue_mapping(skb);
 +	struct send_queue *sq = &vi->sq[qnum];
- 	int capacity;
+ 	int err;
  
  	/* Free up any pending old buffers before queueing new ones. */
 -	free_old_xmit_skbs(vi);
 +	free_old_xmit_skbs(sq);
  
  	/* Try to transmit */
- 	capacity = xmit_skb(sq, skb);
- 
- 	/* This can happen with OOM and indirect buffers. */
- 	if (unlikely(capacity < 0)) {
- 		if (likely(capacity == -ENOMEM)) {
- 			if (net_ratelimit())
- 				dev_warn(&dev->dev,
- 					 "TXQ (%d) failure: out of memory\n",
- 					 qnum);
- 		} else {
- 			dev->stats.tx_fifo_errors++;
- 			if (net_ratelimit())
- 				dev_warn(&dev->dev,
- 					 "Unexpected TXQ (%d) failure: %d\n",
- 					 qnum, capacity);
- 		}
 -	err = xmit_skb(vi, skb);
++	err = xmit_skb(sq, skb);
+ 
+ 	/* This should not happen! */
+ 	if (unlikely(err)) {
+ 		dev->stats.tx_fifo_errors++;
+ 		if (net_ratelimit())
+ 			dev_warn(&dev->dev,
 -				 "Unexpected TX queue failure: %d\n", err);
++				 "Unexpected TXQ (%d) failure: %d\n",
++				 qnum, err);
  		dev->stats.tx_dropped++;
  		kfree_skb(skb);
  		return NETDEV_TX_OK;
@@@ -748,14 -652,14 +740,13 @@@
  
  	/* Apparently nice girls don't return TX_BUSY; stop the queue
  	 * before it gets out of hand.  Naturally, this wastes entries. */
- 	if (capacity < 2+MAX_SKB_FRAGS) {
 -	if (vi->svq->num_free < 2+MAX_SKB_FRAGS) {
 -		netif_stop_queue(dev);
 -		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
++	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
 +		netif_stop_subqueue(dev, qnum);
 +		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
  			/* More just got used, free them then recheck. */
- 			capacity += free_old_xmit_skbs(sq);
- 			if (capacity >= 2+MAX_SKB_FRAGS) {
 -			free_old_xmit_skbs(vi);
 -			if (vi->svq->num_free >= 2+MAX_SKB_FRAGS) {
 -				netif_start_queue(dev);
 -				virtqueue_disable_cb(vi->svq);
++			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 +				netif_start_subqueue(dev, qnum);
 +				virtqueue_disable_cb(sq->vq);
  			}
  		}
  	}
