Re: [RHEL7] virtio-net: switch to use XPS to choose txq

On 2019/12/20 at 7:48 PM, Cindylu wrote:
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1769479
Upstream Status: 9bb8ca86075f37d3c169b9c46f8e7c6d3165e18f
Brew:  https://brewweb.devel.redhat.com/taskinfo?taskID=25482050
Tested: verified by customer

commit 9bb8ca86075f37d3c169b9c46f8e7c6d3165e18f
Author: Jason Wang <jasowang@xxxxxxxxxx>
Date:   Tue Nov 5 18:19:45 2013 +0800

     virtio-net: switch to use XPS to choose txq

     We used to use a percpu structure, vq_index, to record the CPU to
     queue mapping. This is suboptimal since it duplicates the work of
     XPS and loses all other XPS functionality, such as allowing users
     to configure their own transmission steering strategy.

     So this patch switches to XPS and suggests a default mapping when
     the number of CPUs is equal to the number of queues. With XPS
     support, there is no need to keep the per-cpu vq_index or
     .ndo_select_queue(), so they are removed as well.

     Cc: Rusty Russell <rusty@xxxxxxxxxxxxxxx>
     Cc: Michael S. Tsirkin <mst@xxxxxxxxxx>
     Acked-by: Rusty Russell <rusty@xxxxxxxxxxxxxxx>
     Acked-by: Michael S. Tsirkin <mst@xxxxxxxxxx>
     Signed-off-by: Jason Wang <jasowang@xxxxxxxxxx>
     Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
---
  drivers/net/virtio_net.c | 49 ++--------------------------------------
  1 file changed, 2 insertions(+), 47 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 61c64d92211..d5335774c60 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -125,9 +125,6 @@ struct virtnet_info {
  	/* Does the affinity hint is set for virtqueues? */
  	bool affinity_hint_set;
 
-	/* Per-cpu variable to show the mapping from CPU to virtqueue */
-	int __percpu *vq_index;
-
  	/* CPU hot plug notifier */
  	struct notifier_block nb;
@@ -1124,7 +1121,6 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
  static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
  {
  	int i;
-	int cpu;
 
 	if (vi->affinity_hint_set) {
  		for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1134,16 +1130,6 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 		vi->affinity_hint_set = false;
  	}
-
-	i = 0;
-	for_each_online_cpu(cpu) {
-		if (cpu == hcpu) {
-			*per_cpu_ptr(vi->vq_index, cpu) = -1;
-		} else {
-			*per_cpu_ptr(vi->vq_index, cpu) =
-				++i % vi->curr_queue_pairs;
-		}
-	}
  }
 
 static void virtnet_set_affinity(struct virtnet_info *vi)
@@ -1165,7 +1151,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
  	for_each_online_cpu(cpu) {
  		virtqueue_set_affinity(vi->rq[i].vq, cpu);
  		virtqueue_set_affinity(vi->sq[i].vq, cpu);
-		*per_cpu_ptr(vi->vq_index, cpu) = i;
+		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
  		i++;
  	}
@@ -1268,29 +1254,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
  	.get_channels = virtnet_get_channels,
  };
 
-/* To avoid contending a lock hold by a vcpu who would exit to host, select the
- * txq based on the processor id.
- */
-static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb,
-			void *accel_priv, select_queue_fallback_t fallback)
-{
-	int txq;
-	struct virtnet_info *vi = netdev_priv(dev);
-
-	if (skb_rx_queue_recorded(skb)) {
-		txq = skb_get_rx_queue(skb);
-	} else {
-		txq = *__this_cpu_ptr(vi->vq_index);
-		if (txq == -1)
-			txq = 0;
-	}
-
-	while (unlikely(txq >= dev->real_num_tx_queues))
-		txq -= dev->real_num_tx_queues;
-
-	return txq;
-}
-
  static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
  				      size_t len)
  {
@@ -1317,7 +1280,6 @@ static const struct net_device_ops virtnet_netdev = {
  	.ndo_get_stats64     = virtnet_stats,
  	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
  	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-	.ndo_select_queue     = virtnet_select_queue,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  	.ndo_poll_controller = virtnet_netpoll,
  #endif
@@ -1642,10 +1604,6 @@ static int virtnet_probe(struct virtio_device *vdev)
  	if (vi->stats == NULL)
  		goto free;
 
-	vi->vq_index = alloc_percpu(int);
-	if (vi->vq_index == NULL)
-		goto free_stats;
-
  	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
 
 	/* If we can receive ANY GSO packets, we must allocate large ones. */
@@ -1695,7 +1653,7 @@ static int virtnet_probe(struct virtio_device *vdev)
  	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
  	err = init_vqs(vi);
  	if (err)
-		goto free_index;
+		goto free_stats;
 
 	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
  	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
@@ -1764,8 +1722,6 @@ free_failover:
  free_vqs:
  	cancel_delayed_work_sync(&vi->refill);
  	virtnet_del_vqs(vi);
-free_index:
-	free_percpu(vi->vq_index);
  free_stats:
  	free_percpu(vi->stats);
  free:
@@ -1801,7 +1757,6 @@ static void virtnet_remove(struct virtio_device *vdev)
  	remove_vq_common(vi);
 
-	free_percpu(vi->vq_index);
  	free_percpu(vi->stats);
  	free_netdev(vi->dev);
  }
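
For readers less familiar with XPS, the driver-side half of the change
reduces to suggesting a 1:1 CPU-to-txq mapping through
netif_set_xps_queue(). Below is a minimal sketch of that pattern; the
function and parameter names (example_suggest_xps, dev, nr_queues) are
illustrative only, and the actual logic lives in virtnet_set_affinity()
in the hunk above.

/* Sketch only: suggest a 1:1 CPU -> tx queue mapping via XPS, as the
 * patch does when the number of online CPUs matches the queue count.
 */
#include <linux/netdevice.h>
#include <linux/cpumask.h>

static void example_suggest_xps(struct net_device *dev, int nr_queues)
{
	int cpu, i = 0;

	/* Only suggest a mapping when it can be 1:1. */
	if (num_online_cpus() != nr_queues)
		return;

	for_each_online_cpu(cpu) {
		/* Hint that this CPU should transmit on tx queue i. */
		netif_set_xps_queue(dev, cpumask_of(cpu), i);
		i++;
	}
}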

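The configurability that the commit message says the old per-cpu
vq_index lost comes back for free with XPS: each tx queue exposes a
writable CPU bitmap in sysfs (see Documentation/networking/scaling.txt).
A minimal userspace sketch that pins one queue to CPU 0 follows; the
device name eth0 and queue index 0 are placeholders.

/* Sketch only: override the XPS CPU mask of a single tx queue from
 * userspace. The mask is a hex bitmap of CPUs; "1" means CPU 0.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/class/net/eth0/queues/tx-0/xps_cpus";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	fprintf(f, "1\n");	/* allow only CPU 0 to pick this queue */
	fclose(f);
	return EXIT_SUCCESS;
}

With no .ndo_select_queue(), the core's default transmit queue
selection consults these XPS maps, so the driver hook becomes
unnecessary.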

Acked-by: Jason Wang <jasowang@xxxxxxxxxx>

Thanks

_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization



