Re: [PATCH 3/7] vdpa/mlx5: Support configuring max data virtqueue pairs

On 12/7/2021 12:19 AM, Eli Cohen wrote:
On Thu, Dec 02, 2021 at 11:28:12PM -0800, Si-Wei Liu wrote:

On 12/1/2021 11:57 AM, Eli Cohen wrote:
Check whether the max number of data virtqueue pairs was provided when
adding a new device and verify that the new value does not exceed the
device's capabilities.

In addition, change the arrays holding virtqueue and callback contexts
to be dynamically allocated.

Signed-off-by: Eli Cohen <elic@xxxxxxxxxx>
---
   drivers/vdpa/mlx5/net/mlx5_vnet.c | 33 ++++++++++++++++++++-----------
   1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index b66f41ccbac2..62aba5dbd8fa 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -131,11 +131,6 @@ struct mlx5_vdpa_virtqueue {
   	struct mlx5_vq_restore_info ri;
   };
-/* We will remove this limitation once mlx5_vdpa_alloc_resources()
- * provides for driver space allocation
Is this comment outdated, i.e. is the driver space allocation in
mlx5_vdpa_alloc_resources() already provided?

Not sure I follow. The comment was removed in this patch since we no
longer depend on MLX5_MAX_SUPPORTED_VQS and instead use dynamic
allocation.
- */
-#define MLX5_MAX_SUPPORTED_VQS 16
-
   static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
   {
   	if (unlikely(idx > mvdev->max_idx))
@@ -148,8 +143,8 @@ struct mlx5_vdpa_net {
   	struct mlx5_vdpa_dev mvdev;
   	struct mlx5_vdpa_net_resources res;
   	struct virtio_net_config config;
-	struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
-	struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
+	struct mlx5_vdpa_virtqueue *vqs;
+	struct vdpa_callback *event_cbs;
   	/* Serialize vq resources creation and destruction. This is required
   	 * since memory map might change and we need to destroy and create
@@ -1218,7 +1213,7 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
   {
   	int i;
-	for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
+	for (i = 0; i < ndev->mvdev.max_vqs; i++)
   		suspend_vq(ndev, &ndev->vqs[i]);
   }
@@ -1245,7 +1240,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
   	int i, j;
   	int err;
-	max_rqt = min_t(int, MLX5_MAX_SUPPORTED_VQS / 2,
+	max_rqt = min_t(int, ndev->mvdev.max_vqs  / 2,
   			1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
   	if (max_rqt < 1)
   		return -EOPNOTSUPP;
@@ -2235,7 +2230,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
   	clear_vqs_ready(ndev);
   	mlx5_vdpa_destroy_mr(&ndev->mvdev);
   	ndev->mvdev.status = 0;
-	memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
+	memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
   	ndev->mvdev.actual_features = 0;
   	++mvdev->generation;
   	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
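A side note on the memset() change above, for anyone skimming: once event_cbs
becomes a pointer rather than an embedded array, sizeof(ndev->event_cbs) only
yields the size of the pointer, so the explicit element-count multiply is
needed. A tiny stand-alone illustration of the C rule (userspace toy code, not
driver code; 17 stands in for the old MLX5_MAX_SUPPORTED_VQS + 1):

	#include <stdio.h>

	struct cb {
		void (*fn)(void *priv);
		void *priv;
	};

	int main(void)
	{
		struct cb fixed[17];
		struct cb *dynamic;

		/* For a true array, sizeof() covers every element. */
		printf("%zu\n", sizeof(fixed));          /* 17 * sizeof(struct cb) */

		/* For a pointer, sizeof() is just the pointer itself (typically 8). */
		printf("%zu\n", sizeof(dynamic));

		/* Hence the explicit element-count multiply in the patch. */
		printf("%zu\n", sizeof(*dynamic) * 17);

		return 0;
	}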
@@ -2308,6 +2303,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
   	}
   	mlx5_vdpa_free_resources(&ndev->mvdev);
   	mutex_destroy(&ndev->reslock);
+	kfree(ndev->event_cbs);
+	kfree(ndev->vqs);
   }
   static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
@@ -2547,13 +2544,24 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
   	/* we save one virtqueue for control virtqueue should we require it */
   	max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
-	max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
+	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) {
+		if (add_config->max_vq_pairs & (add_config->max_vq_pairs - 1) ||
+		    add_config->max_vq_pairs > max_vqs / 2)
+			return -EINVAL;
It'd be best to describe the cause of the failure here; the power-of-2
limitation is known to me, but it is not friendly enough for a first-time
user. A dev_warn would work for me.
I could add a warning, but if some test script does this plenty of times
it will clutter dmesg. The call does fail if you provide a value that is
not a power of two.
You could pick dev_warn_once() and fix the other, similar dev_warn() usages in the same function; something along the lines of the sketch below would do. But I do wonder why your firmware has this power-of-2 limitation on the number of data queues. Are you going to remove this limitation by working around it in the driver? I thought only the queue size had such a power-of-2 limitation.

Thanks,
-Siwei
+		max_vqs = min_t(u32, max_vqs, 2 * add_config->max_vq_pairs);
+	}
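For what it's worth, the open-coded test above, max_vq_pairs & (max_vq_pairs - 1),
is the usual power-of-two bit trick: a power of two has exactly one bit set, so
clearing its lowest set bit leaves zero. The kernel already wraps this as
is_power_of_2() in <linux/log2.h>, and combined with the dev_warn_once()
suggestion the check could look roughly like the sketch below (only a sketch,
not necessarily the form the author prefers; note that is_power_of_2() also
rejects 0, which the open-coded form does not):

	if (!is_power_of_2(add_config->max_vq_pairs) ||
	    add_config->max_vq_pairs > max_vqs / 2) {
		dev_warn_once(mdev->device,
			      "max_vq_pairs (%u) must be a power of 2 and at most %u\n",
			      add_config->max_vq_pairs, max_vqs / 2);
		return -EINVAL;
	}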
   	ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
   				 name, false);
   	if (IS_ERR(ndev))
   		return PTR_ERR(ndev);
+	ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
+	ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
+	if (!ndev->vqs || !ndev->event_cbs) {
+		err = -ENOMEM;
+		goto err_mtu;
It is not a good idea to call mutex_destroy() without having called mutex_init() first.
Introduce another err label before put_device()? (See the sketch a few lines below for the general pattern.)
Thanks, will fix.
-Siwei

+	}
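On the error-label point, here is a minimal generic sketch of the usual kernel
unwind pattern (foo, foo_register and err_alloc are made-up names, this is not
the actual mlx5_vnet code): the labels mirror the initialization order, so a
failure before mutex_init() has to jump past mutex_destroy():

	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct foo {
		struct mutex lock;
		void **vqs;
	};

	/* Stand-in for whatever setup follows the allocations. */
	static int foo_register(struct foo *f)
	{
		return 0;
	}

	static int foo_add(unsigned int max_vqs)
	{
		struct foo *f;
		int err;

		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			return -ENOMEM;

		f->vqs = kcalloc(max_vqs, sizeof(*f->vqs), GFP_KERNEL);
		if (!f->vqs) {
			err = -ENOMEM;
			goto err_alloc;		/* the mutex is not initialized yet */
		}

		mutex_init(&f->lock);

		err = foo_register(f);
		if (err)
			goto err_register;	/* now the mutex does need destroying */

		return 0;

	err_register:
		mutex_destroy(&f->lock);
	err_alloc:
		kfree(f->vqs);			/* kfree(NULL) is a no-op */
		kfree(f);
		return err;
	}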
   	ndev->mvdev.max_vqs = max_vqs;
   	mvdev = &ndev->mvdev;
   	mvdev->mdev = mdev;
@@ -2676,7 +2684,8 @@ static int mlx5v_probe(struct auxiliary_device *adev,
   	mgtdev->mgtdev.ops = &mdev_ops;
   	mgtdev->mgtdev.device = mdev->device;
   	mgtdev->mgtdev.id_table = id_table;
-	mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
+	mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
+					  BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
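With VDPA_ATTR_DEV_NET_CFG_MAX_VQP advertised in config_attr_mask, the
attribute becomes settable from userspace at device-add time; assuming an
iproute2 vdpa tool new enough to know the max_vqp keyword, provisioning would
look something like

	vdpa dev add name vdpa0 mgmtdev auxiliary/mlx5_core.sf.1 max_vqp 4

(the mgmtdev name here is just an example).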
   	mgtdev->madev = madev;
   	err = vdpa_mgmtdev_register(&mgtdev->mgtdev);

_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization


