-----Original Message-----
From: Yishai Hadas [mailto:yishaih@xxxxxxxxxx]
Sent: 13 October 2021 10:47
To: alex.williamson@xxxxxxxxxx; bhelgaas@xxxxxxxxxx; jgg@xxxxxxxxxx;
saeedm@xxxxxxxxxx
Cc: linux-pci@xxxxxxxxxxxxxxx; kvm@xxxxxxxxxxxxxxx; netdev@xxxxxxxxxxxxxxx;
kuba@xxxxxxxxxx; leonro@xxxxxxxxxx; kwankhede@xxxxxxxxxx;
mgurtovoy@xxxxxxxxxx; yishaih@xxxxxxxxxx; maorg@xxxxxxxxxx
Subject: [PATCH V1 mlx5-next 11/13] vfio/mlx5: Implement vfio_pci driver for
mlx5 devices
This patch adds a vfio_pci driver for mlx5 devices.
It uses vfio_pci_core to register with the VFIO subsystem and then
implements the mlx5-specific logic in the migration area.
The migration implementation follows the definition from uapi/vfio.h and
uses the mlx5 VF->PF command channel to achieve it.
This patch implements the suspend/resume flows.
Signed-off-by: Yishai Hadas <yishaih@xxxxxxxxxx>
Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxx>
---
MAINTAINERS | 6 +
drivers/vfio/pci/Kconfig | 3 +
drivers/vfio/pci/Makefile | 2 +
drivers/vfio/pci/mlx5/Kconfig | 11 +
drivers/vfio/pci/mlx5/Makefile | 4 +
drivers/vfio/pci/mlx5/main.c | 692 +++++++++++++++++++++++++++++++++
6 files changed, 718 insertions(+)
create mode 100644 drivers/vfio/pci/mlx5/Kconfig
create mode 100644 drivers/vfio/pci/mlx5/Makefile
create mode 100644 drivers/vfio/pci/mlx5/main.c
diff --git a/MAINTAINERS b/MAINTAINERS
index abdcbcfef73d..e824bfab4a01 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19699,6 +19699,12 @@ L: kvm@xxxxxxxxxxxxxxx
S: Maintained
F: drivers/vfio/platform/
+VFIO MLX5 PCI DRIVER
+M: Yishai Hadas <yishaih@xxxxxxxxxx>
+L: kvm@xxxxxxxxxxxxxxx
+S: Maintained
+F: drivers/vfio/pci/mlx5/
+
VGA_SWITCHEROO
R: Lukas Wunner <lukas@xxxxxxxxx>
S: Maintained
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 860424ccda1b..187b9c259944 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -43,4 +43,7 @@ config VFIO_PCI_IGD
To enable Intel IGD assignment through vfio-pci, say Y.
endif
+
+source "drivers/vfio/pci/mlx5/Kconfig"
+
endif
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 349d68d242b4..ed9d6f2e0555 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_VFIO_PCI_CORE) += vfio-pci-core.o
vfio-pci-y := vfio_pci.o
vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
+
+obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5/
diff --git a/drivers/vfio/pci/mlx5/Kconfig b/drivers/vfio/pci/mlx5/Kconfig
new file mode 100644
index 000000000000..a3ce00add4fe
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MLX5_VFIO_PCI
+ tristate "VFIO support for MLX5 PCI devices"
+ depends on MLX5_CORE
+ select VFIO_PCI_CORE
+ help
+ This provides PCI support for MLX5 devices using the VFIO
+ framework. The device specific driver supports suspend/resume
+ of the MLX5 device.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/mlx5/Makefile b/drivers/vfio/pci/mlx5/Makefile
new file mode 100644
index 000000000000..689627da7ff5
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5-vfio-pci.o
+mlx5-vfio-pci-y := main.o cmd.o
+
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
new file mode 100644
index 000000000000..e36302b444a6
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/sched/mm.h>
+#include <linux/vfio_pci_core.h>
+
+#include "cmd.h"
+
+/* Bits for mvdev->vmig.dev_state (device migration sub-state). */
+enum {
+ MLX5VF_PCI_FREEZED = 1 << 0,
+};
+
+/*
+ * Bits for mvdev->vmig.region_state: track which migration-region
+ * fields userspace has accessed during the current iteration.
+ */
+enum {
+ MLX5VF_REGION_PENDING_BYTES = 1 << 0,
+ MLX5VF_REGION_DATA_SIZE = 1 << 1,
+};
+
+/* Size of the data section of the migration region */
+#define MLX5VF_MIG_REGION_DATA_SIZE SZ_128K
+/* Data section offset from migration region */
+#define MLX5VF_MIG_REGION_DATA_OFFSET
\
+ (sizeof(struct vfio_device_migration_info))
+
+/* Byte offset of field 'x' within struct vfio_device_migration_info */
+#define VFIO_DEVICE_MIGRATION_OFFSET(x)
\
+ (offsetof(struct vfio_device_migration_info, x))
+
+/* Per-VF migration bookkeeping, embedded in mlx5vf_pci_core_device. */
+struct mlx5vf_pci_migration_info {
+ u32 vfio_dev_state; /* VFIO_DEVICE_STATE_XXX */
+ u32 dev_state; /* device migration state, MLX5VF_PCI_XXX flags */
+ u32 region_state; /* Use MLX5VF_REGION_XXX */
+ u16 vhca_id;
+ struct mlx5_vhca_state_data vhca_state_data;
+};
+
+/* mlx5 vfio-pci device: vfio_pci core state plus migration state. */
+struct mlx5vf_pci_core_device {
+ struct vfio_pci_core_device core_device;
+ u8 migrate_cap:1;
+ /* protects the migration state in vmig */
+ struct mutex state_mutex;
+ struct mlx5vf_pci_migration_info vmig;
+};
+
+/*
+ * Unquiesce: issue RESUME_VHCA with the RESUME_MASTER opcode modifier,
+ * reverting mlx5vf_pci_quiesce_device().
+ */
+static int mlx5vf_pci_unquiesce_device(struct mlx5vf_pci_core_device
*mvdev)
+{
+ return mlx5vf_cmd_resume_vhca(mvdev->core_device.pdev,
+ mvdev->vmig.vhca_id,
+
MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_MASTER);
+}
+
+/* Quiesce: issue SUSPEND_VHCA with the SUSPEND_MASTER opcode modifier. */
+static int mlx5vf_pci_quiesce_device(struct mlx5vf_pci_core_device *mvdev)
+{
+ return mlx5vf_cmd_suspend_vhca(
+ mvdev->core_device.pdev, mvdev->vmig.vhca_id,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_MASTER);
+}
+
+/*
+ * Unfreeze: issue RESUME_VHCA with the RESUME_SLAVE opcode modifier;
+ * clears MLX5VF_PCI_FREEZED on success.
+ */
+static int mlx5vf_pci_unfreeze_device(struct mlx5vf_pci_core_device
*mvdev)
+{
+ int ret;
+
+ ret = mlx5vf_cmd_resume_vhca(mvdev->core_device.pdev,
+ mvdev->vmig.vhca_id,
+
MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_SLAVE);
+ if (ret)
+ return ret;
+
+ mvdev->vmig.dev_state &= ~MLX5VF_PCI_FREEZED;
+ return 0;
+}
+
+/*
+ * Freeze: issue SUSPEND_VHCA with the SUSPEND_SLAVE opcode modifier;
+ * sets MLX5VF_PCI_FREEZED on success.
+ */
+static int mlx5vf_pci_freeze_device(struct mlx5vf_pci_core_device *mvdev)
+{
+ int ret;
+
+ ret = mlx5vf_cmd_suspend_vhca(
+ mvdev->core_device.pdev, mvdev->vmig.vhca_id,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_SLAVE);
+ if (ret)
+ return ret;
+
+ mvdev->vmig.dev_state |= MLX5VF_PCI_FREEZED;
+ return 0;
+}
+
+/*
+ * Query the size of and then save the device state into
+ * vmig.vhca_state_data. The device must already be freezed
+ * (MLX5VF_PCI_FREEZED), otherwise -EFAULT is returned.
+ * NOTE(review): -EFAULT for a wrong-state caller is unusual; -EINVAL
+ * may fit better — confirm against the vfio uapi expectations.
+ */
+static int mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device
*mvdev)
+{
+ u32 state_size = 0;
+ int ret;
+
+ if (!(mvdev->vmig.dev_state & MLX5VF_PCI_FREEZED))
+ return -EFAULT;
+
+ /* If we already read state no reason to re-read */
+ if (mvdev->vmig.vhca_state_data.state_size)
+ return 0;
+
+ ret = mlx5vf_cmd_query_vhca_migration_state(
+ mvdev->core_device.pdev, mvdev->vmig.vhca_id, &state_size);
+ if (ret)
+ return ret;
+
+ return mlx5vf_cmd_save_vhca_state(mvdev->core_device.pdev,
+ mvdev->vmig.vhca_id, state_size,
+ &mvdev->vmig.vhca_state_data);
+}
+
+/*
+ * Ensure a full MLX5VF_MIG_REGION_DATA_SIZE window of pages exists past
+ * win_start_offset, allocating additional migration pages as needed so
+ * userspace can write the next chunk of resume data.
+ */
+static int mlx5vf_pci_new_write_window(struct mlx5vf_pci_core_device
*mvdev)
+{
+ struct mlx5_vhca_state_data *state_data =
&mvdev->vmig.vhca_state_data;
+ u32 num_pages_needed;
+ u64 allocated_ready;
+ u32 bytes_needed;
+
+ /* Check how many bytes are available from previous flows */
+ WARN_ON(state_data->num_pages * PAGE_SIZE <
+ state_data->win_start_offset);
+ allocated_ready = (state_data->num_pages * PAGE_SIZE) -
+ state_data->win_start_offset;
+ WARN_ON(allocated_ready > MLX5VF_MIG_REGION_DATA_SIZE);
+
+ bytes_needed = MLX5VF_MIG_REGION_DATA_SIZE - allocated_ready;
+ if (!bytes_needed)
+ return 0;
+
+ num_pages_needed = DIV_ROUND_UP_ULL(bytes_needed, PAGE_SIZE);
+ return mlx5vf_add_migration_pages(state_data, num_pages_needed);
+}
+
+/*
+ * Handler for the migration region's data_size field.
+ * Write (RESUMING only): userspace reports how many bytes it placed in
+ * the data section; state_size/win_start_offset advance by that amount
+ * and a fresh write window is allocated.
+ * Read (SAVING only): reports how many bytes of saved state remain in
+ * the current window (capped at MLX5VF_MIG_REGION_DATA_SIZE).
+ * NOTE(review): the user-written data_size is not validated against
+ * MLX5VF_MIG_REGION_DATA_SIZE before the offsets advance — confirm a
+ * bogus value cannot corrupt the window accounting (see the WARN_ONs
+ * in mlx5vf_pci_new_write_window()).
+ */
+static ssize_t
+mlx5vf_pci_handle_migration_data_size(struct mlx5vf_pci_core_device
*mvdev,
+ char __user *buf, bool iswrite)
+{
+ struct mlx5vf_pci_migration_info *vmig = &mvdev->vmig;
+ u64 data_size;
+ int ret;
+
+ if (iswrite) {
+ /* data_size is writable only during resuming state */
+ if (vmig->vfio_dev_state != VFIO_DEVICE_STATE_RESUMING)
+ return -EINVAL;
+
+ ret = copy_from_user(&data_size, buf, sizeof(data_size));
+ if (ret)
+ return -EFAULT;
+
+ vmig->vhca_state_data.state_size += data_size;
+ vmig->vhca_state_data.win_start_offset += data_size;
+ ret = mlx5vf_pci_new_write_window(mvdev);
+ if (ret)
+ return ret;
+
+ } else {
+ if (vmig->vfio_dev_state != VFIO_DEVICE_STATE_SAVING)
+ return -EINVAL;
+
+ data_size = min_t(u64, MLX5VF_MIG_REGION_DATA_SIZE,
+ vmig->vhca_state_data.state_size -
+ vmig->vhca_state_data.win_start_offset);
+ ret = copy_to_user(buf, &data_size, sizeof(data_size));
+ if (ret)
+ return -EFAULT;
+ }
+
+ vmig->region_state |= MLX5VF_REGION_DATA_SIZE;
+ return sizeof(data_size);
+}
+
+/*
+ * Handler for the read-only data_offset field: reports the fixed offset
+ * of the data section within the migration region.
+ */
+static ssize_t
+mlx5vf_pci_handle_migration_data_offset(struct mlx5vf_pci_core_device
*mvdev,
+ char __user *buf, bool iswrite)
+{
+ static const u64 data_offset = MLX5VF_MIG_REGION_DATA_OFFSET;
+ int ret;
+
+ /* RO field */
+ if (iswrite)
+ return -EFAULT;
+
+ ret = copy_to_user(buf, &data_offset, sizeof(data_offset));
+ if (ret)
+ return -EFAULT;
+
+ return sizeof(data_offset);
+}
+
+/*
+ * Handler for the read-only pending_bytes field. In pre-copy
+ * (SAVING|RUNNING) there is no data to transfer yet, so 0 is reported.
+ * Otherwise the bytes of saved state beyond win_start_offset are
+ * reported; after reporting, the window advances if the previous
+ * iteration's data_size was consumed, and a new iteration begins.
+ * NOTE(review): the bare "return 0" when no state was saved yields a
+ * zero-length read instead of writing a 0 value — confirm userspace
+ * handles that short read as "no pending data".
+ */
+static ssize_t
+mlx5vf_pci_handle_migration_pending_bytes(struct mlx5vf_pci_core_device
*mvdev,
+ char __user *buf, bool iswrite)
+{
+ struct mlx5vf_pci_migration_info *vmig = &mvdev->vmig;
+ u64 pending_bytes;
+ int ret;
+
+ /* RO field */
+ if (iswrite)
+ return -EFAULT;
+
+ if (vmig->vfio_dev_state == (VFIO_DEVICE_STATE_SAVING |
+ VFIO_DEVICE_STATE_RUNNING)) {
+ /* In pre-copy state we have no data to return for now,
+ * return 0 pending bytes
+ */
+ pending_bytes = 0;
+ } else {
+ if (!vmig->vhca_state_data.state_size)
+ return 0;
+ pending_bytes = vmig->vhca_state_data.state_size -
+ vmig->vhca_state_data.win_start_offset;
+ }
+
+ ret = copy_to_user(buf, &pending_bytes, sizeof(pending_bytes));
+ if (ret)
+ return -EFAULT;
+
+ /* Window moves forward once data from previous iteration was read */
+ if (vmig->region_state & MLX5VF_REGION_DATA_SIZE)
+ vmig->vhca_state_data.win_start_offset +=
+ min_t(u64, MLX5VF_MIG_REGION_DATA_SIZE, pending_bytes);
+
+ WARN_ON(vmig->vhca_state_data.win_start_offset >
+ vmig->vhca_state_data.state_size);
+
+ /* New iteration started */
+ vmig->region_state = MLX5VF_REGION_PENDING_BYTES;
+ return sizeof(pending_bytes);
+}
+
+/*
+ * Load the accumulated resume blob into the device via LOAD_VHCA_STATE;
+ * a no-op when no state was received from userspace.
+ */
+static int mlx5vf_load_state(struct mlx5vf_pci_core_device *mvdev)
+{
+ if (!mvdev->vmig.vhca_state_data.state_size)
+ return 0;
+
+ return mlx5vf_cmd_load_vhca_state(mvdev->core_device.pdev,
+ mvdev->vmig.vhca_id,
+ &mvdev->vmig.vhca_state_data);
+}
+
+/* Drop all per-iteration migration state: region flags and saved blob. */
+static void mlx5vf_reset_mig_state(struct mlx5vf_pci_core_device *mvdev)
+{
+ struct mlx5vf_pci_migration_info *vmig = &mvdev->vmig;
+
+ vmig->region_state = 0;
+ mlx5vf_reset_vhca_state(&vmig->vhca_state_data);
+}
+
+static int mlx5vf_pci_set_device_state(struct mlx5vf_pci_core_device
*mvdev,
+ u32 state)
+{
+ struct mlx5vf_pci_migration_info *vmig = &mvdev->vmig;
+ u32 old_state = vmig->vfio_dev_state;
+ int ret = 0;
+
+ if (vfio_is_state_invalid(state) || vfio_is_state_invalid(old_state))
+ return -EINVAL;
+
+ /* Running switches off */
+ if ((old_state & VFIO_DEVICE_STATE_RUNNING) !=
+ (state & VFIO_DEVICE_STATE_RUNNING) &&
+ (old_state & VFIO_DEVICE_STATE_RUNNING)) {
+ ret = mlx5vf_pci_quiesce_device(mvdev);
+ if (ret)
+ return ret;
+ ret = mlx5vf_pci_freeze_device(mvdev);
+ if (ret) {
+ vmig->vfio_dev_state = VFIO_DEVICE_STATE_INVALID;
+ return ret;
+ }
+ }
+
+ /* Resuming switches off */
+ if ((old_state & VFIO_DEVICE_STATE_RESUMING) !=
+ (state & VFIO_DEVICE_STATE_RESUMING) &&
+ (old_state & VFIO_DEVICE_STATE_RESUMING)) {
+ /* deserialize state into the device */
+ ret = mlx5vf_load_state(mvdev);
+ if (ret) {
+ vmig->vfio_dev_state = VFIO_DEVICE_STATE_INVALID;
+ return ret;
+ }
+ }
+
+ /* Resuming switches on */
+ if ((old_state & VFIO_DEVICE_STATE_RESUMING) !=
+ (state & VFIO_DEVICE_STATE_RESUMING) &&
+ (state & VFIO_DEVICE_STATE_RESUMING)) {
+ mlx5vf_reset_mig_state(mvdev);
+ ret = mlx5vf_pci_new_write_window(mvdev);
+ if (ret)
+ return ret;
+ }
+
+ /* Saving switches on */
+ if ((old_state & VFIO_DEVICE_STATE_SAVING) !=
+ (state & VFIO_DEVICE_STATE_SAVING) &&
+ (state & VFIO_DEVICE_STATE_SAVING)) {
+ if (!(state & VFIO_DEVICE_STATE_RUNNING)) {
+ /* serialize post copy */
+ ret = mlx5vf_pci_save_device_data(mvdev);