RE: [PATCH v9 5/9] hisi_acc_vfio_pci: Restrict access to VF dev BAR2 migration region

> From: Shameer Kolothum <shameerali.kolothum.thodi@xxxxxxxxxx>
> Sent: Wednesday, March 9, 2022 2:49 AM
> 
> HiSilicon ACC VF device BAR2 region consists of both functional register
> space and migration control register space. Unnecessarily exposing the
> migration BAR region to the Guest could block or corrupt Guest
> migration.
> 
> Hence, introduce a separate struct vfio_device_ops for migration support,
> which overrides the ioctl/read/write/mmap methods to hide the migration
> region and restrict Guest access to the functional register space only.
> 
> This will be used in subsequent patches when we add migration support
> to the driver.
> 
> Please note that it is OK to export the entire VF BAR if migration is
> not supported or required, as this cannot affect the PF configuration.
> 
> Reviewed-by: Longfang Liu <liulongfang@xxxxxxxxxx>
> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@xxxxxxxxxx>

Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
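
As background for the checks below: vfio-pci encodes the region index in the
upper bits of the file offset, so a read/write at an offset within BAR2
arrives as index << VFIO_PCI_OFFSET_SHIFT plus the offset into the BAR. The
following is a minimal standalone sketch of that encoding and of the
half-BAR check, not part of the patch; it assumes the usual
VFIO_PCI_OFFSET_SHIFT of 40 from vfio_pci_core.h and a hypothetical BAR2
size of 0x8000.

/*
 * Illustrative sketch, not part of this patch: how vfio-pci encodes the
 * region index in the file offset and how the BAR2 half-length check works.
 * Assumes VFIO_PCI_OFFSET_SHIFT == 40 (as in vfio_pci_core.h) and a
 * hypothetical BAR2 size.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VFIO_PCI_OFFSET_SHIFT		40
#define VFIO_PCI_OFFSET_TO_INDEX(off)	((off) >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(idx)	((uint64_t)(idx) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK		(((uint64_t)1 << VFIO_PCI_OFFSET_SHIFT) - 1)
#define VFIO_PCI_BAR2_REGION_INDEX	2

int main(void)
{
	uint64_t bar2_len = 0x8000;		/* hypothetical BAR2 size */
	uint64_t end = bar2_len / 2;		/* functional half only */
	/* A Guest access at offset 0x100 into BAR2 arrives as this ppos: */
	uint64_t ppos = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR2_REGION_INDEX) + 0x100;
	uint64_t index = VFIO_PCI_OFFSET_TO_INDEX(ppos);
	uint64_t pos = ppos & VFIO_PCI_OFFSET_MASK;

	printf("index=%" PRIu64 " pos=0x%" PRIx64 " allowed=%d\n",
	       index, pos, pos < end);
	return 0;
}

An access with pos >= bar2_len / 2, i.e. into the migration control half, is
what the new hisi_acc_pci_rw_access_check() below rejects with -EINVAL.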

> ---
>  .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c    | 126 ++++++++++++++++++
>  1 file changed, 126 insertions(+)
> 
> diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> index 8129c3457b3b..582ee4fa4109 100644
> --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> @@ -13,6 +13,119 @@
>  #include <linux/vfio.h>
>  #include <linux/vfio_pci_core.h>
> 
> +static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
> +					size_t count, loff_t *ppos,
> +					size_t *new_count)
> +{
> +	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
> +	struct vfio_pci_core_device *vdev =
> +		container_of(core_vdev, struct vfio_pci_core_device, vdev);
> +
> +	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
> +		loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
> +		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
> +
> +		/* Check if access is for migration control region */
> +		if (pos >= end)
> +			return -EINVAL;
> +
> +		*new_count = min(count, (size_t)(end - pos));
> +	}
> +
> +	return 0;
> +}
> +
> +static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
> +				  struct vm_area_struct *vma)
> +{
> +	struct vfio_pci_core_device *vdev =
> +		container_of(core_vdev, struct vfio_pci_core_device, vdev);
> +	unsigned int index;
> +
> +	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
> +	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
> +		u64 req_len, pgoff, req_start;
> +		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
> +
> +		req_len = vma->vm_end - vma->vm_start;
> +		pgoff = vma->vm_pgoff &
> +			((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
> +		req_start = pgoff << PAGE_SHIFT;
> +
> +		if (req_start + req_len > end)
> +			return -EINVAL;
> +	}
> +
> +	return vfio_pci_core_mmap(core_vdev, vma);
> +}
> +
> +static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
> +				       const char __user *buf, size_t count,
> +				       loff_t *ppos)
> +{
> +	size_t new_count = count;
> +	int ret;
> +
> +	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
> +	if (ret)
> +		return ret;
> +
> +	return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
> +}
> +
> +static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
> +				      char __user *buf, size_t count,
> +				      loff_t *ppos)
> +{
> +	size_t new_count = count;
> +	int ret;
> +
> +	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
> +	if (ret)
> +		return ret;
> +
> +	return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
> +}
> +
> +static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
> +				    unsigned long arg)
> +{
> +	if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
> +		struct vfio_pci_core_device *vdev =
> +			container_of(core_vdev, struct vfio_pci_core_device, vdev);
> +		struct pci_dev *pdev = vdev->pdev;
> +		struct vfio_region_info info;
> +		unsigned long minsz;
> +
> +		minsz = offsetofend(struct vfio_region_info, offset);
> +
> +		if (copy_from_user(&info, (void __user *)arg, minsz))
> +			return -EFAULT;
> +
> +		if (info.argsz < minsz)
> +			return -EINVAL;
> +
> +		if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
> +			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
> +
> +			/*
> +			 * ACC VF dev BAR2 region consists of both functional
> +			 * register space and migration control register space.
> +			 * Report only the functional region to Guest.
> +			 */
> +			info.size = pci_resource_len(pdev, info.index) / 2;
> +
> +			info.flags = VFIO_REGION_INFO_FLAG_READ |
> +					VFIO_REGION_INFO_FLAG_WRITE |
> +					VFIO_REGION_INFO_FLAG_MMAP;
> +
> +			return copy_to_user((void __user *)arg, &info, minsz) ?
> +					    -EFAULT : 0;
> +		}
> +	}
> +	return vfio_pci_core_ioctl(core_vdev, cmd, arg);
> +}
> +
>  static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
>  {
>  	struct vfio_pci_core_device *vdev =
> @@ -28,6 +141,19 @@ static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
>  	return 0;
>  }
> 
> +static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
> +	.name = "hisi-acc-vfio-pci-migration",
> +	.open_device = hisi_acc_vfio_pci_open_device,
> +	.close_device = vfio_pci_core_close_device,
> +	.ioctl = hisi_acc_vfio_pci_ioctl,
> +	.device_feature = vfio_pci_core_ioctl_feature,
> +	.read = hisi_acc_vfio_pci_read,
> +	.write = hisi_acc_vfio_pci_write,
> +	.mmap = hisi_acc_vfio_pci_mmap,
> +	.request = vfio_pci_core_request,
> +	.match = vfio_pci_core_match,
> +};
> +
>  static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
>  	.name = "hisi-acc-vfio-pci",
>  	.open_device = hisi_acc_vfio_pci_open_device,
> --
> 2.25.1
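
For completeness, a minimal userspace-side sketch (not from this series) of
how the trimmed BAR2 would be observed via VFIO_DEVICE_GET_REGION_INFO.
Opening the VFIO container/group and obtaining the device fd is omitted, and
query_bar2() is just an illustrative helper name:

#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Query BAR2 region info on an already-opened VFIO device fd. */
static int query_bar2(int device_fd)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_BAR2_REGION_INDEX,
	};

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info))
		return -1;

	/* With the migration ops bound, size covers only the functional half. */
	printf("BAR2: offset=0x%llx size=0x%llx flags=0x%x\n",
	       (unsigned long long)info.offset,
	       (unsigned long long)info.size, info.flags);
	return 0;
}

An mmap of that region up to info.size should succeed as before, while a
mapping that extends into the migration control half is refused by
hisi_acc_vfio_pci_mmap().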