Re: [PATCH v18 3/3] vfio/nvgrace-gpu: Add vfio pci variant module for grace hopper

Thanks Kevin and Yishai for the reviews. Comments inline.

>> +static int nvgrace_gpu_mmap(struct vfio_device *core_vdev,
>> +                         struct vm_area_struct *vma)
>> +{
>> +     struct nvgrace_gpu_pci_core_device *nvdev =
>> +             container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
>> +                          core_device.vdev);
>
> No need for a new line here.

Ack.

>> +static ssize_t
>> +nvgrace_gpu_read_mem(struct nvgrace_gpu_pci_core_device *nvdev,
>> +                  char __user *buf, size_t count, loff_t *ppos)
>> +{
>> +     u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
>> +     unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
>> +     struct mem_region *memregion;
>> +     size_t mem_count, i;
>> +     u8 val = 0xFF;
>> +     int ret;
>> +
>> +     memregion = nvgrace_gpu_memregion(index, nvdev);
>> +     if (!memregion)
>
> Can that happen? It was just tested by the caller.

OK, I can remove it and put a comment instead noting that this has already been checked by the caller.
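
Roughly something like this in the next version (untested sketch, assuming
nvgrace_gpu_read() stays the only caller):

	/*
	 * nvgrace_gpu_memregion() cannot return NULL here; the caller has
	 * already validated the index against the device memory regions.
	 */
	memregion = nvgrace_gpu_memregion(index, nvdev);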

>> +     /*
>> +      * Determine how many bytes are actually to be read from the device memory.
>> +      * Reads beyond the actual device memory size are filled with ~0, while
>> +      * those beyond the reported size are skipped.
>> +      */
>> +     if (offset >= memregion->memlength)
>> +             mem_count = 0;
>> +     else
>> +             mem_count = min(count, memregion->memlength - (size_t)offset);
>> +
>> +     ret = nvgrace_gpu_map_and_read(nvdev, buf, mem_count, ppos);
>> +     if (ret)
>> +             return ret;
>> +
>> +     /*
>> +      * Only the device memory present on the hardware is mapped, which may
>> +      * not be power-of-2 aligned. A read to an offset beyond the device memory
>> +      * size is filled with ~0.
>> +      */
>> +     for (i = mem_count; i < count; i++)
>> +             put_user(val, (unsigned char __user *)(buf + i));
>
> Did you consider a failure here?

Yeah, that has to be checked here. Will make the change in the next post.
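
i.e., something along these lines (untested):

	for (i = mem_count; i < count; i++) {
		/* Propagate a fault while filling the faked tail with ~0. */
		ret = put_user(val, (unsigned char __user *)(buf + i));
		if (ret)
			return ret;
	}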

>> +/*
>> + * Write count bytes to the device memory at a given offset. The actual device
>> + * memory size (available) may not be a power-of-2. So the driver fakes the
>> + * size to a power-of-2 (reported) when exposing to a user space driver.
>> + *
>> + * Writes extending beyond the reported size are truncated; writes starting
>> + * beyond the reported size generate -EINVAL.
>> + */
>> +static ssize_t
>> +nvgrace_gpu_write_mem(struct nvgrace_gpu_pci_core_device *nvdev,
>> +                   size_t count, loff_t *ppos, const char __user *buf)
>> +{
>> +     unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
>> +     u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
>> +     struct mem_region *memregion;
>> +     size_t mem_count;
>> +     int ret = 0;
>> +
>> +     memregion = nvgrace_gpu_memregion(index, nvdev);
>> +     if (!memregion)
>
> Same as the above note in nvgrace_gpu_read_mem().

Ack.

>> +static const struct vfio_device_ops nvgrace_gpu_pci_ops = {
>> +     .name           = "nvgrace-gpu-vfio-pci",
>> +     .init           = vfio_pci_core_init_dev,
>> +     .release        = vfio_pci_core_release_dev,
>> +     .open_device    = nvgrace_gpu_open_device,
>> +     .close_device   = nvgrace_gpu_close_device,
>> +     .ioctl          = nvgrace_gpu_ioctl,
>> +     .read           = nvgrace_gpu_read,
>> +     .write          = nvgrace_gpu_write,
>> +     .mmap           = nvgrace_gpu_mmap,
>> +     .request        = vfio_pci_core_request,
>> +     .match          = vfio_pci_core_match,
>> +     .bind_iommufd   = vfio_iommufd_physical_bind,
>> +     .unbind_iommufd = vfio_iommufd_physical_unbind,
>> +     .attach_ioas    = vfio_iommufd_physical_attach_ioas,
>> +     .detach_ioas    = vfio_iommufd_physical_detach_ioas,
>> +};
>> +
>> +static const struct vfio_device_ops nvgrace_gpu_pci_core_ops = {
>> +     .name           = "nvgrace-gpu-vfio-pci-core",
>> +     .init           = vfio_pci_core_init_dev,
>> +     .release        = vfio_pci_core_release_dev,
>> +     .open_device    = nvgrace_gpu_open_device,
>> +     .close_device   = vfio_pci_core_close_device,
>> +     .ioctl          = vfio_pci_core_ioctl,
>> +     .device_feature = vfio_pci_core_ioctl_feature,
>
> This entry is missing above as part of nvgrace_gpu_pci_ops.

Yes. Will add.
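
nvgrace_gpu_pci_ops would gain the same callback, roughly:

	.ioctl		= nvgrace_gpu_ioctl,
	.device_feature	= vfio_pci_core_ioctl_feature,
	.read		= nvgrace_gpu_read,
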
>> +     .read           = vfio_pci_core_read,
>> +     .write          = vfio_pci_core_write,
>> +     .mmap           = vfio_pci_core_mmap,
>> +     .request        = vfio_pci_core_request,
>> +     .match          = vfio_pci_core_match,
>> +     .bind_iommufd   = vfio_iommufd_physical_bind,
>> +     .unbind_iommufd = vfio_iommufd_physical_unbind,
>> +     .attach_ioas    = vfio_iommufd_physical_attach_ioas,
>> +     .detach_ioas    = vfio_iommufd_physical_detach_ioas,
>> +};
>> +
>> +static struct
>> +nvgrace_gpu_pci_core_device *nvgrace_gpu_drvdata(struct pci_dev *pdev)
>> +{
>> +     struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
>> +
>> +     return container_of(core_device, struct nvgrace_gpu_pci_core_device,
>> +                         core_device);
>> +}
>
> The above function is called only once.
> You could just inline its first line (i.e. struct vfio_pci_core_device
> *core_device = dev_get_drvdata(&pdev->dev);) and drop it.

True, will fix.
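
The single call site would then become something like (sketch; assuming it
only needs the nvdev pointer):

	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
	struct nvgrace_gpu_pci_core_device *nvdev =
		container_of(core_device, struct nvgrace_gpu_pci_core_device,
			     core_device);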

>> +
>> +     /*
>> +      * The USEMEM part of the device memory has to be MEMBLK_SIZE
>> +      * aligned. This is a hardwired ABI value between the GPU FW and
>> +      * VFIO driver. The VM device driver is also aware of it and makes
>> +      * use of the value in its calculation of the USEMEM size.
>> +      */
>> +     nvdev->usemem.memlength = round_down(nvdev->usemem.memlength,
>> +                                          MEMBLK_SIZE);
>> +     if ((check_add_overflow(nvdev->usemem.memphys,
>> +                             nvdev->usemem.memlength,
>> +                             &nvdev->resmem.memphys)) ||
>> +         (check_sub_overflow(memlength, nvdev->usemem.memlength,
>> +                             &nvdev->resmem.memlength))) {
>> +             ret = -EOVERFLOW;
>> +             goto done;
>> +     }
>> +
>> +     if (nvdev->usemem.memlength == 0) {
>> +             ret = -EINVAL;
>> +             goto done;
>> +     }
>> +
>
> Couldn't that check be done earlier in this function?

Yes, will move it.
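
e.g., a sketch with the check placed right after the round_down, ahead of the
overflow math (assuming nothing in between depends on it):

	nvdev->usemem.memlength = round_down(nvdev->usemem.memlength,
					     MEMBLK_SIZE);
	/* Nothing usable is left after aligning down to MEMBLK_SIZE. */
	if (nvdev->usemem.memlength == 0) {
		ret = -EINVAL;
		goto done;
	}

The overflow checks would follow unchanged.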

>> +
>> +MODULE_LICENSE("GPL");
>> +MODULE_AUTHOR("Ankit Agrawal <ankita@xxxxxxxxxx>");
>> +MODULE_AUTHOR("Aniket Agashe <aniketa@xxxxxxxxxx>");
>> +MODULE_DESCRIPTION("VFIO NVGRACE GPU PF - User Level driver for NVIDIA devices with CPU coherently accessible device memory");
>
> I'm not in the full details here, however, the construction of the
> variant driver looks OK, so:
>
> Reviewed-by: Yishai Hadas <yishaih@xxxxxxxxxx>

Thanks.