Re: [PATCH v2 1/2] drm/virtio: Refactor job submission code path

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Sun, Mar 19, 2023 at 9:11 AM Dmitry Osipenko
<dmitry.osipenko@xxxxxxxxxxxxx> wrote:
>
> Move virtio_gpu_execbuffer_ioctl() into separate virtgpu_submit.c file
> and refactor the code along the way to ease addition of new features to
> the ioctl.
>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@xxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/virtio/Makefile         |   2 +-
>  drivers/gpu/drm/virtio/virtgpu_drv.h    |   4 +
>  drivers/gpu/drm/virtio/virtgpu_ioctl.c  | 182 ---------------
>  drivers/gpu/drm/virtio/virtgpu_submit.c | 298 ++++++++++++++++++++++++
>  4 files changed, 303 insertions(+), 183 deletions(-)
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_submit.c
>

<snip all the deletes>

> diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
> new file mode 100644
> index 000000000000..a96f9d3285c7
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
> @@ -0,0 +1,298 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright (C) 2015 Red Hat, Inc.
> + * All Rights Reserved.
> + *
> + * Authors:
> + *    Dave Airlie
> + *    Alon Levy
> + */
> +
> +#include <linux/file.h>
> +#include <linux/sync_file.h>
> +#include <linux/uaccess.h>
> +
> +#include <drm/drm_file.h>
> +#include <drm/virtgpu_drm.h>
> +
> +#include "virtgpu_drv.h"
> +
> +struct virtio_gpu_submit {
> +       struct virtio_gpu_object_array *buflist;
> +       struct drm_virtgpu_execbuffer *exbuf;
> +       struct virtio_gpu_fence *out_fence;
> +       struct virtio_gpu_fpriv *vfpriv;
> +       struct virtio_gpu_device *vgdev;
> +       struct drm_file *file;
> +       uint64_t fence_ctx;
> +       uint32_t ring_idx;
> +       int out_fence_fd;
> +       void *buf;
> +};
> +
> +static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
> +                                   struct dma_fence *dma_fence)
> +{
> +       uint32_t context = submit->fence_ctx + submit->ring_idx;
> +
> +       if (dma_fence_match_context(dma_fence, context))
> +               return 0;
> +
> +       return dma_fence_wait(dma_fence, true);
> +}
> +
> +static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
> +                                    struct dma_fence *fence)
> +{
> +       struct dma_fence *itr;
> +       int idx, err;
> +
> +       dma_fence_array_for_each(itr, idx, fence) {

I guess unwrapping is for the later step of host waits?

At any rate, I think you should use dma_fence_unwrap_for_each() to
handle the fence-chain case as well?

> +               err = virtio_gpu_do_fence_wait(submit, itr);
> +               if (err)
> +                       return err;
> +       }
> +
> +       return 0;
> +}
> +
> +static int virtio_gpu_fence_event_create(struct drm_device *dev,
> +                                        struct drm_file *file,
> +                                        struct virtio_gpu_fence *fence,
> +                                        uint32_t ring_idx)
> +{
> +       struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
> +       struct virtio_gpu_fence_event *e = NULL;
> +       int ret;
> +
> +       if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
> +               return 0;
> +
> +       e = kzalloc(sizeof(*e), GFP_KERNEL);
> +       if (!e)
> +               return -ENOMEM;
> +
> +       e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
> +       e->event.length = sizeof(e->event);
> +
> +       ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
> +       if (ret) {
> +               kfree(e);
> +               return ret;
> +       }
> +
> +       fence->e = e;
> +
> +       return 0;
> +}
> +
> +static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
> +{
> +       struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
> +       uint32_t *bo_handles;
> +
> +       if (!exbuf->num_bo_handles)
> +               return 0;
> +
> +       bo_handles = kvmalloc_array(exbuf->num_bo_handles, sizeof(uint32_t),
> +                                   GFP_KERNEL);
> +       if (!bo_handles)
> +               return -ENOMEM;
> +
> +       if (copy_from_user(bo_handles, u64_to_user_ptr(exbuf->bo_handles),
> +                          exbuf->num_bo_handles * sizeof(uint32_t))) {
> +               kvfree(bo_handles);
> +               return -EFAULT;
> +       }
> +
> +       submit->buflist = virtio_gpu_array_from_handles(submit->file, bo_handles,
> +                                                       exbuf->num_bo_handles);
> +       if (!submit->buflist) {
> +               kvfree(bo_handles);
> +               return -ENOENT;
> +       }
> +
> +       kvfree(bo_handles);
> +
> +       return 0;
> +}
> +
> +static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
> +{
> +       if (!IS_ERR(submit->buf))
> +               kvfree(submit->buf);
> +
> +       if (submit->buflist)
> +               virtio_gpu_array_put_free(submit->buflist);
> +
> +       if (submit->out_fence_fd >= 0)
> +               put_unused_fd(submit->out_fence_fd);
> +}
> +
> +static void virtio_gpu_submit(struct virtio_gpu_submit *submit)
> +{
> +       virtio_gpu_cmd_submit(submit->vgdev, submit->buf, submit->exbuf->size,
> +                             submit->vfpriv->ctx_id, submit->buflist,
> +                             submit->out_fence);
> +       virtio_gpu_notify(submit->vgdev);
> +
> +       submit->buf = NULL;
> +       submit->buflist = NULL;
> +       submit->out_fence_fd = -1;
> +}
> +
> +static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
> +                                 struct drm_virtgpu_execbuffer *exbuf,
> +                                 struct drm_device *dev,
> +                                 struct drm_file *file,
> +                                 uint64_t fence_ctx, uint32_t ring_idx)
> +{
> +       struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
> +       struct virtio_gpu_device *vgdev = dev->dev_private;
> +       struct virtio_gpu_fence *out_fence;
> +       int err;
> +
> +       memset(submit, 0, sizeof(*submit));
> +
> +       out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
> +       if (!out_fence)
> +               return -ENOMEM;
> +
> +       err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
> +       if (err) {
> +               dma_fence_put(&out_fence->f);
> +               return err;
> +       }

If we fail at any point after here, where is the out_fence reference dropped?

> +
> +       submit->out_fence = out_fence;
> +       submit->fence_ctx = fence_ctx;
> +       submit->ring_idx = ring_idx;
> +       submit->out_fence_fd = -1;
> +       submit->vfpriv = vfpriv;
> +       submit->vgdev = vgdev;
> +       submit->exbuf = exbuf;
> +       submit->file = file;
> +
> +       err = virtio_gpu_init_submit_buflist(submit);
> +       if (err)
> +               return err;
> +
> +       submit->buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
> +       if (IS_ERR(submit->buf))
> +               return PTR_ERR(submit->buf);
> +
> +       if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
> +               err = get_unused_fd_flags(O_CLOEXEC);
> +               if (err < 0)
> +                       return err;
> +
> +               submit->out_fence_fd = err;
> +       }
> +
> +       return 0;
> +}
> +
> +static int virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit)
> +{
> +       int ret = 0;
> +
> +       if (submit->exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
> +               struct dma_fence *in_fence =
> +                               sync_file_get_fence(submit->exbuf->fence_fd);
> +               if (!in_fence)
> +                       return -EINVAL;
> +
> +               /*
> +                * Wait if the fence is from a foreign context, or if the fence
> +                * array contains any fence from a foreign context.
> +                */
> +               ret = virtio_gpu_dma_fence_wait(submit, in_fence);
> +
> +               dma_fence_put(in_fence);
> +       }
> +
> +       return ret;
> +}
> +
> +static int virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit)
> +{
> +       if (submit->out_fence_fd >= 0) {
> +               struct sync_file *sync_file =
> +                                       sync_file_create(&submit->out_fence->f);
> +               if (!sync_file)
> +                       return -ENOMEM;
> +
> +               submit->exbuf->fence_fd = submit->out_fence_fd;
> +               fd_install(submit->out_fence_fd, sync_file->file);
> +       }
> +
> +       return 0;
> +}
> +
> +static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit)
> +{
> +       if (submit->buflist)
> +               return virtio_gpu_array_lock_resv(submit->buflist);
> +
> +       return 0;
> +}
> +
> +/*
> + * Usage of execbuffer:
> + * Relocations need to take into account the full VIRTIO_GPUDrawable size.
> + * However, the command as passed from user space must *not* contain the initial
> + * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
> + */

I know this is just getting moved from the old location, but I'm not
even sure what this comment means ;-)

At least it doesn't make any sense for non-virgl contexts.. I haven't
looked too closely at virgl protocol itself

BR,
-R

> +int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
> +                               struct drm_file *file)
> +{
> +       struct virtio_gpu_device *vgdev = dev->dev_private;
> +       struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
> +       uint64_t fence_ctx = vgdev->fence_drv.context;
> +       struct drm_virtgpu_execbuffer *exbuf = data;
> +       struct virtio_gpu_submit submit;
> +       uint32_t ring_idx = 0;
> +       int ret = -EINVAL;
> +
> +       if (vgdev->has_virgl_3d == false)
> +               return -ENOSYS;
> +
> +       if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
> +               return ret;
> +
> +       if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) {
> +               if (exbuf->ring_idx >= vfpriv->num_rings)
> +                       return ret;
> +
> +               if (!vfpriv->base_fence_ctx)
> +                       return ret;
> +
> +               fence_ctx = vfpriv->base_fence_ctx;
> +               ring_idx = exbuf->ring_idx;
> +       }
> +
> +       virtio_gpu_create_context(dev, file);
> +
> +       ret = virtio_gpu_init_submit(&submit, exbuf, dev, file,
> +                                    fence_ctx, ring_idx);
> +       if (ret)
> +               goto cleanup;
> +
> +       ret = virtio_gpu_wait_in_fence(&submit);
> +       if (ret)
> +               goto cleanup;
> +
> +       ret = virtio_gpu_install_out_fence_fd(&submit);
> +       if (ret)
> +               goto cleanup;
> +
> +       ret = virtio_gpu_lock_buflist(&submit);
> +       if (ret)
> +               goto cleanup;
> +
> +       virtio_gpu_submit(&submit);
> +cleanup:
> +       virtio_gpu_cleanup_submit(&submit);
> +
> +       return ret;
> +}
> --
> 2.39.2
>
_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization




[Index of Archives]     [KVM Development]     [Libvirt Development]     [Libvirt Users]     [CentOS Virtualization]     [Netdev]     [Ethernet Bridging]     [Linux Wireless]     [Kernel Newbies]     [Security]     [Linux for Hams]     [Netfilter]     [Bugtraq]     [Yosemite Forum]     [MIPS Linux]     [ARM Linux]     [Linux RAID]     [Linux Admin]     [Samba]

  Powered by Linux