Re: [PATCH 3/8] drm/amdgpu: Implement mmap as GEM object function

Hi Thomas,

On 06.04.21 at 12:38, Thomas Zimmermann wrote:
Hi

On 06.04.21 at 11:35, Christian König wrote:
On 06.04.21 at 11:08, Thomas Zimmermann wrote:
Moving the driver-specific mmap code into a GEM object function allows
for using DRM helpers for various mmap callbacks.

This change resolves several inconsistencies between regular mmap and
prime-based mmap. The vm_ops field in vma is now set for all mmap'ed
areas. Previously it was only set for regular mmap calls; prime-based
mmap used TTM's default vm_ops. The check for kfd_bo has been taken
from amdgpu_verify_access(), which is no longer called and has been
removed.

As a side effect, amdgpu_ttm_vm_ops and amdgpu_ttm_fault() are now
implemented in amdgpu's GEM code.

Signed-off-by: Thomas Zimmermann <tzimmermann@xxxxxxx>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 46 -------------
  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h |  2 -
  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c     |  4 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c     | 64 +++++++++++++++++++
  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c     | 71 ---------------------
  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h     |  1 -
  6 files changed, 66 insertions(+), 122 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e0c4f7c7f1b9..19c5ab08d9ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -42,52 +42,6 @@
  #include <linux/pci-p2pdma.h>
  #include <linux/pm_runtime.h>
-/**
- * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
- * @obj: GEM BO
- * @vma: Virtual memory area
- *
- * Sets up a userspace mapping of the BO's memory in the given
- * virtual memory area.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
-              struct vm_area_struct *vma)
-{
-    struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-    unsigned asize = amdgpu_bo_size(bo);
-    int ret;
-
-    if (!vma->vm_file)
-        return -ENODEV;
-
-    if (adev == NULL)
-        return -ENODEV;
-
-    /* Check for valid size. */
-    if (asize < vma->vm_end - vma->vm_start)
-        return -EINVAL;
-
-    if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
-        (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
-        return -EPERM;
-    }
-    vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
-
-    /* prime mmap does not need to check access, so allow here */
-    ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
-    if (ret)
-        return ret;
-
-    ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
-    drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
-
-    return ret;
-}
-
  static int
  __dma_resv_make_exclusive(struct dma_resv *obj)
  {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
index 39b5b9616fd8..3e93b9b407a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -31,8 +31,6 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
                          struct dma_buf *dma_buf);
  bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
                        struct amdgpu_bo *bo);
-int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
-              struct vm_area_struct *vma);
  extern const struct dma_buf_ops amdgpu_dmabuf_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 76f48f79c70b..e96d2758f4bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1656,7 +1656,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
      .flush = amdgpu_flush,
      .release = drm_release,
      .unlocked_ioctl = amdgpu_drm_ioctl,
-    .mmap = amdgpu_mmap,
+    .mmap = drm_gem_mmap,
      .poll = drm_poll,
      .read = drm_read,
  #ifdef CONFIG_COMPAT
@@ -1719,7 +1719,7 @@ static const struct drm_driver amdgpu_kms_driver = {
      .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
      .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
      .gem_prime_import = amdgpu_gem_prime_import,
-    .gem_prime_mmap = amdgpu_gem_prime_mmap,
+    .gem_prime_mmap = drm_gem_prime_mmap,
      .name = DRIVER_NAME,
      .desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index fb7171e5507c..fe93faad05f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -41,6 +41,36 @@
  static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)

Please name that function amdgpu_gem_fault or amdgpu_gem_object_fault.

+{
+    struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+    vm_fault_t ret;
+
+    ret = ttm_bo_vm_reserve(bo, vmf);
+    if (ret)
+        return ret;
+
+    ret = amdgpu_bo_fault_reserve_notify(bo);
+    if (ret)
+        goto unlock;
+
+    ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+                       TTM_BO_VM_NUM_PREFAULT, 1);
+    if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+        return ret;
+
+unlock:
+    dma_resv_unlock(bo->base.resv);
+    return ret;
+}
+
+static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
+    .fault = amdgpu_ttm_fault,
+    .open = ttm_bo_vm_open,
+    .close = ttm_bo_vm_close,
+    .access = ttm_bo_vm_access
+};
+
  static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
  {
      struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
@@ -201,6 +231,38 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
      ttm_eu_backoff_reservation(&ticket, &list);
  }
+static int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+    struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+    unsigned long asize = amdgpu_bo_size(bo);
+
+    if (!vma->vm_file)
+        return -ENODEV;
+
+    if (!adev)
+        return -ENODEV;
+
+    /* Check for valid size. */
+    if (asize < vma->vm_end - vma->vm_start)
+        return -EINVAL;

+
+    /*
+     * Don't verify access for KFD BOs. They don't have a GEM
+     * object associated with them.
+     */
+    if (bo->kfd_bo)
+        goto out;

Who does the access verification now?

This is somewhat confusing.

I took this check as-is, including the comment, from amdgpu's verify_access function. The verify_access function was called by ttm_bo_mmap(); it returned 0 for KFD BOs and ttm_bo_mmap() then did the mapping.


Christian.

+
+    if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+        (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
+        return -EPERM;
+    }
+
+out:
+    return drm_gem_ttm_mmap(obj, vma);
+}
+
  static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
      .free = amdgpu_gem_object_free,
      .open = amdgpu_gem_object_open,
@@ -208,6 +270,8 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
      .export = amdgpu_gem_prime_export,
      .vmap = drm_gem_ttm_vmap,
      .vunmap = drm_gem_ttm_vunmap,
+    .mmap = amdgpu_gem_prime_mmap,
+    .vm_ops = &amdgpu_ttm_vm_ops,
  };
  /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1c6131489a85..d9de91a517c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -152,32 +152,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
      *placement = abo->placement;
  }
-/**
- * amdgpu_verify_access - Verify access for a mmap call
- *
- * @bo:    The buffer object to map
- * @filp: The file pointer from the process performing the mmap
- *
- * This is called by ttm_bo_mmap() to verify whether a process
- * has the right to mmap a BO to their process space.
- */
-static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
-    struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-
-    /*
-     * Don't verify access for KFD BOs. They don't have a GEM
-     * object associated with them.
-     */
-    if (abo->kfd_bo)
-        return 0;
-
-    if (amdgpu_ttm_tt_get_usermm(bo->ttm))
-        return -EPERM;
-    return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
-                      filp->private_data);
-}

Here's the original verification code. It gives a free pass to KFD.

-
  /**
   * amdgpu_ttm_map_buffer - Map memory into the GART windows
   * @bo: buffer object to map
@@ -1531,7 +1505,6 @@ static struct ttm_device_funcs amdgpu_bo_driver = {
      .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
      .evict_flags = &amdgpu_evict_flags,
      .move = &amdgpu_bo_move,
-    .verify_access = &amdgpu_verify_access,
      .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
      .release_notify = &amdgpu_bo_release_notify,
      .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
@@ -1906,50 +1879,6 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
      adev->mman.buffer_funcs_enabled = enable;
  }
-static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
-{
-    struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
-    vm_fault_t ret;
-
-    ret = ttm_bo_vm_reserve(bo, vmf);
-    if (ret)
-        return ret;
-
-    ret = amdgpu_bo_fault_reserve_notify(bo);
-    if (ret)
-        goto unlock;
-
-    ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-                       TTM_BO_VM_NUM_PREFAULT, 1);
-    if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-        return ret;
-
-unlock:
-    dma_resv_unlock(bo->base.resv);
-    return ret;
-}
-
-static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
-    .fault = amdgpu_ttm_fault,
-    .open = ttm_bo_vm_open,
-    .close = ttm_bo_vm_close,
-    .access = ttm_bo_vm_access
-};
-
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-    struct drm_file *file_priv = filp->private_data;
-    struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
-    int r;
-
-    r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
-    if (unlikely(r != 0))
-        return r;
-
-    vma->vm_ops = &amdgpu_ttm_vm_ops;
-    return 0;
-}

And this was the mmap callback in struct file_operations. It called ttm_bo_mmap(), whose verify_access hook skipped verification for KFD BOs; a rough sketch of that path follows below. To the best of my knowledge, there was no additional verification for these KFD BOs.
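
For context, a minimal sketch of that old path, with a hypothetical lookup helper, error paths trimmed, and header/field names from memory (not verbatim TTM code):

#include <linux/fs.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_device.h>

/* Hypothetical helper standing in for TTM's internal offset lookup. */
struct ttm_buffer_object *lookup_bo_from_offset(struct ttm_device *bdev,
                                                unsigned long pgoff,
                                                unsigned long npages);

/* Sketch only: rough shape of the pre-series amdgpu_mmap()/ttm_bo_mmap() path. */
static int sketch_old_amdgpu_mmap(struct file *filp,
                                  struct vm_area_struct *vma,
                                  struct ttm_device *bdev)
{
        struct ttm_buffer_object *bo;
        int ret;

        /* Find the BO behind the fake offset in vma->vm_pgoff. */
        bo = lookup_bo_from_offset(bdev, vma->vm_pgoff, vma_pages(vma));
        if (!bo)
                return -EINVAL;

        /*
         * Per-driver hook, here amdgpu_verify_access(): 0 for KFD BOs,
         * -EPERM for userptr BOs, otherwise drm_vma_node_verify_access().
         */
        ret = bdev->funcs->verify_access(bo, filp);
        if (ret)
                return ret;

        /* ttm_bo_mmap() then set up the mapping; amdgpu installed its vm_ops. */
        return 0;
}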

The original code in amdgpu_gem_prime_mmap() did some verification, but didn't handle KFD specially. I guess PRIME needs GEM, and KFD BOs wouldn't qualify.

In the end I went with the semantics I found in amdgpu_mmap() and handled KFD specially. Let me know if this needs to be changed.

Well, the question is: where is the call to drm_vma_node_verify_access() now? Because that check needs to be skipped for KFD BOs.

Regards,
Christian.
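
For reference, with .mmap = drm_gem_mmap in amdgpu_driver_kms_fops, the equivalent of drm_vma_node_verify_access() now runs in the DRM core before the GEM object's mmap callback is invoked. A simplified sketch of that path, from memory (refcounting and unplug handling elided, not verbatim kernel code):

#include <linux/fs.h>
#include <linux/mm.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>

static int sketch_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_offset_node *node;
        struct drm_gem_object *obj;

        /* Find the GEM object behind the fake mmap offset. */
        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
        if (!node)
                return -EINVAL;
        obj = container_of(node, struct drm_gem_object, vma_node);

        /*
         * GEM-level counterpart of drm_vma_node_verify_access(): only
         * drm_files that hold a handle on the object (or were granted
         * access via drm_vma_node_allow()) may map it.
         */
        if (!drm_vma_node_is_allowed(node, priv))
                return -EACCES;

        /* Calls obj->funcs->mmap() and installs obj->funcs->vm_ops. */
        return drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                                vma);
}

If KFD BOs were mapped through this path, the drm_vma_node_is_allowed() check would apply to them as well, which is what the question above is getting at.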


Best regards
Thomas

-
  int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                 uint64_t dst_offset, uint32_t byte_count,
                 struct dma_resv *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index dec0db8b0b13..6e51faad7371 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -146,7 +146,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
              struct dma_resv *resv,
              struct dma_fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
  int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
  int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
  uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel

