Re: [Intel-gfx] [PATCH v2 1/3] drm/i915: Add a function to mmap framebuffer obj

On 3/20/2023 3:02 PM, Andrzej Hajda wrote:
On 20.03.2023 11:09, Nirmoy Das wrote:
Implement i915_gem_fb_mmap() to enable the fb_ops.fb_mmap()
callback for i915's framebuffer objects.

v2: add a comment on why i915_gem_object_get() is needed (Andi).

Cc: Matthew Auld <matthew.auld@xxxxxxxxx>
Cc: Andi Shyti <andi.shyti@xxxxxxxxxxxxxxx>
Cc: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx>
Cc: Jani Nikula <jani.nikula@xxxxxxxxx>
Cc: Imre Deak <imre.deak@xxxxxxxxx>
Signed-off-by: Nirmoy Das <nirmoy.das@xxxxxxxxx>
Reviewed-by: Andi Shyti <andi.shyti@xxxxxxxxxxxxxxx>

Reviewed-by: Andrzej Hajda <andrzej.hajda@xxxxxxxxx>


Thanks, Andrzej.


Going to resend it without the RFC tag now, as there are two R-bs and no one has complained.


Regards,

Nirmoy


Regards
Andrzej
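
A side note for anyone wanting to try this out: the helper added below still
needs to be wired into fb_ops.fb_mmap on the fbdev emulation side (that is
what a later patch in the series is for). A minimal sketch of such a callback,
assuming the backing GEM object is reached through the drm_fb_helper stored in
fb_info::par; the callback name and lookup path here are illustrative, not
taken from the series:

static int intel_fbdev_mmap_sketch(struct fb_info *info,
				   struct vm_area_struct *vma)
{
	/* Illustrative only: DRM fbdev emulation keeps its drm_fb_helper in par. */
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	/* Plane 0 carries the GEM object backing the framebuffer. */
	struct drm_i915_gem_object *obj = to_intel_bo(fb->obj[0]);

	return i915_gem_fb_mmap(obj, vma);
}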

---
  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 127 +++++++++++++++--------
  drivers/gpu/drm/i915/gem/i915_gem_mman.h |   2 +-
  2 files changed, 83 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index d3c1dee16af2..341e952d3510 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -927,53 +927,15 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
      return file;
  }

-/*
- * This overcomes the limitation in drm_gem_mmap's assignment of a
- * drm_gem_object as the vma->vm_private_data. Since we need to
- * be able to resolve multiple mmap offsets which could be tied
- * to a single gem object.
- */
-int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+i915_gem_object_mmap(struct drm_i915_gem_object *obj,
+             struct i915_mmap_offset *mmo,
+             struct vm_area_struct *vma)
  {
-    struct drm_vma_offset_node *node;
-    struct drm_file *priv = filp->private_data;
-    struct drm_device *dev = priv->minor->dev;
-    struct drm_i915_gem_object *obj = NULL;
-    struct i915_mmap_offset *mmo = NULL;
+    struct drm_i915_private *i915 = to_i915(obj->base.dev);
+    struct drm_device *dev = &i915->drm;
      struct file *anon;

-    if (drm_dev_is_unplugged(dev))
-        return -ENODEV;
-
-    rcu_read_lock();
-    drm_vma_offset_lock_lookup(dev->vma_offset_manager);
-    node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
-                          vma->vm_pgoff,
-                          vma_pages(vma));
-    if (node && drm_vma_node_is_allowed(node, priv)) {
-        /*
-         * Skip 0-refcnted objects as it is in the process of being
-         * destroyed and will be invalid when the vma manager lock
-         * is released.
-         */
-        if (!node->driver_private) {
-            mmo = container_of(node, struct i915_mmap_offset, vma_node);
-            obj = i915_gem_object_get_rcu(mmo->obj);
-
-            GEM_BUG_ON(obj && obj->ops->mmap_ops);
-        } else {
-            obj = i915_gem_object_get_rcu
-                (container_of(node, struct drm_i915_gem_object,
-                          base.vma_node));
-
-            GEM_BUG_ON(obj && !obj->ops->mmap_ops);
-        }
-    }
-    drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
-    rcu_read_unlock();
-    if (!obj)
-        return node ? -EACCES : -EINVAL;
-
      if (i915_gem_object_is_readonly(obj)) {
          if (vma->vm_flags & VM_WRITE) {
              i915_gem_object_put(obj);
@@ -1005,7 +967,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
      if (obj->ops->mmap_ops) {
          vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
          vma->vm_ops = obj->ops->mmap_ops;
-        vma->vm_private_data = node->driver_private;
+        vma->vm_private_data = obj->base.vma_node.driver_private;
          return 0;
      }

@@ -1043,6 +1005,81 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
      return 0;
  }

+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data. Since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+    struct drm_vma_offset_node *node;
+    struct drm_file *priv = filp->private_data;
+    struct drm_device *dev = priv->minor->dev;
+    struct drm_i915_gem_object *obj = NULL;
+    struct i915_mmap_offset *mmo = NULL;
+
+    if (drm_dev_is_unplugged(dev))
+        return -ENODEV;
+
+    rcu_read_lock();
+    drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+    node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+                          vma->vm_pgoff,
+                          vma_pages(vma));
+    if (node && drm_vma_node_is_allowed(node, priv)) {
+        /*
+         * Skip 0-refcnted objects as it is in the process of being
+         * destroyed and will be invalid when the vma manager lock
+         * is released.
+         */
+        if (!node->driver_private) {
+            mmo = container_of(node, struct i915_mmap_offset, vma_node);
+            obj = i915_gem_object_get_rcu(mmo->obj);
+
+            GEM_BUG_ON(obj && obj->ops->mmap_ops);
+        } else {
+            obj = i915_gem_object_get_rcu
+                (container_of(node, struct drm_i915_gem_object,
+                          base.vma_node));
+
+            GEM_BUG_ON(obj && !obj->ops->mmap_ops);
+        }
+    }
+    drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+    rcu_read_unlock();
+    if (!obj)
+        return node ? -EACCES : -EINVAL;
+
+    return i915_gem_object_mmap(obj, mmo, vma);
+}
+
+int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
+{
+    struct drm_i915_private *i915 = to_i915(obj->base.dev);
+    struct drm_device *dev = &i915->drm;
+    struct i915_mmap_offset *mmo = NULL;
+    enum i915_mmap_type mmap_type;
+    struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+
+    if (drm_dev_is_unplugged(dev))
+        return -ENODEV;
+
+    mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
+    mmo = mmap_offset_attach(obj, mmap_type, NULL);
+    if (!mmo)
+        return -ENODEV;
+
+    /*
+     * When we install vm_ops for mmap we are too late for
+     * the vm_ops->open() which increases the ref_count of
+     * this obj and then it gets decreased by the vm_ops->close().
+     * To balance this increase the obj ref_count here.
+     */
+    obj = i915_gem_object_get(mmo->obj);
+    return i915_gem_object_mmap(obj, mmo, vma);
+}
+
  #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
  #include "selftests/i915_gem_mman.c"
  #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 1fa91b3033b3..196417fd0f5c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -29,5 +29,5 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj);

 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
-
+int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma);
  #endif
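
Once fb_ops.fb_mmap is hooked up, the new path can be exercised from userspace
by simply mmap()ing the fbdev node; whether the mapping goes through the GTT
aperture or a WC mapping is decided by the i915_ggtt_has_aperture() check in
i915_gem_fb_mmap() above. A rough, purely illustrative test program (device
node and error handling kept minimal):

#include <fcntl.h>
#include <linux/fb.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	struct fb_fix_screeninfo fix;
	void *fbmem;
	int fd;

	fd = open("/dev/fb0", O_RDWR);
	if (fd < 0)
		return 1;

	/* smem_len is the size of the mappable framebuffer memory. */
	if (ioctl(fd, FBIOGET_FSCREENINFO, &fix)) {
		close(fd);
		return 1;
	}

	fbmem = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
		     MAP_SHARED, fd, 0);
	if (fbmem == MAP_FAILED) {
		close(fd);
		return 1;
	}

	memset(fbmem, 0xff, fix.smem_len);	/* paint the console white */

	munmap(fbmem, fix.smem_len);
	close(fd);
	return 0;
}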



