[RFC PATCH 29/42] drm/i915: Set correct vmf source pages for gem objects

From: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>

This allows page faults on objects with different backing stores to be
serviced through a single interface.

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>
Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.c          |  2 +-
 drivers/gpu/drm/i915/i915_drv.h          |  1 +
 drivers/gpu/drm/i915/i915_gem.c          | 58 +++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem_object.h   |  6 ++-
 drivers/gpu/drm/i915/intel_region_lmem.c | 28 ++++++++++++
 5 files changed, 92 insertions(+), 3 deletions(-)
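
For reference, this is the path a CPU mapping takes. A minimal
userspace sketch that exercises the new fault handler through the
mmap_gtt offset; assumes fd is an open i915 DRM fd and handle a
valid GEM handle, with error handling trimmed:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    static void *map_object(int fd, uint32_t handle, size_t size)
    {
            struct drm_i915_gem_mmap_gtt arg;

            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                    return MAP_FAILED;

            /* The first access to this mapping faults into
             * i915_gem_fault(), which now dispatches through
             * obj->ops->vmf_fill_pages().
             */
            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        fd, (off_t)arg.offset);
    }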

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index dc446430a340..b1200d7ebd13 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2944,7 +2944,7 @@ static const struct file_operations i915_driver_fops = {
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_gem_mmap,
+	.mmap = i915_gem_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
 	.compat_ioctl = i915_compat_ioctl,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 772fffa87545..065953a9264f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3071,6 +3071,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 vm_fault_t i915_gem_fault(struct vm_fault *vmf);
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 unsigned int flags,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 95e31529a738..48dbb57fbc6d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,6 +38,7 @@
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 #include <linux/mman.h>
+#include <linux/pfn_t.h>
 
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
@@ -377,6 +378,7 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.get_pages = i915_gem_object_get_pages_phys,
 	.put_pages = i915_gem_object_put_pages_phys,
 	.release = i915_gem_object_release_phys,
+	.vmf_fill_pages = i915_gem_vmf_fill_pages_cpu,
 };
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops;
@@ -1938,7 +1940,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		goto err_unlock;
 	}
 
-	ret = __vmf_fill_pages_gtt(obj, vmf, page_offset);
+	ret = obj->ops->vmf_fill_pages(obj, vmf, page_offset);
 
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -2166,6 +2168,26 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+	struct drm_i915_gem_object *obj;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret < 0)
+		return ret;
+
+	obj = to_intel_bo(vma->vm_private_data);
+	if (obj->memory_region) {
+		if (obj->mmap_origin == I915_MMAP_ORIGIN_OFFSET) {
+			vma->vm_flags &= ~VM_PFNMAP;
+			vma->vm_flags |= VM_MIXEDMAP;
+		}
+	}
+
+	return ret;
+}
+
 /* Immediately discard the backing storage */
 static void
 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
@@ -4194,6 +4216,37 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
 }
 
+int i915_gem_vmf_fill_pages_cpu(struct drm_i915_gem_object *obj,
+					 struct vm_fault *vmf,
+					 pgoff_t page_offset)
+{
+	struct vm_area_struct *area = vmf->vma;
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct page *page;
+	unsigned long pfn;
+	vm_fault_t vmf_ret;
+	pgoff_t pg_off = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+
+	if (HAS_MAPPABLE_APERTURE(dev_priv))
+		return __vmf_fill_pages_gtt(obj, vmf, page_offset);
+
+	page = i915_gem_object_get_page(obj, pg_off);
+	pfn = page_to_pfn(page);
+
+	vmf_ret = vmf_insert_mixed(area, vmf->address,
+				   __pfn_to_pfn_t(pfn, PFN_DEV));
+	if (vmf_ret & VM_FAULT_ERROR)
+		return vm_fault_to_errno(vmf_ret, 0);
+
+	if (!obj->userfault_count++)
+		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+
+	GEM_BUG_ON(!obj->userfault_count);
+
+	return 0;
+}
+
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE,
@@ -4202,6 +4255,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
 	.put_pages = i915_gem_object_put_pages_gtt,
 
 	.pwrite = i915_gem_object_pwrite_gtt,
+	.vmf_fill_pages = i915_gem_vmf_fill_pages_cpu,
 };
 
 static int i915_gem_object_create_shmem(struct drm_device *dev,
@@ -4818,6 +4872,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 			spin_unlock(&i915->mm.obj_lock);
 		}
 
+		i915_gem_release_mmap(obj);
 		mutex_unlock(&i915->drm.struct_mutex);
 
 		GEM_BUG_ON(obj->bind_count);
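
A note on the VM_MIXEDMAP switch in i915_gem_mmap() above: the two
insertion primitives used by this series have different requirements.
vmf_insert_mixed() can insert struct-page-backed memory (the shmem
pages used when there is no mappable aperture) and needs VM_MIXEDMAP
set on the VMA, while vmf_insert_pfn() inserts raw PFNs and matches
the VM_PFNMAP flag that drm_gem_mmap() sets by default. A condensed
sketch of the two flavours (function names here are invented for
illustration):

    #include <linux/mm.h>
    #include <linux/pfn_t.h>

    /* struct-page backed: requires VM_MIXEDMAP on the VMA */
    static vm_fault_t fault_one_page(struct vm_fault *vmf, struct page *page)
    {
            return vmf_insert_mixed(vmf->vma, vmf->address,
                                    __pfn_to_pfn_t(page_to_pfn(page), PFN_DEV));
    }

    /* raw PFN (e.g. device local memory): matches VM_PFNMAP */
    static vm_fault_t fault_one_pfn(struct vm_fault *vmf, unsigned long pfn)
    {
            return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
    }
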
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index f5b8bca33fe3..5c6bbe6f5e84 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -82,6 +82,8 @@ struct drm_i915_gem_object_ops {
 
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
+	int (*vmf_fill_pages)(struct drm_i915_gem_object *, struct vm_fault *,
+			      pgoff_t);
 };
 
 struct drm_i915_gem_object {
@@ -520,5 +522,7 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 					 unsigned int cache_level);
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
 
+int i915_gem_vmf_fill_pages_cpu(struct drm_i915_gem_object *obj,
+				struct vm_fault *vmf,
+				pgoff_t page_offset);
 #endif
-
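
With the hook in place, a new backing store only needs to supply its
own filler. A hypothetical backend for an object backed by a
contiguous PFN range (obj_first_pfn() is an invented helper, not part
of this series):

    static int example_vmf_fill_pages(struct drm_i915_gem_object *obj,
                                      struct vm_fault *vmf,
                                      pgoff_t page_offset)
    {
            vm_fault_t ret;

            /* hypothetical: base PFN of the object's backing range */
            ret = vmf_insert_pfn(vmf->vma, vmf->address,
                                 obj_first_pfn(obj) + page_offset);
            if (ret & VM_FAULT_ERROR)
                    return vm_fault_to_errno(ret, 0);
            return 0;
    }

    static const struct drm_i915_gem_object_ops example_ops = {
            .vmf_fill_pages = example_vmf_fill_pages,
    };
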
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index d549242a3578..b8b6b8248737 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -170,12 +170,40 @@ static int region_lmem_pwrite(struct drm_i915_gem_object *obj,
 	return ret;
 }
 
+static int region_lmem_vmf_fill_pages(struct drm_i915_gem_object *obj,
+				      struct vm_fault *vmf,
+				      pgoff_t page_offset)
+{
+	struct vm_area_struct *area = vmf->vma;
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *i915 = to_i915(dev);
+	unsigned long size = area->vm_end - area->vm_start;
+	int i;
+	vm_fault_t vmf_ret;
+
+	for (i = 0; i < size >> PAGE_SHIFT; i++) {
+		vmf_ret = vmf_insert_pfn(area,
+					 area->vm_start + i * PAGE_SIZE,
+					 i915_gem_object_lmem_io_pfn(obj, i));
+		if (vmf_ret & VM_FAULT_ERROR)
+			return vm_fault_to_errno(vmf_ret, 0);
+	}
+
+	if (!obj->userfault_count++)
+		list_add(&obj->userfault_link, &i915->mm.userfault_list);
+
+	GEM_BUG_ON(!obj->userfault_count);
+
+	return 0;
+}
+
 static const struct drm_i915_gem_object_ops region_lmem_obj_ops = {
 	.get_pages = i915_memory_region_get_pages_buddy,
 	.put_pages = i915_memory_region_put_pages_buddy,
 	.release = i915_gem_object_release_memory_region,
 	.pread = region_lmem_pread,
 	.pwrite = region_lmem_pwrite,
+	.vmf_fill_pages = region_lmem_vmf_fill_pages,
 };
 
 static struct drm_i915_gem_object *
-- 
2.20.1
