[CI 11/35] drm/i915: Fallback to single page GTT mmappings for relocations

If we cannot pin the entire object into the mappable region of the GTT,
try to pin a single page instead. This is much more likely to succeed,
and prevents us from falling back to the clflush slow path.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 62 ++++++++++++++++++++++++------
 1 file changed, 51 insertions(+), 11 deletions(-)
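
For reference, the new mapping flow in reloc_iomap() condenses to the
sketch below (distilled from the hunks that follow; the fence handling,
the kmap path and most error handling are elided):

    vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                   PIN_MAPPABLE | PIN_NONBLOCK);
    if (IS_ERR(vma)) {
            /* Aperture exhausted: reserve a single page of GTT
             * address space instead and rebind it for every page
             * we need to relocate.
             */
            memset(&cache->node, 0, sizeof(cache->node));
            ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
                                                      &cache->node,
                                                      4096, 0, 0,
                                                      0, ggtt->mappable_end,
                                                      DRM_MM_SEARCH_DEFAULT,
                                                      DRM_MM_CREATE_DEFAULT);
            if (ret)
                    return ERR_PTR(ret);
    }

    offset = cache->node.start;
    if (cache->node.allocated)
            /* Point the reserved PTE at the page we want this time. */
            ggtt->base.insert_page(&ggtt->base,
                                   i915_gem_object_get_dma_address(obj, page),
                                   offset, I915_CACHE_NONE, 0);
    else
            offset += page << PAGE_SHIFT;

    vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);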

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 017efda938bf..0e56bfed9a67 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -331,6 +331,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->vaddr = 0;
 	cache->i915 = i915;
 	cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+	cache->node.allocated = false;
 }
 
 static inline void *unmask_page(unsigned long p)
@@ -360,8 +361,19 @@ static void reloc_cache_fini(struct reloc_cache *cache)
 		kunmap_atomic(vaddr);
 		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
 	} else {
+		wmb();
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
-		i915_vma_unpin((struct i915_vma *)cache->node.mm);
+		if (cache->node.allocated) {
+			struct i915_ggtt *ggtt = &cache->i915->ggtt;
+
+			ggtt->base.clear_range(&ggtt->base,
+					       cache->node.start,
+					       cache->node.size,
+					       true);
+			drm_mm_remove_node(&cache->node);
+		} else {
+			i915_vma_unpin((struct i915_vma *)cache->node.mm);
+		}
 	}
 }
 
@@ -401,8 +413,19 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			 struct reloc_cache *cache,
 			 int page)
 {
+	struct i915_ggtt *ggtt = &cache->i915->ggtt;
+	unsigned long offset;
 	void *vaddr;
 
+	if (cache->node.allocated) {
+		wmb();
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, page),
+				       cache->node.start, I915_CACHE_NONE, 0);
+		cache->page = page;
+		return unmask_page(cache->vaddr);
+	}
+
 	if (cache->vaddr) {
 		io_mapping_unmap_atomic(unmask_page(cache->vaddr));
 	} else {
@@ -418,21 +441,38 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 
 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
-		if (IS_ERR(vma))
-			return NULL;
+		if (IS_ERR(vma)) {
+			memset(&cache->node, 0, sizeof(cache->node));
+			ret = drm_mm_insert_node_in_range_generic
+				(&ggtt->base.mm, &cache->node,
+				 4096, 0, 0,
+				 0, ggtt->mappable_end,
+				 DRM_MM_SEARCH_DEFAULT,
+				 DRM_MM_CREATE_DEFAULT);
+			if (ret)
+				return ERR_PTR(ret);
+		} else {
+			ret = i915_gem_object_put_fence(obj);
+			if (ret) {
+				i915_vma_unpin(vma);
+				return ERR_PTR(ret);
+			}
 
-		ret = i915_gem_object_put_fence(obj);
-		if (ret) {
-			i915_vma_unpin(vma);
-			return ERR_PTR(ret);
+			cache->node.start = vma->node.start;
+			cache->node.mm = (void *)vma;
 		}
+	}
 
-		cache->node.start = vma->node.start;
-		cache->node.mm = (void *)vma;
+	offset = cache->node.start;
+	if (cache->node.allocated) {
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, page),
+				       offset, I915_CACHE_NONE, 0);
+	} else {
+		offset += page << PAGE_SHIFT;
 	}
 
-	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable,
-					 cache->node.start + (page << PAGE_SHIFT));
+	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;
 
-- 
2.9.3
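
Design note (condensed from the hunks above, not part of the commit
message): the added wmb() calls pair with the write-combined iomapping
used for relocation writes. Before the single reserved PTE is repointed
at a new page, or the mapping is torn down, pending WC stores must be
flushed so they cannot land in the wrong page:

    if (cache->node.allocated) {
            wmb(); /* flush WC writes through the old binding first */
            ggtt->base.insert_page(&ggtt->base,
                                   i915_gem_object_get_dma_address(obj, page),
                                   cache->node.start, I915_CACHE_NONE, 0);
            cache->page = page;
            return unmask_page(cache->vaddr);
    }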
