[PATCH 48/56] drm/i915: Restructure map vs. insert entries

After this change, the old GGTT keeps its insert_entries/clear_range
functions, as we don't expect those to ever change in terms of page
table levels. The address space now gets map_vma/unmap_vma, which
better reflects the operations we actually want to support for a VMA.
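
As a rough sketch of the resulting split (mirroring the call sites in
ppgtt_bind_vma()/ggtt_bind_vma() below): VMA-granular operations hang
off the address space, while raw entry-level operations hang off the
GGTT:

    /* PPGTT: operate on whole VMAs; allocates page tables as needed */
    ret = vma->vm->map_vma(vma, cache_level); /* alloc PTs + write PTEs */
    ...
    vma->vm->unmap_vma(vma);                  /* scrub PTEs, free PTs */

    /* GGTT: entry-level ops, page table depth never changes */
    dev_priv->gtt.insert_entries(&dev_priv->gtt, obj->pages, start, level);
    dev_priv->gtt.clear_range(&dev_priv->gtt, start, length, true);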

I was too lazy to convert it here, but the GGTT should really use these
new functions as well.

BISECT WARNING: This commit breaks the aliasing PPGTT as-is. If you hit
this commit during bisect, please skip it. There was no other way I
could find to make these changes remotely readable.

Signed-off-by: Ben Widawsky <ben@xxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h     |   1 +
 drivers/gpu/drm/i915/i915_gem_gtt.c | 223 +++++++++++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_gtt.h |  24 ++--
 3 files changed, 126 insertions(+), 122 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4d53728..a043941 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -571,6 +571,7 @@ enum i915_cache_level {
 			      large Last-Level-Cache. LLC is coherent with
 			      the CPU, but L3 is only visible to the GPU. */
 	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
+	I915_CACHE_MAX,
 };
 
 struct i915_ctx_hang_stats {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 15e61d8..d67d803 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -730,9 +730,9 @@ static void gen8_map_page_directory_pointer(struct i915_pml4 *pml4,
 	kunmap_atomic(pagemap);
 }
 
-static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
-					struct i915_pagedirpo *pdp,
-					uint64_t start, uint64_t length)
+static void gen8_unmap_vma_3lvl(struct i915_address_space *vm,
+				struct i915_pagedirpo *pdp,
+				uint64_t start, uint64_t length)
 {
 	struct drm_device *dev = vm->dev;
 	struct i915_pagedir *pd;
@@ -817,38 +817,43 @@ static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
 	}
 }
 
-static void gen8_teardown_va_range_4lvl(struct i915_address_space *vm,
-					struct i915_pml4 *pml4,
-					uint64_t start, uint64_t length)
+static void gen8_unmap_vma_4lvl(struct i915_address_space *vm,
+				struct i915_pml4 *pml4,
+				uint64_t start, uint64_t length)
 {
 	struct i915_pagedirpo *pdp;
 	uint64_t temp, pml4e;
 
 	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
-		gen8_teardown_va_range_3lvl(vm, pdp, start, length);
+		gen8_unmap_vma_3lvl(vm, pdp, start, length);
 		if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->dev)))
 			clear_bit(pml4e, pml4->used_pml4es);
 	}
 }
 
-static void gen8_teardown_va_range(struct i915_address_space *vm,
-				   uint64_t start, uint64_t length)
+static void __gen8_teardown_va_range(struct i915_address_space *vm,
+				     uint64_t start, uint64_t length)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
 
 	if (HAS_48B_PPGTT(vm->dev))
-		gen8_teardown_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+		gen8_unmap_vma_4lvl(vm, &ppgtt->pml4, start, length);
 	else
-		gen8_teardown_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+		gen8_unmap_vma_3lvl(vm, &ppgtt->pdp, start, length);
+}
+
+static void gen8_unmap_vma(struct i915_vma *vma)
+{
+	__gen8_teardown_va_range(vma->vm, vma->node.start, vma->node.size);
 }
 
 static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
 	trace_i915_va_teardown(&ppgtt->base,
 			       ppgtt->base.start, ppgtt->base.total);
-	gen8_teardown_va_range(&ppgtt->base,
-			       ppgtt->base.start, ppgtt->base.total);
+	__gen8_teardown_va_range(&ppgtt->base,
+				 ppgtt->base.start, ppgtt->base.total);
 
 	WARN_ON(!bitmap_empty(ppgtt->pdp.used_pdpes,
 			      I915_PDPES_PER_PDP(ppgtt->base.dev)));
@@ -1188,15 +1193,15 @@ err_out:
 	start = orig_start;
 	length = orig_length;
 	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e)
-		gen8_teardown_va_range_3lvl(vm, pdp, start, length);
+		gen8_unmap_vma_3lvl(vm, pdp, start, length);
 
 err_alloc:
 	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
 		free_pdp_single(pdp, vm->dev);
 }
 
-static int gen8_alloc_va_range(struct i915_address_space *vm,
-			       uint64_t start, uint64_t length)
+static int __gen8_alloc_va_range(struct i915_address_space *vm,
+				 uint64_t start, uint64_t length)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
@@ -1207,6 +1212,18 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
 }
 
+static int gen8_map_vma(struct i915_vma *vma, u32 flags)
+{
+	int ret = __gen8_alloc_va_range(vma->vm, vma->node.start, vma->node.size);
+	if (!ret) {
+		BUG_ON(flags >= I915_CACHE_MAX);
+		gen8_ppgtt_insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+					  flags);
+	}
+
+	return ret;
+}
+
 static void gen8_ppgtt_fini_common(struct i915_hw_ppgtt *ppgtt)
 {
 	free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
@@ -1229,7 +1246,6 @@ static int gen8_ppgtt_init_common(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	ppgtt->base.total = size;
 	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
 	ppgtt->enable = gen8_ppgtt_enable;
-	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 
 	ppgtt->scratch_pd = alloc_pt_scratch(ppgtt->base.dev);
 	if (IS_ERR(ppgtt->scratch_pd))
@@ -1270,7 +1286,7 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
-	ret = gen8_alloc_va_range(&ppgtt->base, start, size);
+	ret = __gen8_alloc_va_range(&ppgtt->base, start, size);
 	if (ret) {
 		gen8_ppgtt_fini_common(ppgtt);
 		return ret;
@@ -1280,9 +1296,11 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	gen8_for_each_pdpe(pd, pdp, start, size, temp, pdpe)
 		gen8_map_pagetable_range(&ppgtt->base, pd, start, size);
 
-	ppgtt->base.allocate_va_range = NULL;
-	ppgtt->base.teardown_va_range = NULL;
-	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+	BUG(); /* we need a map_vma for aliasing */
+	ppgtt->base.map_vma = NULL;
+	ppgtt->base.unmap_vma = NULL;
+
+	gen8_ppgtt_clear_range(&ppgtt->base, 0, dev_priv->gtt.base.total, true);
 
 	return 0;
 }
@@ -1297,9 +1315,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
-	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
-	ppgtt->base.teardown_va_range = gen8_teardown_va_range;
-	ppgtt->base.clear_range = NULL;
+	ppgtt->base.map_vma = gen8_map_vma;
+	ppgtt->base.unmap_vma = gen8_unmap_vma;
 
 	return 0;
 }
@@ -1670,15 +1687,16 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 		kunmap_atomic(pt_vaddr);
 }
 
-static int gen6_alloc_va_range(struct i915_address_space *vm,
-			       uint64_t start, uint64_t length)
+static int gen6_alloc_va_range(struct i915_vma *vma, u32 flags)
 {
 	DECLARE_BITMAP(new_page_tables, I915_PDES_PER_PD);
+	struct i915_address_space *vm = vma->vm;
 	struct drm_device *dev = vm->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_ppgtt *ppgtt =
 		        container_of(vm, struct i915_hw_ppgtt, base);
 	struct i915_pagetab *pt;
+	uint32_t start = vma->node.start, length = vma->node.size;
 	const uint32_t start_save = start, length_save = length;
 	uint32_t pde, temp;
 	int ret;
@@ -1748,6 +1766,9 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 	 * table. Also require for WC mapped PTEs */
 	readl(dev_priv->gtt.gsm);
 
+	BUG_ON(flags >= I915_CACHE_MAX);
+	gen6_ppgtt_insert_entries(vm, vma->obj->pages, vma->node.start, flags);
+
 	return 0;
 
 unwind_out:
@@ -1759,18 +1780,20 @@ unwind_out:
 	return ret;
 }
 
-static void gen6_teardown_va_range(struct i915_address_space *vm,
-				   uint64_t start, uint64_t length)
+static void gen6_unmap_vma(struct i915_vma *vma)
 {
+	struct i915_address_space *vm = vma->vm;
 	struct i915_hw_ppgtt *ppgtt =
 		        container_of(vm, struct i915_hw_ppgtt, base);
+	uint32_t start = vma->node.start, length = vma->node.size;
+	const uint32_t orig_start = start, orig_length = length;
 	struct i915_pagetab *pt;
 	uint32_t pde, temp;
 
 	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
 
 		if (WARN(pt == ppgtt->scratch_pt,
-		    "Tried to teardown scratch page vm %p. pde %u: %llx-%llx\n",
+		    "Tried to teardown scratch page vm %p. pde %u: %x-%x\n",
 		    vm, pde, start, start + length))
 			continue;
 
@@ -1790,6 +1813,8 @@ static void gen6_teardown_va_range(struct i915_address_space *vm,
 			ppgtt->pd.page_tables[pde] = ppgtt->scratch_pt;
 		}
 	}
+
+	gen6_ppgtt_clear_range(vm, orig_start, orig_length, true);
 }
 
 static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
@@ -1919,10 +1944,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
 	if (ret)
 		return ret;
 
-	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
-	ppgtt->base.teardown_va_range = gen6_teardown_va_range;
-	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
-	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+	ppgtt->base.map_vma = gen6_alloc_va_range;
+	ppgtt->base.unmap_vma = gen6_unmap_vma;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.start = 0;
 	ppgtt->base.total = I915_PDES_PER_PD * GEN6_PTES_PER_PT * PAGE_SIZE;
@@ -1968,8 +1991,6 @@ int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt, boo
 
 	kref_init(&ppgtt->ref);
 	drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, ppgtt->base.total);
-	if (ppgtt->base.clear_range)
-		ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 	i915_init_vm(dev_priv, &ppgtt->base);
 
 	return 0;
@@ -1993,40 +2014,28 @@ ppgtt_bind_vma(struct i915_vma *vma,
 	int ret;
 
 	WARN_ON(flags);
-	if (vma->vm->allocate_va_range) {
-		trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size);
-
-		ret = vma->vm->allocate_va_range(vma->vm,
-						 vma->node.start,
-						 vma->node.size);
-		if (ret)
-			return ret;
+	BUG_ON(!vma->vm->map_vma);
+	trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size);
 
-		ppgtt_invalidate_tlbs(vma->vm);
-	}
+	ret = vma->vm->map_vma(vma, cache_level);
+	if (ret)
+		return ret;
 
-	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
-				cache_level);
+	ppgtt_invalidate_tlbs(vma->vm);
 
 	return 0;
 }
 
 static void ppgtt_unbind_vma(struct i915_vma *vma)
 {
-	if (vma->vm->clear_range)
-		vma->vm->clear_range(vma->vm,
-				     vma->node.start,
-				     vma->obj->base.size,
-				     true);
-
-	if (vma->vm->teardown_va_range) {
+	if (vma->vm->unmap_vma) {
 		trace_i915_va_teardown(vma->vm,
 				       vma->node.start, vma->node.size);
 
-		vma->vm->teardown_va_range(vma->vm,
-					   vma->node.start, vma->node.size);
+		vma->vm->unmap_vma(vma);
 		ppgtt_invalidate_tlbs(vma->vm);
-	}
+	} else
+		BUG();
 }
 
 extern int intel_iommu_gfx_mapped;
@@ -2108,10 +2117,10 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 
 	i915_check_and_clear_faults(dev);
 
-	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-				       dev_priv->gtt.base.start,
-				       dev_priv->gtt.base.total,
-				       true);
+	dev_priv->gtt.clear_range(&dev_priv->gtt,
+				  dev_priv->gtt.base.start,
+				  dev_priv->gtt.base.total,
+				  true);
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -2123,10 +2132,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	i915_check_and_clear_faults(dev);
 
 	/* First fill our portion of the GTT with scratch pages */
-	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-				       dev_priv->gtt.base.start,
-				       dev_priv->gtt.base.total,
-				       true);
+	dev_priv->gtt.clear_range(&dev_priv->gtt,
+				  dev_priv->gtt.base.start,
+				  dev_priv->gtt.base.total,
+				  true);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
@@ -2199,15 +2208,16 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
 #endif
 }
 
-static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+static void gen8_ggtt_insert_entries(struct i915_gtt *gtt,
 				     struct sg_table *st,
 				     uint64_t start,
 				     enum i915_cache_level level)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	struct drm_i915_private *dev_priv =
+		container_of(gtt, struct drm_i915_private, gtt);
 	unsigned first_entry = start >> PAGE_SHIFT;
 	gen8_gtt_pte_t __iomem *gtt_entries =
-		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+		(gen8_gtt_pte_t __iomem *)gtt->gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
 	dma_addr_t addr = 0;
@@ -2245,22 +2255,23 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
  * within the global GTT as well as accessible by the GPU through the GMADR
  * mapped BAR (dev_priv->mm.gtt->gtt).
  */
-static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+static void gen6_ggtt_insert_entries(struct i915_gtt *gtt,
 				     struct sg_table *st,
 				     uint64_t start,
 				     enum i915_cache_level level)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	struct drm_i915_private *dev_priv =
+		container_of(gtt, struct drm_i915_private, gtt);
 	unsigned first_entry = start >> PAGE_SHIFT;
 	gen6_gtt_pte_t __iomem *gtt_entries =
-		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+		(gen6_gtt_pte_t __iomem *)gtt->gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
 	dma_addr_t addr;
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_page_iter_dma_address(&sg_iter);
-		iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
+		iowrite32(gtt->base.pte_encode(addr, level, true), &gtt_entries[i]);
 		i++;
 	}
 
@@ -2272,7 +2283,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	 */
 	if (i != 0)
 		WARN_ON(readl(&gtt_entries[i-1]) !=
-			vm->pte_encode(addr, level, true));
+			gtt->base.pte_encode(addr, level, true));
 
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
@@ -2282,17 +2293,16 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
 
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+static void gen8_ggtt_clear_range(struct i915_gtt *gtt,
 				  uint64_t start,
 				  uint64_t length,
 				  bool use_scratch)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
 	gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
-		(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
-	const int max_entries = gtt_total_entries(&dev_priv->gtt) - first_entry;
+		(gen8_gtt_pte_t __iomem *) gtt->gsm + first_entry;
+	const int max_entries = gtt_total_entries(gtt) - first_entry;
 	int i;
 
 	if (WARN(num_entries > max_entries,
@@ -2300,7 +2310,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = gen8_pte_encode(vm->scratch.addr,
+	scratch_pte = gen8_pte_encode(gtt->base.scratch.addr,
 				      I915_CACHE_LLC,
 				      use_scratch);
 	for (i = 0; i < num_entries; i++)
@@ -2342,17 +2352,16 @@ void gen8_for_every_pdpe_pde(struct i915_hw_ppgtt *ppgtt,
 	}
 }
 
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+static void gen6_ggtt_clear_range(struct i915_gtt *gtt,
 				  uint64_t start,
 				  uint64_t length,
 				  bool use_scratch)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
 	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
-		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
-	const int max_entries = gtt_total_entries(&dev_priv->gtt) - first_entry;
+		(gen6_gtt_pte_t __iomem *) gtt->gsm + first_entry;
+	const int max_entries = gtt_total_entries(gtt) - first_entry;
 	int i;
 
 	if (WARN(num_entries > max_entries,
@@ -2360,7 +2369,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+	scratch_pte = gtt->base.pte_encode(gtt->base.scratch.addr,
+					   I915_CACHE_LLC, use_scratch);
 
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
@@ -2383,7 +2393,7 @@ static int i915_ggtt_bind_vma(struct i915_vma *vma,
 	return 0;
 }
 
-static void i915_ggtt_clear_range(struct i915_address_space *vm,
+static void i915_ggtt_clear_range(struct i915_gtt *gunused,
 				  uint64_t start,
 				  uint64_t length,
 				  bool unused)
@@ -2425,9 +2435,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
 		if (!obj->has_global_gtt_mapping ||
 		    (cache_level != obj->cache_level)) {
-			vma->vm->insert_entries(vma->vm, obj->pages,
-						vma->node.start,
-						cache_level);
+			struct i915_gtt *gtt = &dev_priv->gtt;
+			gtt->insert_entries(gtt, obj->pages,
+					    vma->node.start,
+					    cache_level);
 			obj->has_global_gtt_mapping = 1;
 		}
 	}
@@ -2439,10 +2450,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	    (!obj->has_aliasing_ppgtt_mapping ||
 	     (cache_level != obj->cache_level))) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
-		appgtt->base.insert_entries(&appgtt->base,
-					    vma->obj->pages,
-					    vma->node.start,
-					    cache_level);
+		BUG();
+		appgtt->base.map_vma(vma, cache_level);
 		vma->obj->has_aliasing_ppgtt_mapping = 1;
 	}
 
@@ -2453,22 +2462,19 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 {
 	struct drm_device *dev = vma->vm->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_gtt *gtt = &dev_priv->gtt;
 	struct drm_i915_gem_object *obj = vma->obj;
 
+	BUG_ON(vma->vm != &gtt->base);
+
 	if (obj->has_global_gtt_mapping) {
-		vma->vm->clear_range(vma->vm,
-				     vma->node.start,
-				     obj->base.size,
-				     true);
+		gtt->clear_range(gtt, vma->node.start, obj->base.size, true);
 		obj->has_global_gtt_mapping = 0;
 	}
 
 	if (obj->has_aliasing_ppgtt_mapping) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
-		appgtt->base.clear_range(&appgtt->base,
-					 vma->node.start,
-					 obj->base.size,
-					 true);
+		appgtt->base.unmap_vma(vma);
 		obj->has_aliasing_ppgtt_mapping = 0;
 	}
 }
@@ -2521,7 +2527,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 	 * of the aperture.
 	 */
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+	struct i915_gtt *gtt = &dev_priv->gtt;
+	struct i915_address_space *ggtt_vm = &gtt->base;
 	struct drm_mm_node *entry;
 	struct drm_i915_gem_object *obj;
 	unsigned long hole_start, hole_end;
@@ -2554,12 +2561,12 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
-		ggtt_vm->clear_range(ggtt_vm, hole_start,
-				     hole_end - hole_start, true);
+		gtt->clear_range(gtt, hole_start,
+				 hole_end - hole_start, true);
 	}
 
 	/* And finally clear the reserved guard page */
-	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+	gtt->clear_range(gtt, end - PAGE_SIZE, PAGE_SIZE, true);
 }
 
 void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -2749,8 +2756,8 @@ static int gen8_gmch_probe(struct drm_device *dev,
 
 	ret = ggtt_probe_common(dev, gtt_size);
 
-	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
-	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
+	dev_priv->gtt.clear_range = gen8_ggtt_clear_range;
+	dev_priv->gtt.insert_entries = gen8_ggtt_insert_entries;
 
 	return ret;
 }
@@ -2789,8 +2796,8 @@ static int gen6_gmch_probe(struct drm_device *dev,
 
 	ret = ggtt_probe_common(dev, gtt_size);
 
-	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
-	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
+	dev_priv->gtt.clear_range = gen6_ggtt_clear_range;
+	dev_priv->gtt.insert_entries = gen6_ggtt_insert_entries;
 
 	return ret;
 }
@@ -2823,7 +2830,7 @@ static int i915_gmch_probe(struct drm_device *dev,
 	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
 
 	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
-	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+	dev_priv->gtt.clear_range = i915_ggtt_clear_range;
 
 	if (unlikely(dev_priv->gtt.do_idle_maps))
 		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 3904ae5..c265c23 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -260,20 +260,8 @@ struct i915_address_space {
 	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
 				     enum i915_cache_level level,
 				     bool valid); /* Create a valid PTE */
-	int (*allocate_va_range)(struct i915_address_space *vm,
-				 uint64_t start,
-				 uint64_t length);
-	void (*teardown_va_range)(struct i915_address_space *vm,
-				  uint64_t start,
-				  uint64_t length);
-	void (*clear_range)(struct i915_address_space *vm,
-			    uint64_t start,
-			    uint64_t length,
-			    bool use_scratch);
-	void (*insert_entries)(struct i915_address_space *vm,
-			       struct sg_table *st,
-			       uint64_t start,
-			       enum i915_cache_level cache_level);
+	int (*map_vma)(struct i915_vma *vma, u32 flags);
+	void (*unmap_vma)(struct i915_vma *vma);
 	void (*cleanup)(struct i915_address_space *vm);
 };
 
@@ -329,6 +317,14 @@ struct i915_gtt {
 	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
 			  size_t *stolen, phys_addr_t *mappable_base,
 			  unsigned long *mappable_end);
+	void (*insert_entries)(struct i915_gtt *gtt,
+			       struct sg_table *st,
+			       uint64_t start,
+			       enum i915_cache_level cache_level);
+	void (*clear_range)(struct i915_gtt *gtt,
+			    uint64_t start,
+			    uint64_t length,
+			    bool use_scratch);
 };
 
 /* For each pde iterates over every pde between from start until start + length.
-- 
1.9.2
