[PATCH 2/4] drm/i915/gtt: Recursive scratch page

Fill the scratch page with PTEs that point back to itself so that we can
reuse it at any level of the page tables, and drop the separate
scratch_pt, scratch_pd and scratch_pdp.
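
For context, a minimal userspace sketch of the idea (illustrative only,
not part of this patch; all names below are made up): if every 64-bit
entry of the scratch page encodes the scratch page's own address, a
table walk that hits scratch at the PML4, PDP or PD level keeps landing
back on the same page, and the final PTE again resolves to the scratch
page as the data page.

#include <stdint.h>
#include <stdio.h>

#define ENTRIES 512	/* one 4K page of 8-byte entries */

/* Stand-ins for a DMA page and the hardware PTE encoding. */
static uint64_t scratch[ENTRIES];

static uint64_t encode(const void *addr)
{
	return (uint64_t)(uintptr_t)addr | 1;	/* address | present */
}

int main(void)
{
	uint64_t entry;
	int i, lvl;

	/* Fill the scratch page with pointers to itself. */
	for (i = 0; i < ENTRIES; i++)
		scratch[i] = encode(scratch);

	/* A 4-level walk that hits scratch anywhere stays on scratch. */
	entry = encode(scratch);
	for (lvl = 0; lvl < 4; lvl++) {
		uint64_t *table = (uint64_t *)(uintptr_t)(entry & ~1ull);

		entry = table[0];	/* every index yields the same entry */
	}

	printf("walk ends on %p, scratch is %p\n",
	       (void *)(uintptr_t)(entry & ~1ull), (void *)scratch);
	return 0;
}

In the driver the same effect comes from fill_page_dma() writing
vm->scratch_pte, which encodes vm->scratch_page.daddr, into every slot
of the scratch page, so the one page doubles as scratch PT, PD and PDP.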

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Matthew Auld <matthew.auld@xxxxxxxxx>
Cc: Mika Kuoppala <mika.kuoppala@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 202 ++++++++++++----------------
 drivers/gpu/drm/i915/i915_gem_gtt.h |   3 -
 2 files changed, 85 insertions(+), 120 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4a681b3332ad..7f2d8462f324 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -701,9 +701,7 @@ static void free_pd(struct i915_address_space *vm,
 static void gen8_initialize_pd(struct i915_address_space *vm,
 			       struct i915_page_directory *pd)
 {
-	fill_px(vm, pd,
-		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
-	memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
+	fill_px(vm, pd, vm->scratch_pte);
 }
 
 static int __pdp_init(struct i915_address_space *vm,
@@ -711,13 +709,11 @@ static int __pdp_init(struct i915_address_space *vm,
 {
 	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
 
-	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
-					    I915_GFP_ALLOW_FAIL);
+	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
+				      I915_GFP_ALLOW_FAIL);
 	if (unlikely(!pdp->page_directory))
 		return -ENOMEM;
 
-	memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
-
 	return 0;
 }
 
@@ -777,19 +773,13 @@ static void free_pdp(struct i915_address_space *vm,
 static void gen8_initialize_pdp(struct i915_address_space *vm,
 				struct i915_page_directory_pointer *pdp)
 {
-	gen8_ppgtt_pdpe_t scratch_pdpe;
-
-	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
-
-	fill_px(vm, pdp, scratch_pdpe);
+	fill_px(vm, pdp, vm->scratch_pte);
 }
 
 static void gen8_initialize_pml4(struct i915_address_space *vm,
 				 struct i915_pml4 *pml4)
 {
-	fill_px(vm, pml4,
-		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
-	memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
+	fill_px(vm, pml4, vm->scratch_pte);
 }
 
 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
@@ -825,17 +815,16 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
 	return false;
 }
 
-static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
-			       struct i915_page_directory *pd,
-			       struct i915_page_table *pt,
-			       unsigned int pde)
+static void gen8_ppgtt_clear_pde(struct i915_address_space *vm,
+				 struct i915_page_directory *pd,
+				 unsigned int pde)
 {
 	gen8_pde_t *vaddr;
 
-	pd->page_table[pde] = pt;
+	pd->page_table[pde] = NULL;
 
 	vaddr = kmap_atomic_px(pd);
-	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
+	vaddr[pde] = vm->scratch_pte;
 	kunmap_atomic(vaddr);
 }
 
@@ -847,37 +836,33 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
 	u32 pde;
 
 	gen8_for_each_pde(pt, pd, start, length, pde) {
-		GEM_BUG_ON(pt == vm->scratch_pt);
-
 		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
 			continue;
 
-		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
+		gen8_ppgtt_clear_pde(vm, pd, pde);
 		GEM_BUG_ON(!pd->used_pdes);
 		pd->used_pdes--;
-
 		free_pt(vm, pt);
 	}
 
 	return !pd->used_pdes;
 }
 
-static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
-				struct i915_page_directory_pointer *pdp,
-				struct i915_page_directory *pd,
-				unsigned int pdpe)
+static void gen8_ppgtt_clear_pdpe(struct i915_address_space *vm,
+				  struct i915_page_directory_pointer *pdp,
+				  unsigned int pdpe)
 {
 	gen8_ppgtt_pdpe_t *vaddr;
 
-	pdp->page_directory[pdpe] = pd;
+	pdp->page_directory[pdpe] = NULL;
 	if (!use_4lvl(vm))
 		return;
 
 	vaddr = kmap_atomic_px(pdp);
-	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
+	vaddr[pdpe] = vm->scratch_pte;
 	kunmap_atomic(vaddr);
 }
 
 /* Removes entries from a single page dir pointer, releasing it if it's empty.
  * Caller can use the return value to update higher-level entries
  */
@@ -889,15 +875,12 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
 	unsigned int pdpe;
 
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
-		GEM_BUG_ON(pd == vm->scratch_pd);
-
 		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
 			continue;
 
-		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+		gen8_ppgtt_clear_pdpe(vm, pdp, pdpe);
 		GEM_BUG_ON(!pdp->used_pdpes);
 		pdp->used_pdpes--;
-
 		free_pd(vm, pd);
 	}
 
@@ -910,16 +893,16 @@ static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
 	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
 }
 
-static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
-				 struct i915_page_directory_pointer *pdp,
-				 unsigned int pml4e)
+static void gen8_ppgtt_clear_pml4e(struct i915_address_space *vm,
+				   struct i915_pml4 *pml4,
+				   unsigned int pml4e)
 {
 	gen8_ppgtt_pml4e_t *vaddr;
 
-	pml4->pdps[pml4e] = pdp;
+	pml4->pdps[pml4e] = NULL;
 
 	vaddr = kmap_atomic_px(pml4);
-	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
+	vaddr[pml4e] = vm->scratch_pte;
 	kunmap_atomic(vaddr);
 }
 
@@ -938,13 +921,10 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
 	GEM_BUG_ON(!use_4lvl(vm));
 
 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-		GEM_BUG_ON(pdp == vm->scratch_pdp);
-
 		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
 			continue;
 
-		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
-
+		gen8_ppgtt_clear_pml4e(vm, pml4, pml4e);
 		free_pdp(vm, pdp);
 	}
 }
@@ -1196,8 +1176,10 @@ static void gen8_free_page_tables(struct i915_address_space *vm,
 	int i;
 
 	for (i = 0; i < I915_PDES; i++) {
-		if (pd->page_table[i] != vm->scratch_pt)
-			free_pt(vm, pd->page_table[i]);
+		if (!pd->page_table[i])
+			continue;
+
+		free_pt(vm, pd->page_table[i]);
 	}
 }
 
@@ -1219,9 +1201,6 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 
 		vm->scratch_order = clone->scratch_order;
 		vm->scratch_pte = clone->scratch_pte;
-		vm->scratch_pt  = clone->scratch_pt;
-		vm->scratch_pd  = clone->scratch_pd;
-		vm->scratch_pdp = clone->scratch_pdp;
 		return 0;
 	}
 
@@ -1233,42 +1212,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 		gen8_pte_encode(vm->scratch_page.daddr,
 				I915_CACHE_LLC,
 				PTE_READ_ONLY);
-
-	vm->scratch_pt = alloc_pt(vm);
-	if (IS_ERR(vm->scratch_pt)) {
-		ret = PTR_ERR(vm->scratch_pt);
-		goto free_scratch_page;
-	}
-
-	vm->scratch_pd = alloc_pd(vm);
-	if (IS_ERR(vm->scratch_pd)) {
-		ret = PTR_ERR(vm->scratch_pd);
-		goto free_pt;
-	}
-
-	if (use_4lvl(vm)) {
-		vm->scratch_pdp = alloc_pdp(vm);
-		if (IS_ERR(vm->scratch_pdp)) {
-			ret = PTR_ERR(vm->scratch_pdp);
-			goto free_pd;
-		}
-	}
-
-	gen8_initialize_pt(vm, vm->scratch_pt);
-	gen8_initialize_pd(vm, vm->scratch_pd);
-	if (use_4lvl(vm))
-		gen8_initialize_pdp(vm, vm->scratch_pdp);
+	fill_page_dma(vm, &vm->scratch_page, vm->scratch_pte);
 
 	return 0;
-
-free_pd:
-	free_pd(vm, vm->scratch_pd);
-free_pt:
-	free_pt(vm, vm->scratch_pt);
-free_scratch_page:
-	cleanup_scratch_page(vm);
-
-	return ret;
 }
 
 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
@@ -1308,10 +1254,6 @@ static void gen8_free_scratch(struct i915_address_space *vm)
 	if (!vm->scratch_page.daddr)
 		return;
 
-	if (use_4lvl(vm))
-		free_pdp(vm, vm->scratch_pdp);
-	free_pd(vm, vm->scratch_pd);
-	free_pt(vm, vm->scratch_pt);
 	cleanup_scratch_page(vm);
 }
 
@@ -1322,7 +1264,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
 	int i;
 
 	for (i = 0; i < pdpes; i++) {
-		if (pdp->page_directory[i] == vm->scratch_pd)
+		if (!pdp->page_directory[i])
 			continue;
 
 		gen8_free_page_tables(vm, pdp->page_directory[i]);
@@ -1337,7 +1279,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
 	int i;
 
 	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
-		if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
+		if (!ppgtt->pml4.pdps[i])
 			continue;
 
 		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
@@ -1362,6 +1304,49 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	gen8_free_scratch(vm);
 }
 
+static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
+			       struct i915_page_directory *pd,
+			       struct i915_page_table *pt,
+			       unsigned int pde)
+{
+	gen8_pde_t *vaddr;
+
+	pd->page_table[pde] = pt;
+
+	vaddr = kmap_atomic_px(pd);
+	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
+	kunmap_atomic(vaddr);
+}
+
+static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
+				struct i915_page_directory_pointer *pdp,
+				struct i915_page_directory *pd,
+				unsigned int pdpe)
+{
+	gen8_ppgtt_pdpe_t *vaddr;
+
+	pdp->page_directory[pdpe] = pd;
+	if (!use_4lvl(vm))
+		return;
+
+	vaddr = kmap_atomic_px(pdp);
+	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
+	kunmap_atomic(vaddr);
+}
+
+static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
+				 struct i915_page_directory_pointer *pdp,
+				 unsigned int pml4e)
+{
+	gen8_ppgtt_pml4e_t *vaddr;
+
+	pml4->pdps[pml4e] = pdp;
+
+	vaddr = kmap_atomic_px(pml4);
+	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
+	kunmap_atomic(vaddr);
+}
+
 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
 			       struct i915_page_directory *pd,
 			       u64 start, u64 length)
@@ -1373,7 +1358,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
 	gen8_for_each_pde(pt, pd, start, length, pde) {
 		int count = gen8_pte_count(start, length);
 
-		if (pt == vm->scratch_pt) {
+		if (!pt) {
 			pd->used_pdes++;
 
 			pt = alloc_pt(vm);
@@ -1408,7 +1393,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 	int ret;
 
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
-		if (pd == vm->scratch_pd) {
+		if (!pd) {
 			pdp->used_pdpes++;
 
 			pd = alloc_pd(vm);
@@ -1431,7 +1416,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 
 unwind_pd:
 	if (!pd->used_pdes) {
-		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+		gen8_ppgtt_clear_pdpe(vm, pdp, pdpe);
 		GEM_BUG_ON(!pdp->used_pdpes);
 		pdp->used_pdpes--;
 		free_pd(vm, pd);
@@ -1459,7 +1444,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 	int ret;
 
 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
+		if (!pdp) {
 			pdp = alloc_pdp(vm);
 			if (IS_ERR(pdp))
 				goto unwind;
@@ -1477,7 +1462,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 
 unwind_pdp:
 	if (!pdp->used_pdpes) {
-		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
+		gen8_ppgtt_clear_pml4e(vm, pml4, pml4e);
 		free_pdp(vm, pdp);
 	}
 unwind:
@@ -1510,7 +1495,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
 unwind:
 	start -= from;
 	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
-		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+		gen8_ppgtt_clear_pdpe(vm, pdp, pdpe);
 		free_pd(vm, pd);
 	}
 	pdp->used_pdpes = 0;
@@ -1673,8 +1658,6 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 		const unsigned int count = min(num_entries, GEN6_PTES - pte);
 		gen6_pte_t *vaddr;
 
-		GEM_BUG_ON(pt == vm->scratch_pt);
-
 		num_entries -= count;
 
 		GEM_BUG_ON(count > pt->used_ptes);
@@ -1710,8 +1693,6 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	struct sgt_dma iter = sgt_dma(vma);
 	gen6_pte_t *vaddr;
 
-	GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
-
 	vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
 	do {
 		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -1749,7 +1730,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 	gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
 		const unsigned int count = gen6_pte_count(start, length);
 
-		if (pt == vm->scratch_pt) {
+		if (!pt) {
 			pt = alloc_pt(vm);
 			if (IS_ERR(pt))
 				goto unwind_out;
@@ -1784,8 +1765,6 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
 {
 	struct i915_address_space * const vm = &ppgtt->base.vm;
-	struct i915_page_table *unused;
-	u32 pde;
 	int ret;
 
 	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
@@ -1795,23 +1774,13 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
 	vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
 					 I915_CACHE_NONE,
 					 PTE_READ_ONLY);
-
-	vm->scratch_pt = alloc_pt(vm);
-	if (IS_ERR(vm->scratch_pt)) {
-		cleanup_scratch_page(vm);
-		return PTR_ERR(vm->scratch_pt);
-	}
-
-	gen6_initialize_pt(vm, vm->scratch_pt);
-	gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
-		ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
+	fill_page_dma_32(vm, &vm->scratch_page, vm->scratch_pte);
 
 	return 0;
 }
 
 static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
 {
-	free_pt(vm, vm->scratch_pt);
 	cleanup_scratch_page(vm);
 }
 
@@ -1821,7 +1790,7 @@ static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
 	u32 pde;
 
 	gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
-		if (pt != ppgtt->base.vm.scratch_pt)
+		if (pt)
 			free_pt(&ppgtt->base.vm, pt);
 }
 
@@ -1873,7 +1842,6 @@ static int pd_vma_bind(struct i915_vma *vma,
 static void pd_vma_unbind(struct i915_vma *vma)
 {
 	struct gen6_hw_ppgtt *ppgtt = vma->private;
-	struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
 	struct i915_page_table *pt;
 	unsigned int pde;
 
@@ -1882,11 +1850,11 @@ static void pd_vma_unbind(struct i915_vma *vma)
 
 	/* Free all no longer used page tables */
 	gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
-		if (pt->used_ptes || pt == scratch_pt)
+		if (!pt || pt->used_ptes)
 			continue;
 
 		free_pt(&ppgtt->base.vm, pt);
-		ppgtt->base.pd.page_table[pde] = scratch_pt;
+		ppgtt->base.pd.page_table[pde] = NULL;
 	}
 
 	ppgtt->scan_for_unused_pt = false;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 86065d75b3ac..c30565f25b3e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -294,9 +294,6 @@ struct i915_address_space {
 	u64 scratch_pte;
 	int scratch_order;
 	struct i915_page_dma scratch_page;
-	struct i915_page_table *scratch_pt;
-	struct i915_page_directory *scratch_pd;
-	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
 
 	/**
 	 * List of vma currently bound.
-- 
2.20.1
