On 5/22/2015 6:05 PM, Mika Kuoppala wrote:
When we set up page directories and tables, we point the entries
to the next level scratch structure. Make this generic
by introducing fill_page_dma, which maps and flushes. We also
need a 32-bit variant for legacy gens.
v2: Fix flushes and handle valleyview (Ville)
Signed-off-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxx>
---
drivers/gpu/drm/i915/i915_gem_gtt.c | 71 +++++++++++++++++++------------------
1 file changed, 37 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f747bd3..d020b5e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -330,6 +330,31 @@ static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
memset(p, 0, sizeof(*p));
}
+static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
+ const uint64_t val)
+{
+ int i;
+ uint64_t * const vaddr = kmap_atomic(p->page);
+
+ for (i = 0; i < 512; i++)
+ vaddr[i] = val;
+
+ if (!HAS_LLC(dev) && !IS_VALLEYVIEW(dev))
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
Cherryview returns true for IS_VALLEYVIEW().
You could use (!HAS_LLC && IS_CHERRYVIEW) instead to flush on chv but not
on vlv... But to make it bxt-proof, (!HAS_LLC && INTEL_INFO(dev)->gen >= 8)
is probably better.
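Something along these lines, as an untested sketch (the gen >= 8 check is
just my reading of what covers chv and bxt, all non-LLC gen8+ parts):

	/* untested suggestion: flush only on non-LLC gen8+ (chv, bxt) */
	if (!HAS_LLC(dev) && INTEL_INFO(dev)->gen >= 8)
		drm_clflush_virt_range(vaddr, PAGE_SIZE);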
+
+ kunmap_atomic(vaddr);
+}
+
+static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
+ const uint32_t val32)
+{
+ uint64_t v = val32;
+
+ v = v << 32 | val32;
+
+ fill_page_dma(dev, p, v);
+}
+
static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
{
cleanup_page_dma(dev, &pt->base);
@@ -340,19 +365,11 @@ static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
- gen8_pte_t *pt_vaddr, scratch_pte;
- int i;
-
- pt_vaddr = kmap_atomic(pt->base.page);
- scratch_pte = gen8_pte_encode(vm->scratch.addr,
- I915_CACHE_LLC, true);
+ gen8_pte_t scratch_pte;
- for (i = 0; i < GEN8_PTES; i++)
- pt_vaddr[i] = scratch_pte;
+ scratch_pte = gen8_pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
- if (!HAS_LLC(vm->dev))
- drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
- kunmap_atomic(pt_vaddr);
+ fill_page_dma(vm->dev, &pt->base, scratch_pte);
}
static struct i915_page_table *alloc_pt(struct drm_device *dev)
@@ -585,20 +602,13 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
struct i915_page_directory *pd)
{
struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
- gen8_pde_t *page_directory;
- struct i915_page_table *pt;
- int i;
+ container_of(vm, struct i915_hw_ppgtt, base);
+ gen8_pde_t scratch_pde;
- page_directory = kmap_atomic(pd->base.page);
- pt = ppgtt->scratch_pt;
- for (i = 0; i < I915_PDES; i++)
- /* Map the PDE to the page table */
- __gen8_do_map_pt(page_directory + i, pt, vm->dev);
+ scratch_pde = gen8_pde_encode(vm->dev, ppgtt->scratch_pt->base.daddr,
+ I915_CACHE_LLC);
- if (!HAS_LLC(vm->dev))
- drm_clflush_virt_range(page_directory, PAGE_SIZE);
- kunmap_atomic(page_directory);
+ fill_page_dma(vm->dev, &pd->base, scratch_pde);
}
static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
@@ -1292,22 +1302,15 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
}
static void gen6_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
+ struct i915_page_table *pt)
{
- gen6_pte_t *pt_vaddr, scratch_pte;
- int i;
+ gen6_pte_t scratch_pte;
WARN_ON(vm->scratch.addr == 0);
- scratch_pte = vm->pte_encode(vm->scratch.addr,
- I915_CACHE_LLC, true, 0);
-
- pt_vaddr = kmap_atomic(pt->base.page);
-
- for (i = 0; i < GEN6_PTES; i++)
- pt_vaddr[i] = scratch_pte;
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
- kunmap_atomic(pt_vaddr);
+ fill_page_dma_32(vm->dev, &pt->base, scratch_pte);
}
static int gen6_alloc_va_range(struct i915_address_space *vm,
--
1.9.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx