On Tue, Feb 25, 2014 at 11:27:15PM -0800, Ben Widawsky wrote:
> On Tue, Feb 25, 2014 at 06:13:44PM -0800, Ben Widawsky wrote:
> > This patch converts insert_entries and clear_range, both functions which
> > are specific to the VM. These functions tend to encapsulate the gen
> > specific PTE writes. Passing absolute addresses to the insert_entries,
> > and clear_range will help make the logic clearer within the functions as
> > to what's going on. Currently, all callers simply do the appropriate
> > page shift, which IMO, ends up looking weird with an upcoming change for
> > the gen8 page table allocations.
> >
> > Up until now, the PPGTT was a funky 2 level page table. GEN8 changes
> > this to look more like a 3 level page table, and to that extent we need
> > a significant amount more memory simply for the page tables. To address
> > this, the allocations will be split up in finer amounts.
> >
> > v2: Replace size_t with uint64_t (Chris, Imre)
> >
> > v3: Fix size in gen8_ppgtt_init (Ben)
> > Fix Size in i915_gem_suspend_gtt_mappings/restore (Imre)
> >
> > v4: Wherever length was introduced in the API, the num_entries
> > calculation should have been (length - start) as opposed to just length.
> > As this is only a bug on clearing entries, it wasn't easily apparent.
> > (Ben)
> >
> > Reviewed-by: Imre Deak <imre.deak@xxxxxxxxx>
> > Signed-off-by: Ben Widawsky <ben@xxxxxxxxxxxx>
>
> Give me a sec on this one.... something is still busted.
>

Ah, yes. v4 is stupid. v3 is still the right one. I got confused by some
future changes I made. Please ignore v4.

> > ---
> >  drivers/gpu/drm/i915/i915_drv.h     |  6 +--
> >  drivers/gpu/drm/i915/i915_gem_gtt.c | 90 +++++++++++++++++++++----------------
> >  2 files changed, 54 insertions(+), 42 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index 57556fb..ab23bfd 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -652,12 +652,12 @@ struct i915_address_space {
> >  			    enum i915_cache_level level,
> >  			    bool valid); /* Create a valid PTE */
> >  	void (*clear_range)(struct i915_address_space *vm,
> > -			    unsigned int first_entry,
> > -			    unsigned int num_entries,
> > +			    uint64_t start,
> > +			    uint64_t length,
> >  			    bool use_scratch);
> >  	void (*insert_entries)(struct i915_address_space *vm,
> >  			       struct sg_table *st,
> > -			       unsigned int first_entry,
> > +			       uint64_t start,
> >  			       enum i915_cache_level cache_level);
> >  	void (*cleanup)(struct i915_address_space *vm);
> >  };
> > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > index beca571..23e722b 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > @@ -254,13 +254,15 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
> >  }
> >
> >  static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
> > -				   unsigned first_entry,
> > -				   unsigned num_entries,
> > +				   uint64_t start,
> > +				   uint64_t length,
> >  				   bool use_scratch)
> >  {
> >  	struct i915_hw_ppgtt *ppgtt =
> >  		container_of(vm, struct i915_hw_ppgtt, base);
> >  	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
> > +	unsigned first_entry = start >> PAGE_SHIFT;
> > +	unsigned num_entries = (length - start) >> PAGE_SHIFT;
> >  	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
> >  	unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
> >  	unsigned last_pte, i;
> > @@ -290,12 +292,13 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
> >
> >  static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
> >  				      struct sg_table *pages,
> > -				      unsigned first_entry,
> > +				      uint64_t start,
> >  				      enum i915_cache_level cache_level)
> >  {
> >  	struct i915_hw_ppgtt *ppgtt =
> >  		container_of(vm, struct i915_hw_ppgtt, base);
> >  	gen8_gtt_pte_t *pt_vaddr;
> > +	unsigned first_entry = start >> PAGE_SHIFT;
> >  	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
> >  	unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
> >  	struct sg_page_iter sg_iter;
> > @@ -539,7 +542,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
> >  	ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
> >
> >  	ppgtt->base.clear_range(&ppgtt->base, 0,
> > -				ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
> > +				ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE,
> >  				true);
> >
> >  	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
> > @@ -854,13 +857,15 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
> >
> >  /* PPGTT support for Sandybdrige/Gen6 and later */
> >  static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
> > -				   unsigned first_entry,
> > -				   unsigned num_entries,
> > +				   uint64_t start,
> > +				   uint64_t length,
> >  				   bool use_scratch)
> >  {
> >  	struct i915_hw_ppgtt *ppgtt =
> >  		container_of(vm, struct i915_hw_ppgtt, base);
> >  	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
> > +	unsigned first_entry = start >> PAGE_SHIFT;
> > +	unsigned num_entries = (length - start) >> PAGE_SHIFT;
> >  	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
> >  	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
> >  	unsigned last_pte, i;
> > @@ -887,12 +892,13 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
> >
> >  static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
> >  				      struct sg_table *pages,
> > -				      unsigned first_entry,
> > +				      uint64_t start,
> >  				      enum i915_cache_level cache_level)
> >  {
> >  	struct i915_hw_ppgtt *ppgtt =
> >  		container_of(vm, struct i915_hw_ppgtt, base);
> >  	gen6_gtt_pte_t *pt_vaddr;
> > +	unsigned first_entry = start >> PAGE_SHIFT;
> >  	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
> >  	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
> >  	struct sg_page_iter sg_iter;
> > @@ -1024,8 +1030,7 @@ alloc:
> >  		ppgtt->pt_dma_addr[i] = pt_addr;
> >  	}
> >
> > -	ppgtt->base.clear_range(&ppgtt->base, 0,
> > -				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
> > +	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
> >  	ppgtt->debug_dump = gen6_dump_ppgtt;
> >
> >  	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
> > @@ -1089,20 +1094,17 @@ ppgtt_bind_vma(struct i915_vma *vma,
> >  	       enum i915_cache_level cache_level,
> >  	       u32 flags)
> >  {
> > -	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
> > -
> >  	WARN_ON(flags);
> >
> > -	vma->vm->insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
> > +	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
> > +				cache_level);
> >  }
> >
> >  static void ppgtt_unbind_vma(struct i915_vma *vma)
> >  {
> > -	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
> > -
> >  	vma->vm->clear_range(vma->vm,
> > -			     entry,
> > -			     vma->obj->base.size >> PAGE_SHIFT,
> > +			     vma->node.start,
> > +			     vma->obj->base.size,
> >  			     true);
> >  }
> >
> > @@ -1186,8 +1188,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
> >  	i915_check_and_clear_faults(dev);
> >
> >  	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
> > -				       dev_priv->gtt.base.start / PAGE_SIZE,
> > -				       dev_priv->gtt.base.total / PAGE_SIZE,
> > +				       dev_priv->gtt.base.start,
> > +				       dev_priv->gtt.base.total,
> >  				       false);
> >  }
> >
> > @@ -1201,8 +1203,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
> >
> >  	/* First fill our portion of the GTT with scratch pages */
> >  	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
> > -				       dev_priv->gtt.base.start / PAGE_SIZE,
> > -				       dev_priv->gtt.base.total / PAGE_SIZE,
> > +				       dev_priv->gtt.base.start,
> > +				       dev_priv->gtt.base.total,
> >  				       true);
> >
> >  	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> > @@ -1263,10 +1265,11 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
> >
> >  static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
> >  				     struct sg_table *st,
> > -				     unsigned int first_entry,
> > +				     uint64_t start,
> >  				     enum i915_cache_level level)
> >  {
> >  	struct drm_i915_private *dev_priv = vm->dev->dev_private;
> > +	unsigned first_entry = start >> PAGE_SHIFT;
> >  	gen8_gtt_pte_t __iomem *gtt_entries =
> >  		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
> >  	int i = 0;
> > @@ -1308,10 +1311,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
> >   */
> >  static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
> >  				     struct sg_table *st,
> > -				     unsigned int first_entry,
> > +				     uint64_t start,
> >  				     enum i915_cache_level level)
> >  {
> >  	struct drm_i915_private *dev_priv = vm->dev->dev_private;
> > +	unsigned first_entry = start >> PAGE_SHIFT;
> >  	gen6_gtt_pte_t __iomem *gtt_entries =
> >  		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
> >  	int i = 0;
> > @@ -1343,11 +1347,13 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
> >  }
> >
> >  static void gen8_ggtt_clear_range(struct i915_address_space *vm,
> > -				  unsigned int first_entry,
> > -				  unsigned int num_entries,
> > +				  uint64_t start,
> > +				  uint64_t length,
> >  				  bool use_scratch)
> >  {
> >  	struct drm_i915_private *dev_priv = vm->dev->dev_private;
> > +	unsigned first_entry = start >> PAGE_SHIFT;
> > +	unsigned num_entries = (length - start) >> PAGE_SHIFT;
> >  	gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
> >  		(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
> >  	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
> > @@ -1367,11 +1373,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
> >  }
> >
> >  static void gen6_ggtt_clear_range(struct i915_address_space *vm,
> > -				  unsigned int first_entry,
> > -				  unsigned int num_entries,
> > +				  uint64_t start,
> > +				  uint64_t length,
> >  				  bool use_scratch)
> >  {
> >  	struct drm_i915_private *dev_priv = vm->dev->dev_private;
> > +	unsigned first_entry = start >> PAGE_SHIFT;
> > +	unsigned num_entries = (length - start) >> PAGE_SHIFT;
> >  	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
> >  		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
> >  	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
> > @@ -1404,10 +1412,12 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
> >  }
> >
> >  static void i915_ggtt_clear_range(struct i915_address_space *vm,
> > -				  unsigned int first_entry,
> > -				  unsigned int num_entries,
> > +				  uint64_t start,
> > +				  uint64_t length,
> >  				  bool unused)
> >  {
> > +	unsigned first_entry = start >> PAGE_SHIFT;
> > +	unsigned num_entries = (length - start) >> PAGE_SHIFT;
> >  	intel_gtt_clear_range(first_entry, num_entries);
> >  }
> >
> > @@ -1428,7 +1438,6 @@ static void ggtt_bind_vma(struct i915_vma *vma,
> >  	struct drm_device *dev = vma->vm->dev;
> >  	struct drm_i915_private *dev_priv = dev->dev_private;
> >  	struct drm_i915_gem_object *obj = vma->obj;
> > -	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
> >
> >  	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
> >  	 * or we have a global mapping already but the cacheability flags have
> > @@ -1444,7 +1453,8 @@ static void ggtt_bind_vma(struct i915_vma *vma,
> >  	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
> >  		if (!obj->has_global_gtt_mapping ||
> >  		    (cache_level != obj->cache_level)) {
> > -			vma->vm->insert_entries(vma->vm, obj->pages, entry,
> > +			vma->vm->insert_entries(vma->vm, obj->pages,
> > +						vma->node.start,
> >  						cache_level);
> >  			obj->has_global_gtt_mapping = 1;
> >  		}
> > @@ -1455,7 +1465,9 @@ static void ggtt_bind_vma(struct i915_vma *vma,
> >  	     (cache_level != obj->cache_level))) {
> >  		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
> >  		appgtt->base.insert_entries(&appgtt->base,
> > -					    vma->obj->pages, entry, cache_level);
> > +					    vma->obj->pages,
> > +					    vma->node.start,
> > +					    cache_level);
> >  		vma->obj->has_aliasing_ppgtt_mapping = 1;
> >  	}
> >  }
> > @@ -1465,11 +1477,11 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
> >  	struct drm_device *dev = vma->vm->dev;
> >  	struct drm_i915_private *dev_priv = dev->dev_private;
> >  	struct drm_i915_gem_object *obj = vma->obj;
> > -	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
> >
> >  	if (obj->has_global_gtt_mapping) {
> > -		vma->vm->clear_range(vma->vm, entry,
> > -				     vma->obj->base.size >> PAGE_SHIFT,
> > +		vma->vm->clear_range(vma->vm,
> > +				     vma->node.start,
> > +				     obj->base.size,
> >  				     true);
> >  		obj->has_global_gtt_mapping = 0;
> >  	}
> > @@ -1477,8 +1489,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
> >  	if (obj->has_aliasing_ppgtt_mapping) {
> >  		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
> >  		appgtt->base.clear_range(&appgtt->base,
> > -					 entry,
> > -					 obj->base.size >> PAGE_SHIFT,
> > +					 vma->node.start,
> > +					 obj->base.size,
> >  					 true);
> >  		obj->has_aliasing_ppgtt_mapping = 0;
> >  	}
> > @@ -1563,14 +1575,14 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
> >
> >  	/* Clear any non-preallocated blocks */
> >  	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
> > -		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
> >  		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
> >  			      hole_start, hole_end);
> > -		ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
> > +		ggtt_vm->clear_range(ggtt_vm, hole_start,
> > +				     hole_end - hole_start, true);
> >  	}
> >
> >  	/* And finally clear the reserved guard page */
> > -	ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
> > +	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
> >  }
> >
> >  void i915_gem_init_global_gtt(struct drm_device *dev)
> > --
> > 1.9.0
> >
>
> --
> Ben Widawsky, Intel Open Source Technology Center
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx

--
Ben Widawsky, Intel Open Source Technology Center
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
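
For anyone following the v3-versus-v4 question above: with the new API the second argument is a byte length, not an end address, so converting it to an entry count is just a shift; subtracting start only gives the same answer when start is 0, which is why the full-range clears look fine while callers like ppgtt_unbind_vma (non-zero start) do not. Below is a standalone sketch of that arithmetic, not part of the patch; the 4K page constants and example values are assumed purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assumed for illustration; the driver gets these from kernel headers. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	/* Hypothetical clear of 4 pages starting 16 pages into the VM. */
	uint64_t start  = 16 * PAGE_SIZE;
	uint64_t length = 4 * PAGE_SIZE;

	unsigned first_entry = start >> PAGE_SHIFT;             /* 16 */
	unsigned v3_entries  = length >> PAGE_SHIFT;            /* 4: correct */
	unsigned v4_entries  = (length - start) >> PAGE_SHIFT;  /* wraps: bogus */

	printf("first_entry=%u v3=%u v4=%u\n",
	       first_entry, v3_entries, v4_entries);
	return 0;
}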