On Wed, Sep 11, 2013 at 11:39:30PM +0100, Chris Wilson wrote:
> On Wed, Sep 11, 2013 at 02:57:54PM -0700, Ben Widawsky wrote:
> > @@ -464,11 +465,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
> >  				struct intel_ring_buffer *ring,
> >  				bool *need_reloc)
> >  {
> > -	struct drm_i915_private *dev_priv = ring->dev->dev_private;
> > +	struct drm_i915_gem_object *obj = vma->obj;
> >  	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
> >  	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
> >  	bool need_fence, need_mappable;
> > -	struct drm_i915_gem_object *obj = vma->obj;
> > +	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
> > +		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
> >  	int ret;
> >  
> >  	need_fence =
> > @@ -497,14 +499,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
> >  		}
> >  	}
> >  
> > -	/* Ensure ppgtt mapping exists if needed */
> > -	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
> > -		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
> > -				       obj, obj->cache_level);
> > -
> > -		obj->has_aliasing_ppgtt_mapping = 1;
> > -	}
> > -
> >  	if (entry->offset != vma->node.start) {
> >  		entry->offset = vma->node.start;
> >  		*need_reloc = true;
> > @@ -515,9 +509,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
> >  		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
> >  	}
> >  
> > -	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
> > -	    !obj->has_global_gtt_mapping)
> > -		i915_gem_gtt_bind_object(obj, obj->cache_level);
> > +	vma->vm->bind_vma(vma, obj->cache_level, flags);
> 
> This results in us rebinding into the vma unconditionally once/twice for
> every object on every execbuffer, right?
> -Chris
> 

Hmm, how does this suit you (untested)? I think it addresses your
concern. Also, I suspect squashing this patch into the previous one is
probably a good idea.

static void gen6_ggtt_bind_vma(struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned long entry = vma->node.start >> PAGE_SHIFT;

	/* Bind into the global GTT only if there is no aliasing PPGTT, or if
	 * the caller explicitly asked for a global mapping; otherwise the
	 * aliasing PPGTT alone is used. */
	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
		/* Skip the rebind unless the object is unbound or we're
		 * changing the cache bits. */
		if (!obj->has_global_gtt_mapping ||
		    (cache_level != obj->cache_level)) {
			gen6_ggtt_insert_entries(vma->vm, obj->pages, entry,
						 cache_level);
			obj->has_global_gtt_mapping = 1;
		}
	}

	/* Also put the mapping in the aliasing PPGTT (even when the user
	 * requested global), unless it is already mapped there with the
	 * right cache level. */
	if (dev_priv->mm.aliasing_ppgtt &&
	    (!obj->has_aliasing_ppgtt_mapping ||
	     (cache_level != obj->cache_level))) {
		gen6_ppgtt_insert_entries(&dev_priv->mm.aliasing_ppgtt->base,
					  vma->obj->pages, entry, cache_level);
		vma->obj->has_aliasing_ppgtt_mapping = 1;
	}
}

-- 
Ben Widawsky, Intel Open Source Technology Center
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
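
For what it's worth, below is a minimal standalone sketch of the pattern being
discussed, not i915 code: the toy_* names, the printf calls and the single
GLOBAL_BIND flag are made-up stand-ins. The point it illustrates is that the
vm exposes a bind_vma hook, and the hook itself checks the has-mapping flags
and the cache level, so calling it unconditionally from execbuffer only
rewrites entries when something actually changed.

/* Toy model of the bind_vma dispatch and rebind-avoidance checks. */
#include <stdbool.h>
#include <stdio.h>

#define GLOBAL_BIND (1 << 0)	/* stand-in for the flag used in the patch */

struct toy_obj {
	bool has_global_mapping;	/* mirrors obj->has_global_gtt_mapping */
	bool has_aliasing_mapping;	/* mirrors obj->has_aliasing_ppgtt_mapping */
	int cache_level;
};

struct toy_vma;

struct toy_vm {
	bool has_aliasing_ppgtt;
	void (*bind_vma)(struct toy_vma *vma, int cache_level, unsigned flags);
};

struct toy_vma {
	struct toy_obj *obj;
	struct toy_vm *vm;
};

static void toy_bind_vma(struct toy_vma *vma, int cache_level, unsigned flags)
{
	struct toy_obj *obj = vma->obj;

	/* Global binding only when there is no aliasing PPGTT or it was
	 * explicitly requested, and only when something actually changed. */
	if (!vma->vm->has_aliasing_ppgtt || (flags & GLOBAL_BIND)) {
		if (!obj->has_global_mapping || cache_level != obj->cache_level) {
			printf("writing global GTT entries\n");
			obj->has_global_mapping = true;
		}
	}

	/* Aliasing PPGTT binding, again skipped when already up to date. */
	if (vma->vm->has_aliasing_ppgtt &&
	    (!obj->has_aliasing_mapping || cache_level != obj->cache_level)) {
		printf("writing aliasing PPGTT entries\n");
		obj->has_aliasing_mapping = true;
	}
}

int main(void)
{
	struct toy_obj obj = { .cache_level = 0 };
	struct toy_vm ggtt = { .has_aliasing_ppgtt = true,
			       .bind_vma = toy_bind_vma };
	struct toy_vma vma = { .obj = &obj, .vm = &ggtt };

	/* First execbuffer: both mappings get written. */
	vma.vm->bind_vma(&vma, 0, GLOBAL_BIND);
	/* Second execbuffer with nothing changed: bind_vma is a no-op. */
	vma.vm->bind_vma(&vma, 0, GLOBAL_BIND);
	return 0;
}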