And track the existence of such a binding similar to the aliasing
ppgtt case. Speeds up binding/unbinding in the common case where we
only need a ppgtt binding (which is accessed in a cpu coherent fashion
by the gpu) and no global gtt binding (which needs uc writes for the
ptes).

This patch just puts the required tracking in place.

v2: Check that global gtt mappings exist in the error_state capture
code (with Chris Wilson's llc reloc patches batchbuffers are no longer
relocated as mappable in all situations, so this matters). Suggested
by Chris Wilson.

v3: Adapted to Chris' latest llc-reloc patches.

v4: Fix a bug in the i915 error state capture code noticed by Chris
Wilson.

Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
 drivers/gpu/drm/i915/i915_drv.h     |    1 +
 drivers/gpu/drm/i915/i915_gem.c     |   12 ++++++++++--
 drivers/gpu/drm/i915/i915_gem_gtt.c |    4 ++++
 drivers/gpu/drm/i915/i915_irq.c     |    3 ++-
 4 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f266acd..5b3ee66 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -874,6 +874,7 @@ struct drm_i915_gem_object {
 	unsigned int cache_level:2;
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
+	unsigned int has_global_gtt_mapping:1;
 
 	struct page **pages;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f37527c..5892dac 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1153,6 +1153,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			goto unlock;
 	}
 
+	if (!obj->has_global_gtt_mapping)
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+
 	if (obj->tiling_mode == I915_TILING_NONE)
 		ret = i915_gem_object_put_fence(obj);
 	else
@@ -2102,7 +2105,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 
 	trace_i915_gem_object_unbind(obj);
 
-	i915_gem_gtt_unbind_object(obj);
+	if (obj->has_global_gtt_mapping)
+		i915_gem_gtt_unbind_object(obj);
 	if (obj->has_aliasing_ppgtt_mapping) {
 		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
 		obj->has_aliasing_ppgtt_mapping = 0;
@@ -2957,7 +2961,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				return ret;
 		}
 
-		i915_gem_gtt_bind_object(obj, cache_level);
+		if (obj->has_global_gtt_mapping)
+			i915_gem_gtt_bind_object(obj, cache_level);
 		if (obj->has_aliasing_ppgtt_mapping)
 			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
 					       obj, cache_level);
@@ -3344,6 +3349,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 			return ret;
 	}
 
+	if (!obj->has_global_gtt_mapping && map_and_fenceable)
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+
 	if (obj->pin_count++ == 0) {
 		if (!obj->active)
 			list_move_tail(&obj->mm_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index bf33eaf..72be806 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -394,12 +394,16 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 				    obj->base.size >> PAGE_SHIFT,
 				    obj->pages,
 				    agp_type);
+
+	obj->has_global_gtt_mapping = 1;
 }
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
 	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
 			      obj->base.size >> PAGE_SHIFT);
+
+	obj->has_global_gtt_mapping = 0;
 }
 
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afd4e03..da95eb5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -727,7 +727,8 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 			goto unwind;
 
 		local_irq_save(flags);
-		if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+		    src->has_global_gtt_mapping) {
 			void __iomem *s;
 
 			/* Simply ignore tiling or any overlapping fence.
-- 
1.7.9
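
The change above boils down to a lazily maintained one-bit flag: bind into
the global gtt only when a caller actually needs that mapping (gtt mmap
faults, mappable pins), record the fact, and skip both the uncached pte
writes and the later unbind work whenever the flag is clear. Below is a
minimal, self-contained sketch of that pattern; it is illustrative only,
and every name in it (fake_gem_object, fake_gtt_bind, fake_pin, ...) is
made up for the example rather than taken from the driver.

/* Illustrative sketch, not driver code: all identifiers are invented. */
#include <stdbool.h>
#include <stdio.h>

struct fake_gem_object {
	/* mirrors the new has_global_gtt_mapping:1 bitfield */
	unsigned int has_global_gtt_mapping:1;
};

static void fake_gtt_bind(struct fake_gem_object *obj)
{
	/* stands in for the expensive uncached global gtt pte writes */
	obj->has_global_gtt_mapping = 1;
}

static void fake_gtt_unbind(struct fake_gem_object *obj)
{
	/* stands in for clearing the global gtt range */
	obj->has_global_gtt_mapping = 0;
}

/* bind lazily: touch the global gtt only when the mapping is needed */
static void fake_pin(struct fake_gem_object *obj, bool map_and_fenceable)
{
	if (!obj->has_global_gtt_mapping && map_and_fenceable)
		fake_gtt_bind(obj);
}

/* on teardown, unbind only what was actually bound */
static void fake_unbind(struct fake_gem_object *obj)
{
	if (obj->has_global_gtt_mapping)
		fake_gtt_unbind(obj);
}

int main(void)
{
	struct fake_gem_object obj = {0};

	fake_pin(&obj, false);	/* ppgtt-only use: flag stays 0 */
	printf("after ppgtt-only pin: %d\n", obj.has_global_gtt_mapping);

	fake_pin(&obj, true);	/* mappable use: bind on demand */
	printf("after mappable pin:   %d\n", obj.has_global_gtt_mapping);

	fake_unbind(&obj);
	printf("after unbind:         %d\n", obj.has_global_gtt_mapping);
	return 0;
}

Built with any C compiler, the printed values show the flag staying clear
for ppgtt-only use and getting set exactly once a mappable pin comes along,
which is the common-case win the commit message describes.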