Both to reduce GTT wastage due to intermingled cache colouring and to
reduce the number of cached bo allocated in the mappable aperture.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c         |   13 +++++++++----
 drivers/gpu/drm/i915/intel_ringbuffer.c |   13 +++++++++----
 2 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8728ca2..b7661e1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2916,6 +2916,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_mm_node *free_space;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
+	unsigned search;
 	bool mappable, fenceable;
 	int ret;
 
@@ -2955,16 +2956,20 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
+	search = 0;
+	if (obj->cache_level != I915_CACHE_NONE && !map_and_fenceable)
+		search |= DRM_MM_SEARCH_BELOW;
+
 search_free:
 	if (map_and_fenceable)
 		free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
							       size, alignment, obj->cache_level,
							       0, dev_priv->mm.gtt_mappable_end,
-							       false);
+							       search);
 	else
 		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
						      size, alignment, obj->cache_level,
-						      false);
+						      search);
 
 	if (free_space != NULL) {
 		if (map_and_fenceable)
@@ -2972,12 +2977,12 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 				drm_mm_get_block_range_generic(free_space,
							       size, alignment, obj->cache_level,
							       0, dev_priv->mm.gtt_mappable_end,
-							       0);
+							       search);
 		else
 			free_space =
 				drm_mm_get_block_generic(free_space,
							 size, alignment, obj->cache_level,
-							 0);
+							 search);
 	}
 	if (free_space == NULL) {
 		ret = i915_gem_evict_something(dev, size, alignment,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 42f1a1c..57c6a0f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -458,10 +458,13 @@ init_pipe_control(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true, false);
+	ret = i915_gem_object_pin(obj, 4096, false, false);
 	if (ret)
 		goto err_unref;
 
+	if (!obj->has_global_gtt_mapping)
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+
 	pc->gtt_offset = obj->gtt_offset;
 	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
 	if (pc->cpu_page == NULL)
@@ -1104,10 +1107,12 @@ static int init_status_page(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true, false);
-	if (ret != 0) {
+	ret = i915_gem_object_pin(obj, 4096, false, false);
+	if (ret != 0)
 		goto err_unref;
-	}
+
+	if (!obj->has_global_gtt_mapping)
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
 
 	ring->status_page.gfx_addr = obj->gtt_offset;
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-- 
1.7.10.4
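
An illustrative aside on why the extra search flag helps. As I understand the
drm_mm colouring rules used by i915, a guard page is kept between neighbouring
GTT nodes whose cache level differs, so every boundary between an LLC and an
uncached object wastes a page. Biasing the search for cached, non-mappable
objects away from the mappable aperture keeps each colour contiguous and so
minimises those boundaries. The user-space sketch below models only that
effect; the enum, the helper and the example layouts are hypothetical
stand-ins, not the drm_mm API.

/*
 * Illustrative sketch, not kernel code: model the cost of intermingling
 * cache colours in the GTT.  Assume each boundary between differently
 * coloured neighbours costs one guard page, and count how many such
 * boundaries a given layout produces.
 */
#include <stdio.h>

enum cache_level { UNCACHED, CACHED };

/* Count colour transitions, i.e. guard pages, in a linear layout. */
static int count_guard_pages(const enum cache_level *layout, int n)
{
	int i, guards = 0;

	for (i = 1; i < n; i++)
		if (layout[i] != layout[i - 1])
			guards++;
	return guards;
}

int main(void)
{
	/* Objects bound in arrival order, colours intermingled. */
	enum cache_level intermingled[] = {
		CACHED, UNCACHED, CACHED, UNCACHED, CACHED, UNCACHED
	};
	/* Same objects with the cached ones packed at one end of the GTT. */
	enum cache_level segregated[] = {
		UNCACHED, UNCACHED, UNCACHED, CACHED, CACHED, CACHED
	};

	printf("guard pages, intermingled: %d\n",
	       count_guard_pages(intermingled, 6));	/* prints 5 */
	printf("guard pages, segregated:   %d\n",
	       count_guard_pages(segregated, 6));	/* prints 1 */
	return 0;
}

Grouping by colour in this way is also what keeps cached bo out of the
mappable aperture, which the !map_and_fenceable test in the patch is aimed at.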