Rely instead on the insertion of the implicit flush before the seqno breadcrumb.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |   34 ----------------------------------
 1 file changed, 34 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e574835..b22bfc8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,7 +37,6 @@
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 
-static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
@@ -2003,10 +2002,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 	int ret;
 
 	if (obj->active) {
-		ret = i915_gem_object_flush_gpu_write_domain(obj);
-		if (ret)
-			return ret;
-
 		ret = i915_gem_check_olr(obj->ring,
 					 obj->last_rendering_seqno);
 		if (ret)
@@ -2764,17 +2759,6 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
 }
 
-/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
-{
-	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	/* Queue the GPU write cache flushing we need. */
-	return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
-}
-
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
@@ -2841,10 +2825,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
 		return 0;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
 	if (obj->pending_gpu_write || write) {
 		ret = i915_gem_object_wait_rendering(obj);
 		if (ret)
@@ -2957,10 +2937,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
 	if (pipelined != obj->ring) {
 		ret = i915_gem_object_sync(obj, pipelined);
 		if (ret)
@@ -3014,12 +2990,6 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
 	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
-		if (ret)
-			return ret;
-	}
-
 	ret = i915_gem_object_wait_rendering(obj);
 	if (ret)
 		return ret;
@@ -3044,10 +3014,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
 		return 0;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
 	if (write || obj->pending_gpu_write) {
 		ret = i915_gem_object_wait_rendering(obj);
 		if (ret)
-- 
1.7.10.4
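
[Editor's note: the snippet below is an illustrative, self-contained model of the ordering argument in the commit message, not i915 driver code. The names (struct ring, emit_request, wait_for_seqno) are hypothetical. It only sketches why the removed explicit flushes are redundant: if request emission flushes the GPU write caches immediately before writing the seqno breadcrumb, then any path that waits on that seqno already observes flushed writes.]

#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned int seqno;	/* last breadcrumb written to the ring */
	bool caches_flushed;	/* stand-in for the GPU write caches */
};

/* Emitting a request: the flush is implicit and precedes the breadcrumb. */
static unsigned int emit_request(struct ring *ring)
{
	ring->caches_flushed = true;	/* implicit flush */
	return ++ring->seqno;		/* seqno breadcrumb written after it */
}

/* A waiter that observes the seqno can rely on the earlier flush. */
static void wait_for_seqno(const struct ring *ring, unsigned int seqno)
{
	if (ring->seqno >= seqno && ring->caches_flushed)
		printf("seqno %u reached, GPU writes already flushed\n", seqno);
}

int main(void)
{
	struct ring ring = { 0, false };
	unsigned int seqno = emit_request(&ring);

	wait_for_seqno(&ring, seqno);
	return 0;
}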