for proper refcounting to take place as we use i915_add_request()
for it.

i915_add_request() also takes the context for the request from
ring->last_context, so move the null state batch submission to after
the ring context has been set.

v2: we need to check for the correct ring now (Ville Syrjälä)
v3: no need to expose i915_gem_object_move_to_active (Chris Wilson)

Cc: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx>
Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Damien Lespiau <damien.lespiau@xxxxxxxxx>
Signed-off-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h              |  1 +
 drivers/gpu/drm/i915/i915_gem.c              |  2 +-
 drivers/gpu/drm/i915/i915_gem_context.c      | 16 ++++++++--------
 drivers/gpu/drm/i915/i915_gem_render_state.c | 17 +++++++++++++++--
 4 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b90ec69..0bf7bfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2206,6 +2206,7 @@ int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                          struct intel_ring_buffer *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
                              struct intel_ring_buffer *ring);
+void i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                          struct drm_device *dev,
                          struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 440979f..d9bf694 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2084,7 +2084,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
         return i915_gem_object_move_to_active(vma->obj, ring);
 }
 
-static void
+void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f220c94..6a2d847a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -700,21 +700,21 @@ static int do_switch(struct intel_ring_buffer *ring,
                 /* obj is kept alive until the next request by its active ref */
                 i915_gem_object_ggtt_unpin(from->obj);
                 i915_gem_context_unreference(from);
-        } else {
-                if (to->is_initialized == false) {
-                        ret = i915_gem_render_state_init(ring);
-                        if (ret)
-                                DRM_ERROR("init render state: %d\n", ret);
-                }
         }
 
-        to->is_initialized = true;
-
 done:
         i915_gem_context_reference(to);
         ring->last_context = to;
         to->last_ring = ring;
 
+        if (ring->id == RCS && !to->is_initialized && from == NULL) {
+                ret = i915_gem_render_state_init(ring);
+                if (ret)
+                        DRM_ERROR("init render state: %d\n", ret);
+        }
+
+        to->is_initialized = true;
+
         return 0;
 
 unpin_out:
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 392aa7b..82abe1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -164,9 +164,12 @@ int i915_gem_render_state_init(struct intel_ring_buffer *ring)
         const int gen = INTEL_INFO(ring->dev)->gen;
         struct i915_render_state *so;
         const struct intel_renderstate_rodata *rodata;
-        u32 seqno;
+        struct i915_vma *vma;
         int ret;
 
+        if (WARN_ON(ring->id != RCS))
+                return -ENOENT;
+
         rodata = render_state_get_rodata(ring->dev, gen);
         if (rodata == NULL)
                 return 0;
@@ -186,7 +189,17 @@ int i915_gem_render_state_init(struct intel_ring_buffer *ring)
         if (ret)
                 goto out;
 
-        ret = i915_add_request(ring, &seqno);
+        vma = i915_gem_obj_to_ggtt(so->obj);
+        if (vma == NULL) {
+                ret = -ENOSPC;
+                goto out;
+        }
+
+        i915_vma_move_to_active(vma, ring);
+
+        ret = __i915_add_request(ring, NULL, so->obj, NULL);
+        if (ret)
+                i915_gem_object_move_to_inactive(so->obj);
 
 out:
         render_state_free(so);
-- 
1.7.9.5
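For readers who do not have the request/context plumbing paged in, the sketch below models the ordering constraint the commit message relies on. It is deliberately not i915 code: toy_context, toy_ring, toy_request and toy_add_request() are made-up stand-ins for the hardware context, the ring and __i915_add_request(), which charges a new request to whatever currently sits in ring->last_context and takes a reference on it.

/*
 * Toy model, NOT i915 code: every name below (toy_context, toy_ring,
 * toy_request, toy_add_request) is hypothetical.  It only illustrates
 * the ordering rule from the commit message: a request is charged to
 * whatever ring->last_context holds at submission time, so the null
 * state batch must be submitted after the context switch is recorded.
 */
#include <assert.h>
#include <stdio.h>

struct toy_context {
        const char *name;
        int refcount;                   /* stand-in for context refcounting */
};

struct toy_ring {
        struct toy_context *last_context;
};

struct toy_request {
        struct toy_context *ctx;        /* context that owns the request */
};

/* Mimics the behaviour relied on above: the new request picks up
 * ring->last_context and holds a reference so it stays alive until
 * the request retires. */
static struct toy_request toy_add_request(struct toy_ring *ring)
{
        struct toy_request rq = { .ctx = ring->last_context };

        if (rq.ctx)
                rq.ctx->refcount++;
        return rq;
}

int main(void)
{
        struct toy_context to = { .name = "new context", .refcount = 0 };
        struct toy_ring ring = { .last_context = NULL };

        /* Submitting before the switch is recorded charges the request
         * to the old context (NULL here), which is what the move avoids. */
        struct toy_request early = toy_add_request(&ring);
        assert(early.ctx == NULL);

        /* Patched order: record the context switch first ... */
        ring.last_context = &to;
        /* ... then submit the null state batch request. */
        struct toy_request rq = toy_add_request(&ring);
        assert(rq.ctx == &to && to.refcount == 1);

        printf("request charged to \"%s\", refcount %d\n",
               rq.ctx->name, to.refcount);
        return 0;
}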
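The same lifetime reasoning drives the error path in i915_gem_render_state_init(): the batch object's VMA is moved to the active list before __i915_add_request(), so request retirement rather than render_state_free() decides when the object may go away, and only if adding the request fails is the object put back on the inactive list, which is why i915_gem_object_move_to_inactive() loses its static qualifier in i915_gem.c.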