After the request is cancelled, we need to remove it from the global
execution timeline and return it to the context timeline, the inverse
of submit_request().

v2: Move manipulation of struct intel_wait to helpers

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_request.c            | 66 ++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_request.h            |  3 +
 drivers/gpu/drm/i915/intel_breadcrumbs.c           | 17 +++++-
 drivers/gpu/drm/i915/intel_ringbuffer.h            | 37 +++++++++++-
 drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c |  6 +-
 5 files changed, 117 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index d18f450977e0..76e31cd7840e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -441,6 +441,55 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request)
 	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
+void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+{
+	struct intel_engine_cs *engine = request->engine;
+	struct intel_timeline *timeline;
+
+	assert_spin_locked(&engine->timeline->lock);
+
+	/* Only unwind in reverse order, required so that the per-context list
+	 * is kept in seqno/ring order.
+	 */
+	GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
+	engine->timeline->seqno--;
+
+	/* We may be recursing from the signal callback of another i915 fence */
+	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+	request->global_seqno = 0;
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+		intel_engine_cancel_signaling(request);
+	spin_unlock(&request->lock);
+
+	/* Transfer back from the global per-engine timeline to per-context */
+	timeline = request->timeline;
+	GEM_BUG_ON(timeline == engine->timeline);
+
+	spin_lock(&timeline->lock);
+	list_move(&request->link, &timeline->requests);
+	spin_unlock(&timeline->lock);
+
+	/* We don't need to wake_up any waiters on request->execute, they
+	 * will get woken by any other event or by us re-adding this request
+	 * to the engine timeline (__i915_gem_request_submit()). The waiters
+	 * should be quite adept at finding that the request now has a
+	 * different global_seqno from the one they went to sleep on.
+	 */
+}
+
+void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+{
+	struct intel_engine_cs *engine = request->engine;
+	unsigned long flags;
+
+	/* Will be called from irq-context when using foreign fences. */
+	spin_lock_irqsave(&engine->timeline->lock, flags);
+
+	__i915_gem_request_unsubmit(request);
+
+	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
 static int __i915_sw_fence_call
 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
@@ -1034,15 +1083,15 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 	if (flags & I915_WAIT_LOCKED)
 		add_wait_queue(errq, &reset);
 
-	intel_wait_init(&wait, i915_gem_request_global_seqno(req));
+	intel_wait_init(&wait);
 
+restart:
 	reset_wait_queue(&req->execute, &exec);
-	if (!wait.seqno) {
+	if (!intel_wait_update_request(&wait, req)) {
 		do {
 			set_current_state(state);
-			wait.seqno = i915_gem_request_global_seqno(req);
-			if (wait.seqno)
+			if (intel_wait_update_request(&wait, req))
 				break;
 
 			if (flags & I915_WAIT_LOCKED &&
@@ -1070,7 +1119,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 		if (timeout < 0)
 			goto complete;
 
-		GEM_BUG_ON(!wait.seqno);
+		GEM_BUG_ON(!intel_wait_has_seqno(&wait));
 	}
 	GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
 
@@ -1100,7 +1149,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 		timeout = io_schedule_timeout(timeout);
 
 		if (intel_wait_complete(&wait) &&
-		    i915_gem_request_global_seqno(req) == wait.seqno)
+		    intel_wait_check_request(&wait, req))
 			break;
 
 		set_current_state(state);
@@ -1135,6 +1184,11 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 		/* Only spin if we know the GPU is processing this request */
 		if (i915_spin_request(req, state, 2))
 			break;
+
+		if (!intel_wait_check_request(&wait, req)) {
+			intel_engine_remove_wait(req->engine, &wait);
+			goto restart;
+		}
 	}
 
 	intel_engine_remove_wait(req->engine, &wait);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index b81f6709905c..5f73d8c0a38a 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -274,6 +274,9 @@ void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
 void __i915_gem_request_submit(struct drm_i915_gem_request *request);
 void i915_gem_request_submit(struct drm_i915_gem_request *request);
 
+void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
+void i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
+
 struct intel_rps_client;
 #define NO_WAITBOOST ERR_PTR(-1)
 #define IS_RPS_CLIENT(p) (!IS_ERR(p))
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 46e7fca4b189..ac8663d8e73f 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -453,7 +453,12 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
 	spin_unlock_irq(&b->lock);
 }
 
-static bool signal_complete(struct drm_i915_gem_request *request)
+static bool signal_valid(const struct drm_i915_gem_request *request)
+{
+	return intel_wait_check_request(&request->signaling.wait, request);
+}
+
+static bool signal_complete(const struct drm_i915_gem_request *request)
 {
 	if (!request)
 		return false;
@@ -462,7 +467,7 @@ static bool signal_complete(struct drm_i915_gem_request *request)
 	 * signalled that this wait is already completed.
 	 */
 	if (intel_wait_complete(&request->signaling.wait))
-		return true;
+		return signal_valid(request);
 
 	/* Carefully check if the request is complete, giving time for the
 	 * seqno to be visible or if the GPU hung.
@@ -542,13 +547,21 @@ static int intel_breadcrumbs_signaler(void *arg)
 			i915_gem_request_put(request);
 		} else {
+			DEFINE_WAIT(exec);
+
 			if (kthread_should_stop()) {
 				GEM_BUG_ON(request);
 				break;
 			}
 
+			if (request)
+				add_wait_queue(&request->execute, &exec);
+
 			schedule();
 
+			if (request)
+				remove_wait_queue(&request->execute, &exec);
+
 			if (kthread_should_park())
 				kthread_parkme();
 		}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 45d2c2fa946e..fe724d477362 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -582,10 +582,45 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
 
 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
 
-static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
+static inline void intel_wait_init(struct intel_wait *wait)
 {
 	wait->tsk = current;
+}
+
+static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
+{
+	wait->tsk = current;
+	wait->seqno = seqno;
+}
+
+static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
+{
+	return wait->seqno;
+}
+
+static inline bool intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
+{
 	wait->seqno = seqno;
+	return intel_wait_has_seqno(wait);
+}
+
+static inline bool intel_wait_update_request(struct intel_wait *wait,
+					     struct drm_i915_gem_request *rq)
+{
+	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
+}
+
+static inline bool intel_wait_check_seqno(const struct intel_wait *wait,
+					  u32 seqno)
+{
+	return wait->seqno == seqno;
+}
+
+static inline bool
+intel_wait_check_request(const struct intel_wait *wait,
+			 const struct drm_i915_gem_request *rq)
+{
+	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
 }
 
 static inline bool intel_wait_complete(const struct intel_wait *wait)
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 6426acc9fdca..621be1ca53d8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -131,7 +131,7 @@ static int igt_random_insert_remove(void *arg)
 		goto out_bitmap;
 
 	for (n = 0; n < count; n++)
-		intel_wait_init(&waiters[n], seqno_bias + n);
+		intel_wait_init_for_seqno(&waiters[n], seqno_bias + n);
 
 	err = check_rbtree(engine, bitmap, waiters, count);
 	if (err)
@@ -197,7 +197,7 @@ static int igt_insert_complete(void *arg)
 		goto out_waiters;
 
 	for (n = 0; n < count; n++) {
-		intel_wait_init(&waiters[n], n + seqno_bias);
+		intel_wait_init_for_seqno(&waiters[n], n + seqno_bias);
 		intel_engine_add_wait(engine, &waiters[n]);
 		__set_bit(n, bitmap);
 	}
@@ -318,7 +318,7 @@ static int igt_wakeup_thread(void *arg)
 	while (wait_for_ready(w)) {
 		GEM_BUG_ON(kthread_should_stop());
 
-		intel_wait_init(&wait, w->seqno);
+		intel_wait_init_for_seqno(&wait, w->seqno);
 		intel_engine_add_wait(w->engine, &wait);
 		for (;;) {
 			set_current_state(TASK_UNINTERRUPTIBLE);
-- 
2.11.0
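
For reviewers new to the timeline code, a minimal user-space sketch of the
ordering invariant that the GEM_BUG_ON() in __i915_gem_request_unsubmit()
enforces: requests must come back off the engine timeline in reverse
submission order, so rewinding amounts to decrementing the engine's seqno
counter and clearing the request's global_seqno. All names below
(toy_engine, toy_request, toy_submit, toy_unsubmit) are invented for
illustration only; this models just the seqno/list bookkeeping, not the
locking, signaling or wait-queue handling in the patch itself.

#include <assert.h>
#include <stdio.h>

struct toy_request {
	unsigned int global_seqno;	/* 0 while on its context timeline */
	struct toy_request *next;
};

struct toy_engine {
	unsigned int seqno;		/* last global seqno handed out */
	struct toy_request *head;	/* execution list, newest first */
};

/* submit: context timeline -> engine timeline, assign the next seqno */
static void toy_submit(struct toy_engine *engine, struct toy_request *rq)
{
	rq->global_seqno = ++engine->seqno;
	rq->next = engine->head;
	engine->head = rq;
}

/* unsubmit: only legal for the most recently submitted request, so the
 * per-context list stays in seqno order (the patch's GEM_BUG_ON).
 */
static void toy_unsubmit(struct toy_engine *engine, struct toy_request *rq)
{
	assert(rq == engine->head);
	assert(rq->global_seqno == engine->seqno);
	engine->seqno--;
	rq->global_seqno = 0;	/* waiters notice the seqno has changed */
	engine->head = rq->next;
	rq->next = NULL;
}

int main(void)
{
	struct toy_engine engine = { 0, NULL };
	struct toy_request a = { 0, NULL }, b = { 0, NULL };

	toy_submit(&engine, &a);	/* a: seqno 1 */
	toy_submit(&engine, &b);	/* b: seqno 2 */

	toy_unsubmit(&engine, &b);	/* must unwind b before a */
	toy_unsubmit(&engine, &a);

	printf("engine seqno rewound to %u\n", engine.seqno);	/* prints 0 */
	return 0;
}

Running the sketch prints "engine seqno rewound to 0", mirroring how
engine->timeline->seqno is restored once every cancelled request has been
unsubmitted in LIFO order.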