Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> writes:

> We use timeline->mutex to protect modifications to
> context->active_count, and the associated enable/disable callbacks.
> Due to complications with the engine-pm barrier, there is a path where
> we used a "superlock" to provide serialised protection and so could not
> unconditionally assert with lockdep that it was always held. However,
> we can mark the mutex as taken (noting that we may be nested underneath
> ourselves), which means we can be reassured that the right
> timeline->mutex is always treated as held and let lockdep roam free.
>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

Reviewed-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxxxxxxxx>
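For anyone reading along: the key point is that mutex_acquire()/mutex_release()
on the lock's dep_map only update lockdep's bookkeeping and never touch the
mutex itself, so the parking path can satisfy the new lockdep_assert_held()
checks while the actual serialisation still comes from the engine-pm barrier.
A minimal sketch of the pattern, detached from i915 (struct foo and the helpers
below are made-up names for illustration, not anything in this patch; it uses
the three-argument mutex_release() of the kernel this patch targets):

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct foo {
	struct mutex lock;
	unsigned int count;		/* protected by lock */
};

static void foo_count_inc(struct foo *f)
{
	/* happy whether the lock is really held or only marked as held */
	lockdep_assert_held(&f->lock);
	f->count++;
}

static void foo_barrier_path(struct foo *f)
{
	/*
	 * Tell lockdep we own f->lock without taking it; subclass 2
	 * because a lock of the same class may already be held by our
	 * caller. The real exclusion must be guaranteed elsewhere.
	 */
	mutex_acquire(&f->lock.dep_map, 2, 0, _THIS_IP_);

	foo_count_inc(f);

	mutex_release(&f->lock.dep_map, 0, _THIS_IP_);
}

The subclass of 2 mirrors what the patch does for the kernel context: since we
may already be nested underneath our own timeline->mutex, an annotation at
nesting level 0 could look like recursive locking to lockdep.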
> ---
>  drivers/gpu/drm/i915/gt/intel_context.h       |  3 +++
>  drivers/gpu/drm/i915/gt/intel_context_types.h |  2 +-
>  drivers/gpu/drm/i915/gt/intel_engine_pm.c     | 14 ++++++++++++++
>  drivers/gpu/drm/i915/gt/intel_timeline.c      |  4 ++++
>  drivers/gpu/drm/i915/i915_request.c           |  3 ++-
>  5 files changed, 24 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
> index 053a1307ecb4..dd742ac2fbdb 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context.h
> @@ -89,17 +89,20 @@ void intel_context_exit_engine(struct intel_context *ce);
>
>  static inline void intel_context_enter(struct intel_context *ce)
>  {
> +	lockdep_assert_held(&ce->timeline->mutex);
>  	if (!ce->active_count++)
>  		ce->ops->enter(ce);
>  }
>
>  static inline void intel_context_mark_active(struct intel_context *ce)
>  {
> +	lockdep_assert_held(&ce->timeline->mutex);
>  	++ce->active_count;
>  }
>
>  static inline void intel_context_exit(struct intel_context *ce)
>  {
> +	lockdep_assert_held(&ce->timeline->mutex);
>  	GEM_BUG_ON(!ce->active_count);
>  	if (!--ce->active_count)
>  		ce->ops->exit(ce);
> diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
> index d8ce266c049f..bf9cedfccbf0 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
> @@ -59,7 +59,7 @@ struct intel_context {
>  	u32 *lrc_reg_state;
>  	u64 lrc_desc;
>
> -	unsigned int active_count; /* notionally protected by timeline->mutex */
> +	unsigned int active_count; /* protected by timeline->mutex */
>
>  	atomic_t pin_count;
>  	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> index f3f0109f9e22..5f03f7dcad72 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> @@ -37,6 +37,16 @@ static int __engine_unpark(struct intel_wakeref *wf)
>  	return 0;
>  }
>
> +static inline void __timeline_mark_lock(struct intel_context *ce)
> +{
> +	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
> +}
> +
> +static inline void __timeline_mark_unlock(struct intel_context *ce)
> +{
> +	mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
> +}
> +
>  static bool switch_to_kernel_context(struct intel_engine_cs *engine)
>  {
>  	struct i915_request *rq;
> @@ -61,6 +71,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
>  	 * retiring the last request, thus all rings should be empty and
>  	 * all timelines idle.
>  	 */
> +	__timeline_mark_lock(engine->kernel_context);
> +
>  	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
>  	if (IS_ERR(rq))
>  		/* Context switch failed, hope for the best! Maybe reset? */
> @@ -80,6 +92,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
>  	__intel_wakeref_defer_park(&engine->wakeref);
>  	__i915_request_queue(rq, NULL);
>
> +	__timeline_mark_unlock(engine->kernel_context);
> +
>  	return false;
>  }
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
> index 7b476cd55dac..eafd94d5e211 100644
> --- a/drivers/gpu/drm/i915/gt/intel_timeline.c
> +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
> @@ -338,6 +338,8 @@ void intel_timeline_enter(struct intel_timeline *tl)
>  {
>  	struct intel_gt_timelines *timelines = &tl->gt->timelines;
>
> +	lockdep_assert_held(&tl->mutex);
> +
>  	GEM_BUG_ON(!atomic_read(&tl->pin_count));
>  	if (tl->active_count++)
>  		return;
> @@ -352,6 +354,8 @@ void intel_timeline_exit(struct intel_timeline *tl)
>  {
>  	struct intel_gt_timelines *timelines = &tl->gt->timelines;
>
> +	lockdep_assert_held(&tl->mutex);
> +
>  	GEM_BUG_ON(!tl->active_count);
>  	if (--tl->active_count)
>  		return;
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 174c7fdd02ff..7edcd0fef5c6 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1087,7 +1087,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
>  	 * precludes optimising to use semaphores serialisation of a single
>  	 * timeline across engines.
>  	 */
> -	prev = rcu_dereference_protected(timeline->last_request.request, 1);
> +	prev = rcu_dereference_protected(timeline->last_request.request,
> +					 lockdep_is_held(&timeline->mutex));
>  	if (prev && !i915_request_completed(prev)) {
>  		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
>  			i915_sw_fence_await_sw_fence(&rq->submit,
> --
> 2.23.0.rc1
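One more note on the i915_request.c hunk, since it is easy to miss why it
matters: with a literal "1" as the second argument, rcu_dereference_protected()
never checks anything, whereas lockdep_is_held(&timeline->mutex) turns the
claim into something CONFIG_PROVE_RCU can actually verify. A small standalone
sketch of the idiom (hypothetical types and names, not i915 code):

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct thing {
	int payload;
};

struct holder {
	struct mutex lock;
	struct thing __rcu *latest;	/* writers must hold lock */
};

static struct thing *holder_peek_locked(struct holder *h)
{
	/*
	 * Documents the locking rule and, with CONFIG_PROVE_RCU, splats
	 * if a caller reaches here without holding h->lock; passing a
	 * bare "1" instead would silence the check entirely.
	 */
	return rcu_dereference_protected(h->latest,
					 lockdep_is_held(&h->lock));
}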