Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> writes: > In the next patch, we will use the irq_posted technique for another > engine interrupt, rather than use two members for the atomic updates, we > can use two bits of one instead. First, we need to update the > breadcrumbs to use the new common engine->irq_posted. > > v2: Use set_bit() rather than __set_bit() to ensure atomicity with > respect to other bits in the mask > > Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> > Cc: Mika Kuoppala <mika.kuoppala@xxxxxxxxx> > Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx Missing '>' at the end of this line. Reviewed-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxx> > --- > drivers/gpu/drm/i915/i915_drv.h | 2 +- > drivers/gpu/drm/i915/i915_irq.c | 2 +- > drivers/gpu/drm/i915/intel_breadcrumbs.c | 9 ++++----- > drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +++- > 4 files changed, 9 insertions(+), 8 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h > index ccb4fb804fca..652e40c6ca40 100644 > --- a/drivers/gpu/drm/i915/i915_drv.h > +++ b/drivers/gpu/drm/i915/i915_drv.h > @@ -3964,7 +3964,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req) > */ > if (engine->irq_seqno_barrier && > rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current && > - cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) { > + test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) { > struct task_struct *tsk; > > /* The ordering of irq_posted versus applying the barrier > diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c > index 6fefc34ef602..7e087c344265 100644 > --- a/drivers/gpu/drm/i915/i915_irq.c > +++ b/drivers/gpu/drm/i915/i915_irq.c > @@ -1033,7 +1033,7 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) > > static void notify_ring(struct intel_engine_cs *engine) > { > - smp_store_mb(engine->breadcrumbs.irq_posted, true); > + set_bit(ENGINE_IRQ_BREADCRUMB, 
&engine->irq_posted); > if (intel_engine_wakeup(engine)) > trace_i915_gem_request_notify(engine); > } > diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c > index c6fa77177615..6b24f2544b6b 100644 > --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c > +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c > @@ -81,7 +81,7 @@ static void irq_enable(struct intel_engine_cs *engine) > * we still need to force the barrier before reading the seqno, > * just in case. > */ > - engine->breadcrumbs.irq_posted = true; > + set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); > > /* Caller disables interrupts */ > spin_lock(&engine->i915->irq_lock); > @@ -95,8 +95,6 @@ static void irq_disable(struct intel_engine_cs *engine) > spin_lock(&engine->i915->irq_lock); > engine->irq_disable(engine); > spin_unlock(&engine->i915->irq_lock); > - > - engine->breadcrumbs.irq_posted = false; > } > > static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) > @@ -257,7 +255,8 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, > * in case the seqno passed. 
> */ > __intel_breadcrumbs_enable_irq(b); > - if (READ_ONCE(b->irq_posted)) > + if (test_bit(ENGINE_IRQ_BREADCRUMB, > + &engine->irq_posted)) > wake_up_process(to_wait(next)->tsk); > } > > @@ -610,7 +609,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) > if (intel_engine_has_waiter(engine)) { > b->timeout = wait_timeout(); > __intel_breadcrumbs_enable_irq(b); > - if (READ_ONCE(b->irq_posted)) > + if (test_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) > wake_up_process(b->first_wait->tsk); > } else { > /* sanitize the IMR and unmask any auxiliary interrupts */ > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h > index dbd32585f27a..a9ea84ea3155 100644 > --- a/drivers/gpu/drm/i915/intel_ringbuffer.h > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h > @@ -211,6 +211,9 @@ struct intel_engine_cs { > > struct intel_render_state *render_state; > > + unsigned long irq_posted; > +#define ENGINE_IRQ_BREADCRUMB 0 > + > /* Rather than have every client wait upon all user interrupts, > * with the herd waking after every interrupt and each doing the > * heavyweight seqno dance, we delegate the task (of being the > @@ -229,7 +232,6 @@ struct intel_engine_cs { > */ > struct intel_breadcrumbs { > struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */ > - bool irq_posted; > > spinlock_t lock; /* protects the lists of requests; irqsafe */ > struct rb_root waiters; /* sorted by retirement, priority */ > -- > 2.11.0 > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@xxxxxxxxxxxxxxxxxxxxx > https://lists.freedesktop.org/mailman/listinfo/intel-gfx _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx