Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> writes:

> When dumping the engine, we print out the current register values. This
> requires the rpm wakeref. If the device is alseep, we can assume the
> engine is asleep (and the register state is uninteresting) so skip and
> only acquire the rpm wakeref if the device is already awake.
>
> Reported-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
> Cc: Mika Kuoppala <mika.kuoppala@xxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/intel_engine_cs.c  | 162 ++++++++++++++++++--------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h |   6 +-
>  2 files changed, 94 insertions(+), 74 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index 3efc589a7f37..2df9a2d038ee 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -691,7 +691,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
>  	engine->context_unpin(engine, engine->i915->kernel_context);
>  }
>
> -u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
> +u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
>  	u64 acthd;
> @@ -707,7 +707,7 @@ u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
>  	return acthd;
>  }
>
> -u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
> +u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
>  	u64 bbaddr;
> @@ -1705,73 +1705,20 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
>  	}
>  }
>
> -void intel_engine_dump(struct intel_engine_cs *engine,
> -		       struct drm_printer *m,
> -		       const char *header, ...)
> +static void intel_engine_print_registers(const struct intel_engine_cs *engine,
> +					 struct drm_printer *m)
>  {
> -	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
> -	const struct intel_engine_execlists * const execlists = &engine->execlists;
> -	struct i915_gpu_error * const error = &engine->i915->gpu_error;
>  	struct drm_i915_private *dev_priv = engine->i915;
> -	struct drm_i915_gem_request *rq;
> -	struct rb_node *rb;
> -	char hdr[80];
> +	const struct intel_engine_execlists * const execlists =
> +		&engine->execlists;
>  	u64 addr;
>
> -	if (header) {
> -		va_list ap;
> -
> -		va_start(ap, header);
> -		drm_vprintf(m, header, &ap);
> -		va_end(ap);
> -	}
> -
> -	if (i915_terminally_wedged(&engine->i915->gpu_error))
> -		drm_printf(m, "*** WEDGED ***\n");
> -
> -	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
> -		   intel_engine_get_seqno(engine),
> -		   intel_engine_last_submit(engine),
> -		   engine->hangcheck.seqno,
> -		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
> -		   engine->timeline->inflight_seqnos);
> -	drm_printf(m, "\tReset count: %d (global %d)\n",
> -		   i915_reset_engine_count(error, engine),
> -		   i915_reset_count(error));
> -
> -	rcu_read_lock();
> -
> -	drm_printf(m, "\tRequests:\n");
> -
> -	rq = list_first_entry(&engine->timeline->requests,
> -			      struct drm_i915_gem_request, link);
> -	if (&rq->link != &engine->timeline->requests)
> -		print_request(m, rq, "\t\tfirst ");
> -
> -	rq = list_last_entry(&engine->timeline->requests,
> -			     struct drm_i915_gem_request, link);
> -	if (&rq->link != &engine->timeline->requests)
> -		print_request(m, rq, "\t\tlast ");
> -
> -	rq = i915_gem_find_active_request(engine);
> -	if (rq) {
> -		print_request(m, rq, "\t\tactive ");
> -		drm_printf(m,
> -			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
> -			   rq->head, rq->postfix, rq->tail,
> -			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
> -			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
> -	}
> -
> -	drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
> -		   I915_READ(RING_START(engine->mmio_base)),
> -		   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
> -	drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
> -		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
> -		   rq ? rq->ring->head : 0);
> -	drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
> -		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
> -		   rq ? rq->ring->tail : 0);
> +	drm_printf(m, "\tRING_START: 0x%08x\n",
> +		   I915_READ(RING_START(engine->mmio_base)));
> +	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
> +		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
> +	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
> +		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
>  	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
>  		   I915_READ(RING_CTL(engine->mmio_base)),
>  		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
> @@ -1780,6 +1727,11 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>  		   I915_READ(RING_MI_MODE(engine->mmio_base)),
>  		   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
>  	}
> +
> +	if (INTEL_GEN(dev_priv) >= 6) {
> +		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
> +	}
> +
>  	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
>  		drm_printf(m, "\tSYNC_0: 0x%08x\n",
>  			   I915_READ(RING_SYNC_0(engine->mmio_base)));
> @@ -1790,8 +1742,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>  			   I915_READ(RING_SYNC_2(engine->mmio_base)));
>  	}
>
> -	rcu_read_unlock();
> -
>  	addr = intel_engine_get_active_head(engine);
>  	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
>  		   upper_32_bits(addr), lower_32_bits(addr));
> @@ -1853,10 +1803,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>
>  	rcu_read_lock();
>  	for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
> +		struct drm_i915_gem_request *rq;
>  		unsigned int count;
>
>  		rq = port_unpack(&execlists->port[idx], &count);
>  		if (rq) {
> +			char hdr[80];
> +
>  			snprintf(hdr, sizeof(hdr),
>  				 "\t\tELSP[%d] count=%d, rq: ",
>  				 idx, count);
> @@ -1875,6 +1828,77 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>  		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
>  			   I915_READ(RING_PP_DIR_DCLV(engine)));
>  	}
> +}
> +
> +void intel_engine_dump(struct intel_engine_cs *engine,
> +		       struct drm_printer *m,
> +		       const char *header, ...)
> +{
> +	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
> +	const struct intel_engine_execlists * const execlists = &engine->execlists;
> +	struct i915_gpu_error * const error = &engine->i915->gpu_error;
> +	struct drm_i915_gem_request *rq;
> +	struct rb_node *rb;
> +
> +	if (header) {
> +		va_list ap;
> +
> +		va_start(ap, header);
> +		drm_vprintf(m, header, &ap);
> +		va_end(ap);
> +	}
> +
> +	if (i915_terminally_wedged(&engine->i915->gpu_error))
> +		drm_printf(m, "*** WEDGED ***\n");
> +
> +	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
> +		   intel_engine_get_seqno(engine),
> +		   intel_engine_last_submit(engine),
> +		   engine->hangcheck.seqno,
> +		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
> +		   engine->timeline->inflight_seqnos);
> +	drm_printf(m, "\tReset count: %d (global %d)\n",
> +		   i915_reset_engine_count(error, engine),
> +		   i915_reset_count(error));
> +
> +	rcu_read_lock();
> +
> +	drm_printf(m, "\tRequests:\n");
> +
> +	rq = list_first_entry(&engine->timeline->requests,
> +			      struct drm_i915_gem_request, link);
> +	if (&rq->link != &engine->timeline->requests)
> +		print_request(m, rq, "\t\tfirst ");
> +
> +	rq = list_last_entry(&engine->timeline->requests,
> +			     struct drm_i915_gem_request, link);
> +	if (&rq->link != &engine->timeline->requests)
> +		print_request(m, rq, "\t\tlast ");
> +
> +	rq = i915_gem_find_active_request(engine);
> +	if (rq) {
> +		print_request(m, rq, "\t\tactive ");
> +		drm_printf(m,
> +			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
> +			   rq->head, rq->postfix, rq->tail,
> +			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
> +			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
> +		drm_printf(m, "\t\tring->start: 0x%08x\n",
> +			   i915_ggtt_offset(rq->ring->vma));
> +		drm_printf(m, "\t\tring->head: 0x%08x\n",
> +			   rq->ring->head);
> +		drm_printf(m, "\t\tring->tail: 0x%08x\n",
> +			   rq->ring->tail);
> +	}
> +
> +	rcu_read_unlock();
> +
> +	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
> +		intel_engine_print_registers(engine, m);
> +		intel_runtime_pm_put(engine->i915);
> +	} else {
> +		drm_printf(m, "Device is alseep; skipping register dump\n");
> +	}

s/alseep/asleep

Reviewed-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxxxxxxxx>

>
>  	spin_lock_irq(&engine->timeline->lock);
>  	list_for_each_entry(rq, &engine->timeline->requests, link)
> @@ -1897,10 +1921,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>  	}
>  	spin_unlock_irq(&b->rb_lock);
>
> -	if (INTEL_GEN(dev_priv) >= 6) {
> -		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
> -	}
> -
>  	drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
>  		   engine->irq_posted,
>  		   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 8f1a4badf812..51523ad049de 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -659,7 +659,7 @@ intel_engine_flag(const struct intel_engine_cs *engine)
>  }
>
>  static inline u32
> -intel_read_status_page(struct intel_engine_cs *engine, int reg)
> +intel_read_status_page(const struct intel_engine_cs *engine, int reg)
>  {
>  	/* Ensure that the compiler doesn't optimize away the load. */
>  	return READ_ONCE(engine->status_page.page_addr[reg]);
> @@ -817,8 +817,8 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
>  int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
>  int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
>
> -u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
> -u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
> +u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
> +u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
>
>  static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
>  {
> --
> 2.16.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
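As an aside for anyone skimming the archive: the behavioural change boils down to the conditional wakeref pattern quoted above. A minimal sketch of the idea, restated from the patch (engine_dump_registers() is a stand-in wrapper name, not an actual i915 symbol; the rpm helpers and intel_engine_print_registers() are the ones the patch uses):

/*
 * Sketch only: take the rpm wakeref opportunistically. If the device
 * already holds a wakeref we grab a reference and may read registers;
 * if it is runtime suspended we skip the MMIO reads entirely, since a
 * sleeping engine has nothing interesting to report.
 */
static void engine_dump_registers(struct intel_engine_cs *engine,
				  struct drm_printer *m)
{
	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
		intel_engine_print_registers(engine, m); /* MMIO reads */
		intel_runtime_pm_put(engine->i915);
	} else {
		drm_printf(m, "Device is asleep; skipping register dump\n");
	}
}

The key property is that intel_runtime_pm_get_if_in_use() only succeeds when the device is already awake, so the debug dump itself no longer forces a runtime resume.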