gpu->mmu_context is the MMU context of the last job in the HW queue, which
isn't necessarily the same as the context from the bad job. Dump the MMU
context from the scheduler-determined bad submit to make it work as intended.

Fixes: 17e4660ae3d7 ("drm/etnaviv: implement per-process address spaces on MMUv2")
Signed-off-by: Lucas Stach <l.stach@xxxxxxxxxxxxxx>
---
 drivers/gpu/drm/etnaviv/etnaviv_dump.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 44b5f3c35aab..898f84a0fc30 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -130,9 +130,9 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 		return;
 	etnaviv_dump_core = false;
 
-	mutex_lock(&gpu->mmu_context->lock);
+	mutex_lock(&submit->mmu_context->lock);
 
-	mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
+	mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
 
 	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
 	n_obj = 5;
@@ -162,7 +162,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
 			__GFP_NORETRY);
 	if (!iter.start) {
-		mutex_unlock(&gpu->mmu_context->lock);
+		mutex_unlock(&submit->mmu_context->lock);
 		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
 		return;
 	}
@@ -174,18 +174,18 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 	memset(iter.hdr, 0, iter.data - iter.start);
 
 	etnaviv_core_dump_registers(&iter, gpu);
-	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
+	etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
 			      gpu->buffer.size,
 			      etnaviv_cmdbuf_get_va(&gpu->buffer,
-					&gpu->mmu_context->cmdbuf_mapping));
+					&submit->mmu_context->cmdbuf_mapping));
 
 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
 			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
 			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
-					&gpu->mmu_context->cmdbuf_mapping));
+					&submit->mmu_context->cmdbuf_mapping));
 
-	mutex_unlock(&gpu->mmu_context->lock);
+	mutex_unlock(&submit->mmu_context->lock);
 
 	/* Reserve space for the bomap */
 	if (n_bomap_pages) {
-- 
2.39.2