From: Rob Clark <robdclark@xxxxxxxxxxxx>

commit 14eb0cb4e9a7323c8735cf6c681ed8423ce6ae06 upstream.

In theory a context can be destroyed and a new one allocated at the same
address, making the pointer comparison to detect when we don't need to
update the current pagetables invalid.  Instead assign a sequence number
to each context on creation, and use this for the check.

Fixes: 84c31ee16f90 ("drm/msm/a6xx: Add support for per-instance pagetables")
Signed-off-by: Rob Clark <robdclark@xxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c |  6 +++---
 drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 11 ++++++++++-
 drivers/gpu/drm/msm/msm_drv.c         |  3 +++
 drivers/gpu/drm/msm/msm_drv.h         |  1 +
 4 files changed, 17 insertions(+), 4 deletions(-)

--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -99,7 +99,7 @@ static void a6xx_set_pagetable(struct a6
 	u32 asid;
 	u64 memptr = rbmemptr(ring, ttbr0);
 
-	if (ctx == a6xx_gpu->cur_ctx)
+	if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
 		return;
 
 	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -132,7 +132,7 @@ static void a6xx_set_pagetable(struct a6
 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 	OUT_RING(ring, 0x31);
 
-	a6xx_gpu->cur_ctx = ctx;
+	a6xx_gpu->cur_ctx_seqno = ctx->seqno;
 }
 
 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
@@ -887,7 +887,7 @@ static int a6xx_hw_init(struct msm_gpu *
 	/* Always come up on rb 0 */
 	a6xx_gpu->cur_ring = gpu->rb[0];
 
-	a6xx_gpu->cur_ctx = NULL;
+	a6xx_gpu->cur_ctx_seqno = 0;
 
 	/* Enable the SQE to start the CP engine */
 	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -19,7 +19,16 @@ struct a6xx_gpu {
 	uint64_t sqe_iova;
 
 	struct msm_ringbuffer *cur_ring;
-	struct msm_file_private *cur_ctx;
+
+	/**
+	 * cur_ctx_seqno:
+	 *
+	 * The ctx->seqno value of the context with current pgtables
+	 * installed.  Tracked by seqno rather than pointer value to
+	 * avoid dangling pointers, and cases where a ctx can be freed
+	 * and a new one created with the same address.
+	 */
+	int cur_ctx_seqno;
 
 	struct a6xx_gmu gmu;
 
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -581,6 +581,7 @@ static void load_gpu(struct drm_device *
 
 static int context_init(struct drm_device *dev, struct drm_file *file)
 {
+	static atomic_t ident = ATOMIC_INIT(0);
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx;
 
@@ -594,6 +595,8 @@ static int context_init(struct drm_devic
 	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
 	file->driver_priv = ctx;
 
+	ctx->seqno = atomic_inc_return(&ident);
+
 	return 0;
 }
 
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -58,6 +58,7 @@ struct msm_file_private {
 	int queueid;
 	struct msm_gem_address_space *aspace;
 	struct kref ref;
+	int seqno;
 };
 
 enum msm_mdp_plane_property {
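
For readers less familiar with the driver, the hazard the patch closes is the
classic freed-and-reallocated-pointer problem: the context tracked in cur_ctx
can be freed, and the next context can be allocated at the same address, so the
pointer comparison falsely concludes the pagetables are already installed.  The
snippet below is an illustrative standalone userspace sketch, not driver code:
fake_ctx, ctx_create() and next_seqno are made-up stand-ins showing why an
address comparison can falsely match while a creation-time sequence number
cannot.

/*
 * Illustrative only -- not part of the patch.  After free(), the allocator
 * may hand the same address to a new allocation, so a saved pointer value
 * can "match" a completely different context.  A seqno assigned once at
 * creation never repeats, so the stale-match cannot happen.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_ctx {
	int seqno;	/* assigned once at creation, never reused */
};

static int next_seqno;	/* stands in for the driver's atomic 'ident' counter */

static struct fake_ctx *ctx_create(void)
{
	struct fake_ctx *ctx = calloc(1, sizeof(*ctx));

	ctx->seqno = ++next_seqno;
	return ctx;
}

int main(void)
{
	struct fake_ctx *old = ctx_create();
	uintptr_t cur_ctx_addr = (uintptr_t)old;	/* pointer-style tracking */
	int cur_ctx_seqno = old->seqno;			/* seqno-style tracking */

	/* Old context goes away; the allocator may reuse the same address. */
	free(old);
	struct fake_ctx *new_ctx = ctx_create();

	/* Only fires if the address really was reused, but it can be: */
	if ((uintptr_t)new_ctx == cur_ctx_addr)
		printf("pointer check: match -> pagetable switch wrongly skipped\n");

	/* The seqno check detects the new context regardless of its address: */
	if (new_ctx->seqno != cur_ctx_seqno)
		printf("seqno check: mismatch -> pagetables correctly switched\n");

	free(new_ctx);
	return 0;
}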