On Mon, Aug 12, 2024 at 7:09 AM Akhil P Oommen <quic_akhilpo@xxxxxxxxxxx> wrote:
>
> On Wed, Aug 07, 2024 at 01:34:27PM +0100, Connor Abbott wrote:
> > With a7xx, we need to import a new header for each new generation and
> > switch to a different list of registers, instead of making
> > backwards-compatible changes. Using the helpers inadvertently made a750
> > use the a740 list of registers; use the family directly instead to fix
> > this.
>
> This won't scale. What about other GPUs in the same generation that
> have a different register list? You don't see that issue currently
> because there is no support for lower-tier a7x GPUs yet.

GPUs in the same generation always have the same register list; e.g.
gen7_4_0 has the same register list as gen7_0_0. kgsl has already
moved on to gen8, which redoes everything again and will require a
separate codepath; they only have one more gen7 register list than we
do, and I doubt they'll add many more. So the kgsl approach would be
pointless over-engineering.

Connor

> I think we should move to a "snapshot block list" like in the downstream
> driver if you want to simplify the whole logic. Otherwise, we should
> leave the chipid check as it is and just fix up a750 configurations.
>
> -Akhil
> >
> > Fixes: f3f8207d8aed ("drm/msm: Add devcoredump support for a750")
> > Signed-off-by: Connor Abbott <cwabbott0@xxxxxxxxx>
> > ---
> >  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 41 ++++++++++++++---------------
> >  1 file changed, 20 insertions(+), 21 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
> > index 77146d30bcaa..c641ee7dec78 100644
> > --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
> > +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
> > @@ -390,18 +390,18 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
> >  	const u32 *debugbus_blocks, *gbif_debugbus_blocks;
> >  	int i;
> >
> > -	if (adreno_is_a730(adreno_gpu)) {
> > +	if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
> >  		debugbus_blocks = gen7_0_0_debugbus_blocks;
> >  		debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks);
> >  		gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
> >  		gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
> > -	} else if (adreno_is_a740_family(adreno_gpu)) {
> > +	} else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
> >  		debugbus_blocks = gen7_2_0_debugbus_blocks;
> >  		debugbus_blocks_count = ARRAY_SIZE(gen7_2_0_debugbus_blocks);
> >  		gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
> >  		gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
> >  	} else {
> > -		BUG_ON(!adreno_is_a750(adreno_gpu));
> > +		BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
> >  		debugbus_blocks = gen7_9_0_debugbus_blocks;
> >  		debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_debugbus_blocks);
> >  		gbif_debugbus_blocks = gen7_9_0_gbif_debugbus_blocks;
> > @@ -511,7 +511,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
> >  	const struct a6xx_debugbus_block *cx_debugbus_blocks;
> >
> >  	if (adreno_is_a7xx(adreno_gpu)) {
> > -		BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)));
> > +		BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
> >  		cx_debugbus_blocks = a7xx_cx_debugbus_blocks;
> >  		nr_cx_debugbus_blocks = ARRAY_SIZE(a7xx_cx_debugbus_blocks);
> >  	} else {
> > @@ -662,11 +662,11 @@ static void a7xx_get_dbgahb_clusters(struct msm_gpu *gpu,
> >  	const struct gen7_sptp_cluster_registers *dbgahb_clusters;
> >  	unsigned dbgahb_clusters_size;
> >
> > -	if (adreno_is_a730(adreno_gpu)) {
> > +	if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
> >  		dbgahb_clusters = gen7_0_0_sptp_clusters;
> >  		dbgahb_clusters_size = ARRAY_SIZE(gen7_0_0_sptp_clusters);
> >  	} else {
> > -		BUG_ON(!adreno_is_a740_family(adreno_gpu));
> > +		BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
> >  		dbgahb_clusters = gen7_2_0_sptp_clusters;
> >  		dbgahb_clusters_size = ARRAY_SIZE(gen7_2_0_sptp_clusters);
> >  	}
> > @@ -820,14 +820,14 @@ static void a7xx_get_clusters(struct msm_gpu *gpu,
> >  	const struct gen7_cluster_registers *clusters;
> >  	unsigned clusters_size;
> >
> > -	if (adreno_is_a730(adreno_gpu)) {
> > +	if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
> >  		clusters = gen7_0_0_clusters;
> >  		clusters_size = ARRAY_SIZE(gen7_0_0_clusters);
> > -	} else if (adreno_is_a740_family(adreno_gpu)) {
> > +	} else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
> >  		clusters = gen7_2_0_clusters;
> >  		clusters_size = ARRAY_SIZE(gen7_2_0_clusters);
> >  	} else {
> > -		BUG_ON(!adreno_is_a750(adreno_gpu));
> > +		BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
> >  		clusters = gen7_9_0_clusters;
> >  		clusters_size = ARRAY_SIZE(gen7_9_0_clusters);
> >  	}
> > @@ -895,7 +895,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu,
> >  	if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
> >  		return;
> >
> > -	if (adreno_is_a730(adreno_gpu)) {
> > +	if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
> >  		gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 3);
> >  	}
> >
> > @@ -925,7 +925,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu,
> >  			datasize);
> >
> >  out:
> > -	if (adreno_is_a730(adreno_gpu)) {
> > +	if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
> >  		gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 0);
> >  	}
> >  }
> > @@ -958,14 +958,14 @@ static void a7xx_get_shaders(struct msm_gpu *gpu,
> >  	unsigned num_shader_blocks;
> >  	int i;
> >
> > -	if (adreno_is_a730(adreno_gpu)) {
> > +	if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
> >  		shader_blocks = gen7_0_0_shader_blocks;
> >  		num_shader_blocks = ARRAY_SIZE(gen7_0_0_shader_blocks);
> > -	} else if (adreno_is_a740_family(adreno_gpu)) {
> > +	} else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
> >  		shader_blocks = gen7_2_0_shader_blocks;
> >  		num_shader_blocks = ARRAY_SIZE(gen7_2_0_shader_blocks);
> >  	} else {
> > -		BUG_ON(!adreno_is_a750(adreno_gpu));
> > +		BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
> >  		shader_blocks = gen7_9_0_shader_blocks;
> >  		num_shader_blocks = ARRAY_SIZE(gen7_9_0_shader_blocks);
> >  	}
> > @@ -1350,14 +1350,14 @@ static void a7xx_get_registers(struct msm_gpu *gpu,
> >  	const u32 *pre_crashdumper_regs;
> >  	const struct gen7_reg_list *reglist;
> >
> > -	if (adreno_is_a730(adreno_gpu)) {
> > +	if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
> >  		reglist = gen7_0_0_reg_list;
> >  		pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
> > -	} else if (adreno_is_a740_family(adreno_gpu)) {
> > +	} else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
> >  		reglist = gen7_2_0_reg_list;
> >  		pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
> >  	} else {
> > -		BUG_ON(!adreno_is_a750(adreno_gpu));
> > +		BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
> >  		reglist = gen7_9_0_reg_list;
> >  		pre_crashdumper_regs = gen7_9_0_pre_crashdumper_gpu_registers;
> >  	}
> > @@ -1407,8 +1407,7 @@ static void a7xx_get_post_crashdumper_registers(struct msm_gpu *gpu,
> >  	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> >  	const u32 *regs;
> >
> > -	BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu) ||
> > -		 adreno_is_a750(adreno_gpu)));
> > +	BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
> >  	regs = gen7_0_0_post_crashdumper_registers;
> >
> >  	a7xx_get_ahb_gpu_registers(gpu,
> > @@ -1514,11 +1513,11 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
> >  	const struct a6xx_indexed_registers *indexed_regs;
> >  	int i, indexed_count, mempool_count;
> >
> > -	if (adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)) {
> > +	if (adreno_gpu->info->family <= ADRENO_7XX_GEN2) {
> >  		indexed_regs = a7xx_indexed_reglist;
> >  		indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
> >  	} else {
> > -		BUG_ON(!adreno_is_a750(adreno_gpu));
> > +		BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
> >  		indexed_regs = gen7_9_0_cp_indexed_reg_list;
> >  		indexed_count = ARRAY_SIZE(gen7_9_0_cp_indexed_reg_list);
> >  	}
> >
> > --
> > 2.31.1
> >
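For illustration, here is a rough sketch of the table-driven direction
Akhil mentions ("snapshot block list"), built only from the lists
already named in the patch above. This is a hypothetical sketch, not
code from msm or the downstream kgsl driver: struct
a7xx_snapshot_lists, the gen7_*_snapshot_lists instances and
a7xx_get_snapshot_lists() are invented names, while the
gen7_0_0_*/gen7_2_0_*/gen7_9_0_* arrays and the ADRENO_7XX_GEN*
family values are the ones a6xx_gpu_state.c already uses.

/*
 * Hypothetical sketch only: bundle the per-generation a7xx lists into
 * one structure so each a7xx_get_*() helper does a single family
 * lookup instead of repeating the if/else chain.  Only a few of the
 * lists are shown; the GBIF debugbus, SP/TP clusters, shader blocks
 * and indexed registers would be added the same way.
 */
struct a7xx_snapshot_lists {
	const u32 *debugbus_blocks;
	unsigned int debugbus_blocks_count;
	const struct gen7_cluster_registers *clusters;
	unsigned int clusters_count;
	const struct gen7_reg_list *reg_list;
	const u32 *pre_crashdumper_regs;
};

static const struct a7xx_snapshot_lists gen7_0_0_snapshot_lists = {
	.debugbus_blocks = gen7_0_0_debugbus_blocks,
	.debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks),
	.clusters = gen7_0_0_clusters,
	.clusters_count = ARRAY_SIZE(gen7_0_0_clusters),
	.reg_list = gen7_0_0_reg_list,
	.pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers,
};

static const struct a7xx_snapshot_lists gen7_2_0_snapshot_lists = {
	.debugbus_blocks = gen7_2_0_debugbus_blocks,
	.debugbus_blocks_count = ARRAY_SIZE(gen7_2_0_debugbus_blocks),
	.clusters = gen7_2_0_clusters,
	.clusters_count = ARRAY_SIZE(gen7_2_0_clusters),
	.reg_list = gen7_2_0_reg_list,
	/* a740 reuses the gen7_0_0 pre-crashdumper list, as in the patch */
	.pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers,
};

static const struct a7xx_snapshot_lists gen7_9_0_snapshot_lists = {
	.debugbus_blocks = gen7_9_0_debugbus_blocks,
	.debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_debugbus_blocks),
	.clusters = gen7_9_0_clusters,
	.clusters_count = ARRAY_SIZE(gen7_9_0_clusters),
	.reg_list = gen7_9_0_reg_list,
	.pre_crashdumper_regs = gen7_9_0_pre_crashdumper_gpu_registers,
};

/* Single place that maps an a7xx family to its snapshot lists. */
static const struct a7xx_snapshot_lists *
a7xx_get_snapshot_lists(struct adreno_gpu *adreno_gpu)
{
	if (adreno_gpu->info->family == ADRENO_7XX_GEN1)
		return &gen7_0_0_snapshot_lists;
	if (adreno_gpu->info->family == ADRENO_7XX_GEN2)
		return &gen7_2_0_snapshot_lists;

	/* Same policy as the existing checks for unknown a7xx parts. */
	BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
	return &gen7_9_0_snapshot_lists;
}

Each a7xx_get_*() helper would then do one lookup and read the fields
it needs, and supporting a new gen7 part would mean adding one more
instance of the struct instead of touching every helper.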