> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces at lists.freedesktop.org] On Behalf Of Tom St Denis
> Sent: Monday, May 15, 2017 9:49 AM
> To: amd-gfx at lists.freedesktop.org
> Cc: StDenis, Tom
> Subject: [PATCH 5/5] drm/amd/amdgpu: Use modern 32/64-bit types in gfx6
>
> Switch to uintNN_t from "uNN" types to be more consistent
> with modern coding styles.

I think technically the uNN types are native for the kernel; the C99 types are just for convenience (the typedefs are sketched at the end of this mail). Seems like a lot of churn to me.

Alex

>
> Signed-off-by: Tom St Denis <tom.stdenis at amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 150 +++++++++++++++++-----------------
>  1 file changed, 75 insertions(+), 75 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
> index 1351f9230fcd..a6c88f98df08 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
> @@ -68,8 +68,8 @@ MODULE_FIRMWARE("radeon/hainan_me.bin");
>  MODULE_FIRMWARE("radeon/hainan_ce.bin");
>  MODULE_FIRMWARE("radeon/hainan_rlc.bin");
>
> -static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
> -static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
> +static uint32_t gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
> +static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile uint32_t *buffer);
>  //static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
>  static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
>
> @@ -83,7 +83,7 @@ static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
>  #define MACRO_TILE_ASPECT(x)	((x) << 18)
>  #define NUM_BANKS(x)		((x) << 20)
>
> -static const u32 verde_rlc_save_restore_register_list[] =
> +static const uint32_t verde_rlc_save_restore_register_list[] =
>  {
>  	(0x8000 << 16) | (0x98f4 >> 2),
>  	0x00000000,
> @@ -393,8 +393,8 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
>
>  static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
>  {
> -	const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
> -	u32 reg_offset, split_equal_to_row_size, *tilemode;
> +	const uint32_t num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
> +	uint32_t reg_offset, split_equal_to_row_size, *tilemode;
>
>  	memset(adev->gfx.config.tile_mode_array, 0, sizeof(adev->gfx.config.tile_mode_array));
>  	tilemode = adev->gfx.config.tile_mode_array;
> @@ -1089,10 +1089,10 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
>  	}
>  }
>
> -static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
> -				  u32 sh_num, u32 instance)
> +static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, uint32_t se_num,
> +				  uint32_t sh_num, uint32_t instance)
>  {
> -	u32 data;
> +	uint32_t data;
>
>  	if (instance == 0xffffffff)
>  		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
> @@ -1114,14 +1114,14 @@ static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
>  	WREG32(mmGRBM_GFX_INDEX, data);
>  }
>
> -static u32 gfx_v6_0_create_bitmask(u32 bit_width)
> +static uint32_t gfx_v6_0_create_bitmask(uint32_t bit_width)
>  {
> -	return (u32)(((u64)1 << bit_width) - 1);
> +	return (uint32_t)(((uint64_t)1 << bit_width) - 1);
>  }
>
> -static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
> +static uint32_t gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
>  {
> -	u32 data, mask;
> +	uint32_t data, mask;
>
>  	data = RREG32(mmCC_RB_BACKEND_DISABLE) |
>  		RREG32(mmGC_USER_RB_BACKEND_DISABLE);
> @@ -1134,7 +1134,7 @@ static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
>  	return ~data & mask;
>  }
>
> -static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
> +static void gfx_v6_0_raster_config(struct amdgpu_device *adev, uint32_t *rconf)
>  {
>  	switch (adev->asic_type) {
>  	case CHIP_TAHITI:
> @@ -1167,7 +1167,7 @@ static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
>  }
>
>  static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
> -						     u32 raster_config, unsigned rb_mask,
> +						     uint32_t raster_config, unsigned rb_mask,
>  						     unsigned num_rb)
>  {
>  	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
> @@ -1259,10 +1259,10 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
>  static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
>  {
>  	int i, j;
> -	u32 data;
> -	u32 raster_config = 0;
> -	u32 active_rbs = 0;
> -	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
> +	uint32_t data;
> +	uint32_t raster_config = 0;
> +	uint32_t active_rbs = 0;
> +	uint32_t rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
>  		adev->gfx.config.max_sh_per_se;
>  	unsigned num_rb_pipes;
>
> @@ -1311,9 +1311,9 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
>  }
>
>  static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
> -						 u32 bitmap)
> +						 uint32_t bitmap)
>  {
> -	u32 data;
> +	uint32_t data;
>
>  	if (!bitmap)
>  		return;
> @@ -1324,9 +1324,9 @@ static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
>  	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
>  }
>
> -static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
> +static uint32_t gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
>  {
> -	u32 data, mask;
> +	uint32_t data, mask;
>
>  	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
>  		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
> @@ -1339,8 +1339,8 @@ static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
>  static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
>  {
>  	int i, j, k;
> -	u32 data, mask;
> -	u32 active_cu = 0;
> +	uint32_t data, mask;
> +	uint32_t active_cu = 0;
>
>  	mutex_lock(&adev->grbm_idx_mutex);
>  	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
> @@ -1371,11 +1371,11 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev)
>
>  static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
>  {
> -	u32 gb_addr_config = 0;
> -	u32 mc_shared_chmap, mc_arb_ramcfg;
> -	u32 sx_debug_1;
> -	u32 hdp_host_path_cntl;
> -	u32 tmp;
> +	uint32_t gb_addr_config = 0;
> +	uint32_t mc_shared_chmap, mc_arb_ramcfg;
> +	uint32_t sx_debug_1;
> +	uint32_t hdp_host_path_cntl;
> +	uint32_t tmp;
>
>  	switch (adev->asic_type) {
>  	case CHIP_TAHITI:
> @@ -1661,8 +1661,8 @@ static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
>  	amdgpu_ring_write(ring, 0x1);
>  }
>
> -static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
> -				     u64 seq, unsigned flags)
> +static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
> +				     uint64_t seq, unsigned flags)
>  {
>  	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
>  	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
> @@ -1693,7 +1693,7 @@ static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
>  				  struct amdgpu_ib *ib,
>  				  unsigned vm_id, bool ctx_switch)
>  {
> -	u32 header, control = 0;
> +	uint32_t header, control = 0;
>
>  	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
>  	if (ctx_switch) {
> @@ -1809,7 +1809,7 @@ static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
>  	const struct gfx_firmware_header_v1_0 *ce_hdr;
>  	const struct gfx_firmware_header_v1_0 *me_hdr;
>  	const __le32 *fw_data;
> -	u32 fw_size;
> +	uint32_t fw_size;
>
>  	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
>  		return -EINVAL;
> @@ -1925,10 +1925,10 @@ static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev)
>  static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
>  {
>  	struct amdgpu_ring *ring;
> -	u32 tmp;
> -	u32 rb_bufsz;
> +	uint32_t tmp;
> +	uint32_t rb_bufsz;
>  	int r;
> -	u64 rptr_addr;
> +	uint64_t rptr_addr;
>
>  	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
>  	WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
> @@ -1979,12 +1979,12 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
>  	return 0;
>  }
>
> -static u64 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
> +static uint64_t gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
>  {
>  	return ring->adev->wb.wb[ring->rptr_offs];
>  }
>
> -static u64 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
> +static uint64_t gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
>  {
>  	struct amdgpu_device *adev = ring->adev;
>
> @@ -2025,10 +2025,10 @@ static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
>  static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
>  {
>  	struct amdgpu_ring *ring;
> -	u32 tmp;
> -	u32 rb_bufsz;
> +	uint32_t tmp;
> +	uint32_t rb_bufsz;
>  	int i, r;
> -	u64 rptr_addr;
> +	uint64_t rptr_addr;
>
>  	/* ring1 - compute only */
>  	/* Set ring buffer size */
> @@ -2098,8 +2098,8 @@ static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
>  static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
>  					       bool enable)
>  {
> -	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
> -	u32 mask;
> +	uint32_t tmp = RREG32(mmCP_INT_CNTL_RING0);
> +	uint32_t mask;
>  	int i;
>
>  	if (enable)
> @@ -2259,16 +2259,16 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
>
>  static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
>  {
> -	const u32 *src_ptr;
> -	volatile u32 *dst_ptr;
> -	u32 dws, i;
> -	u64 reg_list_mc_addr;
> +	const uint32_t *src_ptr;
> +	volatile uint32_t *dst_ptr;
> +	uint32_t dws, i;
> +	uint64_t reg_list_mc_addr;
>  	const struct cs_section_def *cs_data;
>  	int r;
>
>  	adev->gfx.rlc.reg_list = verde_rlc_save_restore_register_list;
>  	adev->gfx.rlc.reg_list_size =
> -		(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
> +		(uint32_t)ARRAY_SIZE(verde_rlc_save_restore_register_list);
>
>  	adev->gfx.rlc.cs_data = si_cs_data;
>  	src_ptr = adev->gfx.rlc.reg_list;
> @@ -2397,18 +2397,18 @@ static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
>  	}
>  }
>
> -static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
> +static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, uint32_t rlc)
>  {
> -	u32 tmp;
> +	uint32_t tmp;
>
>  	tmp = RREG32(mmRLC_CNTL);
>  	if (tmp != rlc)
>  		WREG32(mmRLC_CNTL, rlc);
>  }
>
> -static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
> +static uint32_t gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
>  {
> -	u32 data, orig;
> +	uint32_t data, orig;
>
>  	orig = data = RREG32(mmRLC_CNTL);
>
> @@ -2449,7 +2449,7 @@ static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
>
>  static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
>  {
> -	u32 tmp;
> +	uint32_t tmp;
>
>  	/* Enable LBPW only for DDR3 */
>  	tmp = RREG32(mmMC_SEQ_MISC0);
> @@ -2464,10 +2464,10 @@ static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
>
>  static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
>  {
> -	u32 i;
> +	uint32_t i;
>  	const struct rlc_firmware_header_v1_0 *hdr;
>  	const __le32 *fw_data;
> -	u32 fw_size;
> +	uint32_t fw_size;
>
>
>  	if (!adev->gfx.rlc_fw)
> @@ -2509,7 +2509,7 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
>
>  static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
>  {
> -	u32 data, orig, tmp;
> +	uint32_t data, orig, tmp;
>
>  	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
>
> @@ -2549,7 +2549,7 @@ static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
>  static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
>  {
>
> -	u32 data, orig, tmp = 0;
> +	uint32_t data, orig, tmp = 0;
>
>  	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
>  		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
> @@ -2629,7 +2629,7 @@ static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
>
>  static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
>  {
> -	u32 data, orig;
> +	uint32_t data, orig;
>
>  	orig = data = RREG32(mmRLC_PG_CNTL);
>  	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
> @@ -2647,10 +2647,10 @@ static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
>  static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
>  {
>  	const __le32 *fw_data;
> -	volatile u32 *dst_ptr;
> +	volatile uint32_t *dst_ptr;
>  	int me, i, max_me = 4;
> -	u32 bo_offset = 0;
> -	u32 table_offset, table_size;
> +	uint32_t bo_offset = 0;
> +	uint32_t table_offset, table_size;
>
>  	if (adev->asic_type == CHIP_KAVERI)
>  		max_me = 5;
> @@ -2726,7 +2726,7 @@ static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
>
>  static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
>  {
> -	u32 tmp;
> +	uint32_t tmp;
>
>  	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
>
> @@ -2739,7 +2739,7 @@ static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
>  static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
>  					    bool enable)
>  {
> -	u32 data, orig;
> +	uint32_t data, orig;
>
>  	orig = data = RREG32(mmRLC_PG_CNTL);
>  	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
> @@ -2753,7 +2753,7 @@ static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
>  static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
>  					     bool enable)
>  {
> -	u32 data, orig;
> +	uint32_t data, orig;
>
>  	orig = data = RREG32(mmRLC_PG_CNTL);
>  	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
> @@ -2766,7 +2766,7 @@ static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
>
>  static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
>  {
> -	u32 tmp;
> +	uint32_t tmp;
>
>  	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
>  	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_SRC, 1);
> @@ -2786,9 +2786,9 @@ static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
>  	gfx_v6_0_enable_gfx_dynamic_mgpg(adev, enable);
>  }
>
> -static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
> +static uint32_t gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
>  {
> -	u32 count = 0;
> +	uint32_t count = 0;
>  	const struct cs_section_def *sect = NULL;
>  	const struct cs_extent_def *ext = NULL;
>
> @@ -2819,9 +2819,9 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
>  }
>
>  static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
> -				    volatile u32 *buffer)
> +				    volatile uint32_t *buffer)
>  {
> -	u32 count = 0, i;
> +	uint32_t count = 0, i;
>  	const struct cs_section_def *sect = NULL;
>  	const struct cs_extent_def *ext = NULL;
>
> @@ -3181,7 +3181,7 @@ static int gfx_v6_0_soft_reset(void *handle)
>  static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
>  						 enum amdgpu_interrupt_state state)
>  {
> -	u32 cp_int_cntl;
> +	uint32_t cp_int_cntl;
>
>  	switch (state) {
>  	case AMDGPU_IRQ_STATE_DISABLE:
> @@ -3203,7 +3203,7 @@ static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
>  						     int ring,
>  						     enum amdgpu_interrupt_state state)
>  {
> -	u32 cp_int_cntl;
> +	uint32_t cp_int_cntl;
>  	switch (state){
>  	case AMDGPU_IRQ_STATE_DISABLE:
>  		if (ring == 0) {
> @@ -3244,7 +3244,7 @@ static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
>  					     unsigned type,
>  					     enum amdgpu_interrupt_state state)
>  {
> -	u32 cp_int_cntl;
> +	uint32_t cp_int_cntl;
>
>  	switch (state) {
>  	case AMDGPU_IRQ_STATE_DISABLE:
> @@ -3269,7 +3269,7 @@ static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
>  					      unsigned type,
>  					      enum amdgpu_interrupt_state state)
>  {
> -	u32 cp_int_cntl;
> +	uint32_t cp_int_cntl;
>
>  	switch (state) {
>  	case AMDGPU_IRQ_STATE_DISABLE:
> @@ -3503,10 +3503,10 @@ static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
>  static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
>  {
>  	int i, j, k, counter, active_cu_number = 0;
> -	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
> +	uint32_t mask, bitmap, ao_bitmap, ao_cu_mask = 0;
>  	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
>  	unsigned disable_masks[4 * 2];
> -	u32 ao_cu_num;
> +	uint32_t ao_cu_num;
>
>  	if (adev->flags & AMD_IS_APU)
>  		ao_cu_num = 2;
> --
> 2.12.0
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
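
P.S. To make the "native types" point concrete: the kernel defines the
uNN/sNN names itself, so inside the kernel they are first-class types
rather than respellings of the C99 names. A minimal sketch of what
include/asm-generic/int-ll64.h boils down to (paraphrased, not the
verbatim header; the real one builds these from the exported
__uNN/__sNN types):

    /* Kernel-internal fixed-width integer types (paraphrased).
     * On every architecture the kernel supports, these have the same
     * widths as the C99 intN_t/uintN_t names. */
    typedef signed char		s8;
    typedef unsigned char	u8;
    typedef signed short	s16;
    typedef unsigned short	u16;
    typedef signed int		s32;
    typedef unsigned int	u32;
    typedef signed long long	s64;
    typedef unsigned long long	u64;

Since the widths are identical either way, a conversion like this patch
changes spelling rather than behavior, which is why it reads as churn.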