[AMD Official Use Only]

> -----Original Message-----
> From: Lazar, Lijo <Lijo.Lazar@xxxxxxx>
> Sent: Wednesday, November 17, 2021 7:36 PM
> To: Yang, Stanley <Stanley.Yang@xxxxxxx>; amd-gfx@xxxxxxxxxxxxxxxxxxxxx;
> Zhang, Hawking <Hawking.Zhang@xxxxxxx>; Clements, John <John.Clements@xxxxxxx>;
> Quan, Evan <Evan.Quan@xxxxxxx>; Wang, Yang(Kevin) <KevinYang.Wang@xxxxxxx>
> Subject: Re: [PATCH Review 2/4] drm/amdgpu: add new query interface for umc block
>
>
> On 11/17/2021 3:41 PM, Stanley.Yang wrote:
> > add message smu to query error information
> >
> > Signed-off-by: Stanley.Yang <Stanley.Yang@xxxxxxx>
> > ---
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h |  16 +++
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h |   4 +
> >   drivers/gpu/drm/amd/amdgpu/umc_v6_7.c   | 161 ++++++++++++++++++++++++
> >   3 files changed, 181 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
> > index cdd0010a5389..bcbf3264d92f 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
> > @@ -320,6 +320,19 @@ struct ras_common_if {
> >           char name[32];
> >   };
> >
> > +#define MAX_UMC_CHANNEL_NUM 32
> > +
> > +struct ecc_info_per_ch {
> > +        uint16_t ce_count_lo_chip;
> > +        uint16_t ce_count_hi_chip;
> > +        uint64_t mca_umc_status;
> > +        uint64_t mca_umc_addr;
> > +};
> > +
> > +struct umc_ecc_info {
> > +        struct ecc_info_per_ch ecc[MAX_UMC_CHANNEL_NUM];
> > +};
> > +
> >   struct amdgpu_ras {
> >           /* ras infrastructure */
> >           /* for ras itself. */
> > @@ -359,6 +372,9 @@ struct amdgpu_ras {
> >           struct delayed_work ras_counte_delay_work;
> >           atomic_t ras_ue_count;
> >           atomic_t ras_ce_count;
> > +
> > +        /* record umc error info queried from smu */
> > +        struct umc_ecc_info umc_ecc;
> >   };
> >
> >   struct ras_fs_data {
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
> > index 1f5fe2315236..7aa9b21eb906 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
> > @@ -49,6 +49,10 @@ struct amdgpu_umc_ras_funcs {
> >           void (*query_ras_error_address)(struct amdgpu_device *adev,
> >                                           void *ras_error_status);
> >           bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
> > +        void (*message_smu_query_ras_error_count)(struct amdgpu_device *adev,
> > +                                        void *ras_error_status);
> > +        void (*message_smu_query_ras_error_address)(struct amdgpu_device *adev,
> > +                                        void *ras_error_status);
>
> Maybe rename message_smu to ecc_info. These methods fetch the error
> info from the umc_ecc_info table. They don't deal with the smu or care
> about how the information gets filled. As long as the ecc_info table is
> filled, they can get the info.

[Yang, Stanley] Yeah, renaming message_smu to ecc_info is better, since the ecc_info table has already been updated before this call.
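
Something like below for v2 (just a sketch that applies the rename to the prototypes already in this patch; final names to be confirmed):

struct amdgpu_umc_ras_funcs {
        ...
        /* fetch ecc error counts from the umc_ecc_info table filled by the SMU */
        void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
                                        void *ras_error_status);
        /* fetch retired page addresses from the same table */
        void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
                                        void *ras_error_status);
};

The umc_v6_7 implementations below would be renamed to match, e.g. umc_v6_7_ecc_info_query_ras_error_count().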
>
> Thanks,
> Lijo
>
> >   };
> >
> >   struct amdgpu_umc_funcs {
> > diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
> > index f7ec3fe134e5..cd96e8b734cb 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
> > @@ -50,6 +50,165 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
> >           return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
> >   }
> >
> > +static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev,
> > +                                        uint32_t umc_inst,
> > +                                        uint32_t ch_inst)
> > +{
> > +        return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
> > +}
> > +
> > +static void umc_v6_7_message_smu_query_correctable_error_count(struct amdgpu_device *adev,
> > +                                        uint32_t channel_index,
> > +                                        unsigned long *error_count)
> > +{
> > +        uint32_t ecc_err_cnt;
> > +        uint64_t mc_umc_status;
> > +        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
> > +
> > +        /*
> > +         * select the lower chip and check the error count
> > +         * skip add error count, calc error counter only from mca_umc_status
> > +         */
> > +        ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_lo_chip;
> > +
> > +        /*
> > +         * select the higher chip and check the err counter
> > +         * skip add error count, calc error counter only from mca_umc_status
> > +         */
> > +        ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_hi_chip;
> > +
> > +        /* check for SRAM correctable error
> > +           MCUMC_STATUS is a 64 bit register */
> > +        mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
> > +        if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
> > +            REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
> > +                *error_count += 1;
> > +}
> > +
> > +static void umc_v6_7_message_smu_query_uncorrectable_error_count(struct amdgpu_device *adev,
> > +                                        uint32_t channel_index,
> > +                                        unsigned long *error_count)
> > +{
> > +        uint64_t mc_umc_status;
> > +        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
> > +
> > +        /* check the MCUMC_STATUS */
> > +        mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
> > +        if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
> > +            (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
> > +            REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
> > +            REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
> > +            REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
> > +            REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
> > +                *error_count += 1;
> > +}
> > +
> > +static void umc_v6_7_message_smu_query_ras_error_count(struct amdgpu_device *adev,
> > +                                        void *ras_error_status)
> > +{
> > +        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
> > +
> > +        uint32_t umc_inst        = 0;
> > +        uint32_t ch_inst         = 0;
> > +        uint32_t umc_reg_offset  = 0;
> > +        uint32_t channel_index   = 0;
> > +
> > +        /*TODO: driver needs to toggle DF Cstate to ensure
> > +         * safe access of UMC registers. Will add the protection */
> > +        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
> > +                umc_reg_offset = get_umc_v6_7_reg_offset(adev,
> > +                                                         umc_inst,
> > +                                                         ch_inst);
> > +                channel_index = get_umc_v6_7_channel_index(adev,
> > +                                                         umc_inst,
> > +                                                         ch_inst);
> > +
> > +                umc_v6_7_message_smu_query_correctable_error_count(adev,
> > +                                        channel_index,
> > +                                        &(err_data->ce_count));
> > +                umc_v6_7_message_smu_query_uncorrectable_error_count(adev,
> > +                                        channel_index,
> > +                                        &(err_data->ue_count));
> > +        }
> > +}
> > +
> > +static void umc_v6_7_message_smu_query_error_address(struct amdgpu_device *adev,
> > +                                        struct ras_err_data *err_data,
> > +                                        uint32_t umc_reg_offset,
> > +                                        uint32_t ch_inst,
> > +                                        uint32_t umc_inst)
> > +{
> > +        uint64_t mc_umc_status, err_addr, retired_page;
> > +        struct eeprom_table_record *err_rec;
> > +        uint32_t channel_index;
> > +        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
> > +
> > +        channel_index =
> > +                adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
> > +
> > +        mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
> > +
> > +        if (mc_umc_status == 0)
> > +                return;
> > +
> > +        if (!err_data->err_addr)
> > +                return;
> > +
> > +        err_rec = &err_data->err_addr[err_data->err_addr_cnt];
> > +
> > +        /* calculate error address if ue/ce error is detected */
> > +        if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
> > +            (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
> > +            REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
> > +
> > +                err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr;
> > +                err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
> > +
> > +                /* translate umc channel address to soc pa, 3 parts are included */
> > +                retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
> > +                                ADDR_OF_256B_BLOCK(channel_index) |
> > +                                OFFSET_IN_256B_BLOCK(err_addr);
> > +
> > +                /* we only save ue error information currently, ce is skipped */
> > +                if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
> > +                        err_rec->address = err_addr;
> > +                        /* page frame address is saved */
> > +                        err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
> > +                        err_rec->ts = (uint64_t)ktime_get_real_seconds();
> > +                        err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
> > +                        err_rec->cu = 0;
> > +                        err_rec->mem_channel = channel_index;
> > +                        err_rec->mcumc_id = umc_inst;
> > +
> > +                        err_data->err_addr_cnt++;
> > +                }
> > +        }
> > +}
> > +
> > +static void umc_v6_7_message_smu_query_ras_error_address(struct amdgpu_device *adev,
> > +                                        void *ras_error_status)
> > +{
> > +        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
> > +
> > +        uint32_t umc_inst        = 0;
> > +        uint32_t ch_inst         = 0;
> > +        uint32_t umc_reg_offset  = 0;
> > +
> > +        /*TODO: driver needs to toggle DF Cstate to ensure
> > +         * safe access of UMC registers.
> > +         * Will add the protection
> > +         * when firmware interface is ready */
> > +        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
> > +                umc_reg_offset = get_umc_v6_7_reg_offset(adev,
> > +                                                         umc_inst,
> > +                                                         ch_inst);
> > +                umc_v6_7_message_smu_query_error_address(adev,
> > +                                        err_data,
> > +                                        umc_reg_offset,
> > +                                        ch_inst,
> > +                                        umc_inst);
> > +        }
> > +}
> > +
> >   static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
> >                                           uint32_t umc_reg_offset,
> >                                           unsigned long *error_count)
> > @@ -327,4 +486,6 @@ const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
> >           .query_ras_error_count = umc_v6_7_query_ras_error_count,
> >           .query_ras_error_address = umc_v6_7_query_ras_error_address,
> >           .query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
> > +        .message_smu_query_ras_error_count = umc_v6_7_message_smu_query_ras_error_count,
> > +        .message_smu_query_ras_error_address = umc_v6_7_message_smu_query_ras_error_address,
> >   };
> >
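
[Yang, Stanley] With the rename applied, the hookup at the end would then read roughly like this (sketch only, assuming the umc_v6_7 implementation functions are renamed the same way):

        /* fetch ecc info from the pre-filled umc_ecc table instead of UMC registers */
        .ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
        .ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,

Will update the naming in the next version.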