> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces@xxxxxxxxxxxxxxxxxxxxx> On Behalf Of Kenneth Feng
> Sent: Tuesday, November 12, 2019 4:40 PM
> To: amd-gfx@xxxxxxxxxxxxxxxxxxxxx
> Cc: Feng, Kenneth <Kenneth.Feng@xxxxxxx>
> Subject: [PATCH] drm/amd/powerplay: read pcie speed/width info
>
> sysfs interface to read pcie speed&width info on navi1x.
>
> Signed-off-by: Kenneth Feng <kenneth.feng@xxxxxxx>
> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c    | 10 +++---
>  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  8 +++++
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.c    | 50 ++++++++++++++++++++++++++-
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.h    |  3 ++
>  4 files changed, 66 insertions(+), 5 deletions(-)
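The new read-out is consumed like the other pp_dpm_* files; a minimal
userspace sketch, assuming the usual /sys/class/drm/card0/device/pp_dpm_pcie
node (the exact sysfs path is not spelled out in this patch):

	/* Sketch only, not part of the patch: dump the PCIe DPM levels.
	 * The sysfs path below is assumed from the existing pp_dpm_* naming.
	 */
	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/class/drm/card0/device/pp_dpm_pcie", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "1: 8.0GT/s, x16 <lclk>Mhz *" */
		fclose(f);
		return 0;
	}

Each printed line follows the "%d: %s %s %dMhz %s" format added in the
SMU_PCIE case below, with '*' marking the level that matches the current
link speed and width.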
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 57459a6..69243a8 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1068,10 +1068,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
>  		return ret;
>
>  	if (adev->asic_type != CHIP_ARCTURUS) {
> -		ret = smu_override_pcie_parameters(smu);
> -		if (ret)
> -			return ret;
> -
>  		ret = smu_notify_display_change(smu);
>  		if (ret)
>  			return ret;
> @@ -1100,6 +1096,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
>  		return ret;
>  	}
>
> +	if (adev->asic_type != CHIP_ARCTURUS) {
> +		ret = smu_override_pcie_parameters(smu);
> +		if (ret)
> +			return ret;
> +	}
> +
>  	ret = smu_set_default_od_settings(smu, initialize);
>  	if (ret)
>  		return ret;
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> index 0ba7a72..6061490 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> @@ -48,6 +48,8 @@
>
>  #define SMU11_TOOL_SIZE		0x19000
>
> +#define MAX_PCIE_CONF 2
> +
>  #define CLK_MAP(clk, index) \
>  	[SMU_##clk] = {1, (index)}
>
> @@ -88,6 +90,11 @@ struct smu_11_0_dpm_table {
>  	uint32_t    max;        /* MHz */
>  };
>
> +struct smu_11_0_pcie_table {
> +	uint8_t  pcie_gen[MAX_PCIE_CONF];
> +	uint8_t  pcie_lane[MAX_PCIE_CONF];
> +};
> +
>  struct smu_11_0_dpm_tables {
>  	struct smu_11_0_dpm_table        soc_table;
>  	struct smu_11_0_dpm_table        gfx_table;
> @@ -100,6 +107,7 @@ struct smu_11_0_dpm_tables {
>  	struct smu_11_0_dpm_table        display_table;
>  	struct smu_11_0_dpm_table        phy_table;
>  	struct smu_11_0_dpm_table        fclk_table;
> +	struct smu_11_0_pcie_table       pcie_table;
>  };
>
>  struct smu_11_0_dpm_context {
> diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> index 36cf313..8855bcc 100644
> --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> @@ -36,6 +36,7 @@
>  #include "navi10_ppt.h"
>  #include "smu_v11_0_pptable.h"
>  #include "smu_v11_0_ppsmc.h"
> +#include "nbio/nbio_7_4_sh_mask.h"
>
>  #include "asic_reg/mp/mp_11_0_sh_mask.h"
>
> @@ -599,6 +600,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
>  	struct smu_table_context *table_context = &smu->smu_table;
>  	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
>  	PPTable_t *driver_ppt = NULL;
> +	int i;
>
>  	driver_ppt = table_context->driver_pptable;
>
> @@ -629,6 +631,11 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
>  	dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
>  	dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];
>
> +	for (i = 0; i < MAX_PCIE_CONF; i++) {
> +		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
> +		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
> +	}
> +
>  	return 0;
>  }
>
> @@ -710,6 +717,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
>  	struct smu_table_context *table_context = &smu->smu_table;
>  	od_table = (OverDriveTable_t *)table_context->overdrive_table;
>  	od_settings = smu->od_settings;
> +	uint32_t gen_speed, lane_width;
> +	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
> +	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
> +	struct amdgpu_device *adev = smu->adev;
> +	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
>
>  	switch (clk_type) {
>  	case SMU_GFXCLK:
> @@ -760,6 +772,30 @@ static int navi10_print_clk_levels(struct smu_context *smu,
>
>  		}
>  		break;
> +	case SMU_PCIE:
> +		gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
> +			     PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
> +			>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
> +		lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
> +			      PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
> +			>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
> +		for (i = 0; i < NUM_LINK_LEVELS; i++)
> +			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
> +					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
> +					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
> +					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
> +					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
> +					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
> +					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
> +					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
> +					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
> +					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
> +					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
> +					pptable->LclkFreq[i],
> +					(gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
> +					(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
> +					"*" : "");
> +		break;
>  	case SMU_OD_SCLK:
>  		if (!smu->od_enabled || !od_table || !od_settings)
>  			break;
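The nested ternaries above map the PPTable's PCIe encodings (pcie_gen 0..3
for the data rate, pcie_lane 1..6 for x1..x16) to strings. A table-driven
decode would print the same thing; a sketch only, with hypothetical helper
names that are not part of this patch:

	/* Sketch, not in the patch: table-driven decode of the PPTable
	 * PCIe encodings.  pcie_gen 0..3 selects the data rate,
	 * pcie_lane 1..6 selects x1..x16 (index 0 is unused).
	 */
	static const char * const pcie_gen_str[]  = {
		"2.5GT/s,", "5.0GT/s,", "8.0GT/s,", "16.0GT/s,"
	};
	static const char * const pcie_lane_str[] = {
		"", "x1", "x2", "x4", "x8", "x12", "x16"
	};

	static const char *pcie_gen_to_str(uint8_t gen)		/* hypothetical */
	{
		return (gen < 4) ? pcie_gen_str[gen] : "";
	}

	static const char *pcie_lane_to_str(uint8_t lane)	/* hypothetical */
	{
		return (lane < 7) ? pcie_lane_str[lane] : "";
	}

Either form produces identical output; the lookup tables would only shorten
the sprintf call.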
> @@ -1690,6 +1726,9 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
>  	int ret, i;
>  	uint32_t smu_pcie_arg;
>
> +	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
> +	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
> +
>  	for (i = 0; i < NUM_LINK_LEVELS; i++) {
>  		smu_pcie_arg = (i << 16) |
>  			((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
> @@ -1698,8 +1737,17 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
>  		ret = smu_send_smc_msg_with_param(smu,
>  						  SMU_MSG_OverridePcieParameters,
>  						  smu_pcie_arg);
> +		if (!ret) {
> +			if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
> +				dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
> +			if (pptable->PcieLaneCount[i] > pcie_width_cap)
> +				dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
> +		} else {
> +			return ret;
> +		}

[Quan, Evan] Better to update this as below. Anyway, the patch is reviewed-by: Evan Quan <evan.quan@xxxxxxx>

	if (ret)
		return ret;

	if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
	if (pptable->PcieLaneCount[i] > pcie_width_cap)
		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;

>  	}
> -	return ret;
> +
> +	return 0;
>  }
>
>  static inline void navi10_dump_od_table(OverDriveTable_t *od_table) {
> diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
> index fd6dda1..ec03c799 100644
> --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
> +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
> @@ -35,6 +35,9 @@
>
>  #define NAVI10_VOLTAGE_SCALE (4)
>
> +#define smnPCIE_LC_SPEED_CNTL			0x11140290
> +#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
> +
>  extern void navi10_set_ppt_funcs(struct smu_context *smu);
>
>  #endif
> --
> 2.7.4
>
_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx