[PATCH 29/52] drm/amd/powerplay: update Baffin & Ellesmere smc_sk firmware.

From: yanyang1 <Young.Yang@xxxxxxx>

sync the code from catalyst CL:#1230866.

This adds the SMU_SK (security hard key) firmware path and retries the
protected-mode SMU start with the alternate key when the first attempt
fails, populates BIF_SCLK DFS levels from the new Ellesmere PCIE table
entries, moves SMC voltage-controller enablement into
ellesmere_enable_dpm_tasks(), and carries the additional SCLK slew-rate
fields through to the SMC level tables.

Signed-off-by: yanyang1 <Young.Yang@xxxxxxx>
Reviewed-by: Alex Deucher <alexander.deucher@xxxxxxx>
---
 .../gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.c  |  51 ++++++++++-
 .../gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.h  |   1 +
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h    |   2 +
 .../gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h    |  14 +++
 .../amd/powerplay/hwmgr/tonga_processpptables.c    | 101 +++++++++++++++------
 .../drm/amd/powerplay/smumgr/ellesmere_smumgr.c    |  28 ++++--
 .../drm/amd/powerplay/smumgr/ellesmere_smumgr.h    |   2 +
 7 files changed, 162 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.c
index 62f0f36..043aefa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.c
@@ -222,6 +222,22 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
 			" found a available voltage in VDDC DPM Table \n");
 }
 
+/**
+* Enable voltage control
+*
+* @param    pHwMgr  the address of the powerplay hardware manager.
+* @return   always PP_Result_OK
+*/
+int ellesmere_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+{
+	PP_ASSERT_WITH_CODE(
+		(hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
+		"Failed to enable voltage DPM during DPM Start Function!",
+		return 1;
+	);
+
+	return 0;
+}
 
 /**
 * Checks if we want to support voltage control
@@ -586,6 +602,10 @@ static int ellesmere_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 							pcie_table->entries[i].lane_width));
 		}
 		data->dpm_table.pcie_speed_table.count = max_entry - 1;
+
+		/* Setup BIF_SCLK levels */
+		for (i = 0; i < max_entry; i++)
+			data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
 	} else {
 		/* Hardcode Pcie Table */
 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
@@ -938,9 +958,13 @@ static int ellesmere_calculate_sclk_params(struct pp_hwmgr *hwmgr,
 		sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
 		sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
 		sclk_setting->PllRange = dividers.ucSclkPllRange;
+		sclk_setting->Sclk_slew_rate = 0x400;
+		sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+		sclk_setting->Pcc_down_slew_rate = 0xffff;
 		sclk_setting->SSc_En = dividers.ucSscEnable;
 		sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
 		sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+		sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
 		return result;
 	}
 
@@ -1174,8 +1198,12 @@ static int ellesmere_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
 	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
 	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
 	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
 	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
 	return 0;
 }
 
@@ -1458,8 +1486,12 @@ static int ellesmere_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
 
 	if (!data->mclk_dpm_key_disabled) {
 		/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
@@ -1966,6 +1998,7 @@ static int ellesmere_init_smc_table(struct pp_hwmgr *hwmgr)
 	const struct ellesmere_ulv_parm *ulv = &(data->ulv);
 	uint8_t i;
 	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+	pp_atomctrl_clock_dividers_vi dividers;
 
 	result = ellesmere_setup_default_dpm_tables(hwmgr);
 	PP_ASSERT_WITH_CODE(0 == result,
@@ -2121,6 +2154,17 @@ static int ellesmere_init_smc_table(struct pp_hwmgr *hwmgr)
 		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
 	}
 
+	/* Populate BIF_SCLK levels into SMC DPM table */
+	for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) {
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], &dividers);
+		PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+		if (i == 0)
+			table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+		else
+			table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+	}
+
 	for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
 		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
 
@@ -2284,12 +2328,13 @@ static int ellesmere_start_dpm(struct pp_hwmgr *hwmgr)
 					VoltageChangeTimeout), 0x1000);
 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
 			SWRST_COMMAND_1, RESETLC, 0x0);
-
+/*
 	PP_ASSERT_WITH_CODE(
 			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
 					PPSMC_MSG_Voltage_Cntl_Enable)),
 			"Failed to enable voltage DPM during DPM Start Function!",
 			return -1);
+*/
 
 	if (ellesmere_enable_sclk_mclk_dpm(hwmgr)) {
 		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
@@ -2450,6 +2495,10 @@ int ellesmere_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to enable SCLK control!", result = tmp_result);
 
+	tmp_result = ellesmere_enable_smc_voltage_controller(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable voltage control!", result = tmp_result);
+
 	tmp_result = ellesmere_enable_ulv(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to enable ULV!", result = tmp_result);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.h
index 7f90252..dd6c60b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.h
@@ -274,6 +274,7 @@ struct ellesmere_hwmgr {
 
 	/* ---- DI/DT ---- */
 	struct ellesmere_display_timing        display_timing;
+	uint32_t                      bif_sclk_table[SMU74_MAX_LEVELS_LINK];
 
 	/* ---- Thermal Temperature Setting ---- */
 	struct ellesmere_dpmlevel_enable_mask     dpm_level_enable_mask;
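
The ellesmere_hwmgr changes above do three things: the per-level ulPCIE_Sclk
values from the PCIE table are cached in the new data->bif_sclk_table[], each
cached clock is converted to a DFS post divider and written into the SMC table
(entry 0 feeds Ulv.BifSclkDfs and entry i feeds LinkLevel[i-1].BifSclkDfs,
since the first PCIE entry is reserved for ULV), and the
PPSMC_MSG_Voltage_Cntl_Enable message is moved out of ellesmere_start_dpm()
into ellesmere_enable_smc_voltage_controller(), which
ellesmere_enable_dpm_tasks() now calls. Below is a minimal standalone sketch of
the BIF_SCLK indexing only; the types and the divider helper are simplified
stand-ins for illustration, not the real powerplay structures or the atomctrl
API (the real code also byte-swaps the value with PP_HOST_TO_SMC_US).

/* Stand-in types; the driver uses the SMU74 discrete DPM table and
 * atomctrl_get_dfs_pll_dividers_vi() instead. */
#include <stdio.h>
#include <stdint.h>

#define MAX_LINK_LEVELS 8

struct fake_smc_table {
	uint16_t ulv_bif_sclk_dfs;                    /* table->Ulv.BifSclkDfs */
	uint16_t link_bif_sclk_dfs[MAX_LINK_LEVELS];  /* table->LinkLevel[i].BifSclkDfs */
};

/* Pretend DFS divider: just divide the clock by a fixed reference. */
static uint16_t fake_dfs_post_divider(uint32_t sclk_10khz)
{
	return (uint16_t)(sclk_10khz / 2500);
}

int main(void)
{
	/* bif_sclk_table[0] is the ULV entry, the rest map to link levels,
	 * mirroring the i == 0 special case in ellesmere_init_smc_table(). */
	uint32_t bif_sclk_table[] = { 30000, 60000, 80000 };
	unsigned int count = sizeof(bif_sclk_table) / sizeof(bif_sclk_table[0]);
	struct fake_smc_table table = { 0 };
	unsigned int i;

	for (i = 0; i < count; i++) {
		uint16_t dfs = fake_dfs_post_divider(bif_sclk_table[i]);

		if (i == 0)
			table.ulv_bif_sclk_dfs = dfs;
		else
			table.link_bif_sclk_dfs[i - 1] = dfs;
	}

	printf("ULV BifSclkDfs: %u\n", (unsigned int)table.ulv_bif_sclk_dfs);
	for (i = 0; i + 1 < count; i++)
		printf("LinkLevel[%u] BifSclkDfs: %u\n",
		       i, (unsigned int)table.link_bif_sclk_dfs[i]);

	return 0;
}
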
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
index c9e6c2d..347fef1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
@@ -92,6 +92,8 @@ typedef struct phm_ppt_v1_voltage_lookup_table phm_ppt_v1_voltage_lookup_table;
 struct phm_ppt_v1_pcie_record {
 	uint8_t gen_speed;
 	uint8_t lane_width;
+	uint16_t usreserved;
+	uint32_t pcie_sclk;
 };
 typedef struct phm_ppt_v1_pcie_record phm_ppt_v1_pcie_record;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
index 9a4456e..a2c87ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
@@ -209,6 +209,20 @@ typedef struct _ATOM_Tonga_PCIE_Table {
 	ATOM_Tonga_PCIE_Record entries[1];							/* Dynamically allocate entries. */
 } ATOM_Tonga_PCIE_Table;
 
+typedef struct _ATOM_Ellesmere_PCIE_Record {
+	UCHAR ucPCIEGenSpeed;
+	UCHAR usPCIELaneWidth;
+	UCHAR ucReserved[2];
+	ULONG ulPCIE_Sclk;
+} ATOM_Ellesmere_PCIE_Record;
+
+typedef struct _ATOM_Ellesmere_PCIE_Table {
+	UCHAR ucRevId;
+	UCHAR ucNumEntries;                                         /* Number of entries. */
+	ATOM_Ellesmere_PCIE_Record entries[1];                      /* Dynamically allocate entries. */
+} ATOM_Ellesmere_PCIE_Table;
+
+
 typedef struct _ATOM_Tonga_MM_Dependency_Record {
 	UCHAR   ucVddcInd;											 /* VDDC voltage */
 	USHORT  usVddgfxOffset;									  /* Offset relative to VDDC voltage */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index b156481..ecbc43f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -448,47 +448,90 @@ static int get_sclk_voltage_dependency_table(
 static int get_pcie_table(
 		struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
-		const ATOM_Tonga_PCIE_Table * atom_pcie_table
+		const PPTable_Generic_SubTable_Header * pTable
 		)
 {
 	uint32_t table_size, i, pcie_count;
 	phm_ppt_v1_pcie_table *pcie_table;
 	struct phm_ppt_v1_information *pp_table_information =
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	PP_ASSERT_WITH_CODE((0 != atom_pcie_table->ucNumEntries),
-		"Invalid PowerPlay Table!", return -1);
 
-	table_size = sizeof(uint32_t) +
-		sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
+	if (pTable->ucRevId < 1) {
+		const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable;
+		PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
+			"Invalid PowerPlay Table!", return -1);
 
-	pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+		table_size = sizeof(uint32_t) +
+			sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
 
-	if (NULL == pcie_table)
-		return -ENOMEM;
+		pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
 
-	memset(pcie_table, 0x00, table_size);
+		if (pcie_table == NULL)
+			return -ENOMEM;
 
-	/*
-	* Make sure the number of pcie entries are less than or equal to sclk dpm levels.
-	* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
-	*/
-	pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
-	if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
-		pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
-	else
-		printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
-		Disregarding the excess entries... \n");
+		memset(pcie_table, 0x00, table_size);
 
-	pcie_table->count = pcie_count;
+		/*
+		* Make sure the number of pcie entries are less than or equal to sclk dpm levels.
+		* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
+		*/
+		pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
+		if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
+			pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
+		else
+			printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
+			Disregarding the excess entries... \n");
 
-	for (i = 0; i < pcie_count; i++) {
-		pcie_table->entries[i].gen_speed =
-			atom_pcie_table->entries[i].ucPCIEGenSpeed;
-		pcie_table->entries[i].lane_width =
-			atom_pcie_table->entries[i].usPCIELaneWidth;
-	}
+		pcie_table->count = pcie_count;
+
+		for (i = 0; i < pcie_count; i++) {
+			pcie_table->entries[i].gen_speed =
+				atom_pcie_table->entries[i].ucPCIEGenSpeed;
+			pcie_table->entries[i].lane_width =
+				atom_pcie_table->entries[i].usPCIELaneWidth;
+		}
+
+		*pp_tonga_pcie_table = pcie_table;
+	} else {
+		/* Ellesmere/Baffin and newer. */
+		const ATOM_Ellesmere_PCIE_Table *atom_pcie_table = (ATOM_Ellesmere_PCIE_Table *)pTable;
+		PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
+			"Invalid PowerPlay Table!", return -1);
+
+		table_size = sizeof(uint32_t) +
+			sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
+
+		pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+
+		if (pcie_table == NULL)
+			return -ENOMEM;
+
+		memset(pcie_table, 0x00, table_size);
+
+		/*
+		* Make sure the number of pcie entries are less than or equal to sclk dpm levels.
+		* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
+		*/
+		pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
+		if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
+			pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
+		else
+			printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
+			Disregarding the excess entries... \n");
+
+		pcie_table->count = pcie_count;
+
+		for (i = 0; i < pcie_count; i++) {
+			pcie_table->entries[i].gen_speed =
+				atom_pcie_table->entries[i].ucPCIEGenSpeed;
+			pcie_table->entries[i].lane_width =
+				atom_pcie_table->entries[i].usPCIELaneWidth;
+			pcie_table->entries[i].pcie_sclk =
+				atom_pcie_table->entries[i].ulPCIE_Sclk;
+		}
 
-	*pp_tonga_pcie_table = pcie_table;
+		*pp_tonga_pcie_table = pcie_table;
+	}
 
 	return 0;
 }
@@ -668,8 +711,8 @@ static int init_clock_voltage_dependency(
 	const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
 		(const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
 		le16_to_cpu(powerplay_table->usHardLimitTableOffset));
-	const ATOM_Tonga_PCIE_Table *pcie_table =
-		(const ATOM_Tonga_PCIE_Table *)(((unsigned long) powerplay_table) +
+	const PPTable_Generic_SubTable_Header *pcie_table =
+		(const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
 		le16_to_cpu(powerplay_table->usPCIETableOffset));
 
 	pp_table_information->vdd_dep_on_sclk = NULL;
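
get_pcie_table() now receives the generic sub-table header and picks the
record layout from ucRevId: revision 0 keeps the two-byte Tonga record,
revision 1 and later use the Ellesmere record that adds a per-level BIF SCLK.
The two branches duplicate the allocation and entry-count clamping and differ
only in the record type and the extra pcie_sclk copy. Here is a simplified
standalone sketch of that dispatch, using stand-in structures rather than the
packed ATOM layouts from tonga_pptable.h.

/* Standalone sketch of the ucRevId dispatch; illustrative types only. */
#include <stdio.h>
#include <stdint.h>

struct generic_subtable_header {
	uint8_t rev_id;
	uint8_t num_entries;
};

struct tonga_pcie_record {        /* rev 0: gen speed + lane width only */
	uint8_t gen_speed;
	uint8_t lane_width;
};

struct ellesmere_pcie_record {    /* rev 1+: adds a per-level BIF SCLK */
	uint8_t gen_speed;
	uint8_t lane_width;
	uint8_t reserved[2];
	uint32_t pcie_sclk;
};

struct parsed_pcie_entry {
	uint8_t gen_speed;
	uint8_t lane_width;
	uint32_t pcie_sclk;           /* stays 0 for rev-0 tables */
};

static void parse_pcie_table(const struct generic_subtable_header *hdr,
			     const void *entries,
			     struct parsed_pcie_entry *out)
{
	unsigned int i;

	if (hdr->rev_id < 1) {
		const struct tonga_pcie_record *rec = entries;

		for (i = 0; i < hdr->num_entries; i++) {
			out[i].gen_speed = rec[i].gen_speed;
			out[i].lane_width = rec[i].lane_width;
			out[i].pcie_sclk = 0;
		}
	} else {
		const struct ellesmere_pcie_record *rec = entries;

		for (i = 0; i < hdr->num_entries; i++) {
			out[i].gen_speed = rec[i].gen_speed;
			out[i].lane_width = rec[i].lane_width;
			out[i].pcie_sclk = rec[i].pcie_sclk;
		}
	}
}

int main(void)
{
	struct generic_subtable_header hdr = { .rev_id = 1, .num_entries = 2 };
	struct ellesmere_pcie_record recs[2] = {
		{ 1, 16, { 0, 0 }, 30000 },
		{ 3, 16, { 0, 0 }, 60000 },
	};
	struct parsed_pcie_entry out[2];
	unsigned int i;

	parse_pcie_table(&hdr, recs, out);
	for (i = 0; i < hdr.num_entries; i++)
		printf("gen%u x%u pcie_sclk=%u\n",
		       (unsigned int)out[i].gen_speed,
		       (unsigned int)out[i].lane_width,
		       (unsigned int)out[i].pcie_sclk);

	return 0;
}
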
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ellesmere_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ellesmere_smumgr.c
index f57ba12..6395065 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ellesmere_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ellesmere_smumgr.c
@@ -345,7 +345,6 @@ static int ellesmere_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t
 	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
 	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
 
-
 	for (; byte_count >= 4; byte_count -= 4)
 		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
 
@@ -364,6 +363,9 @@ static enum cgs_ucode_id ellesmere_convert_fw_type_to_cgs(uint32_t fw_type)
 	case UCODE_ID_SMU:
 		result = CGS_UCODE_ID_SMU;
 		break;
+	case UCODE_ID_SMU_SK:
+		result = CGS_UCODE_ID_SMU_SK;
+		break;
 	case UCODE_ID_SDMA0:
 		result = CGS_UCODE_ID_SDMA0;
 		break;
@@ -401,14 +403,18 @@ static enum cgs_ucode_id ellesmere_convert_fw_type_to_cgs(uint32_t fw_type)
 static int ellesmere_upload_smu_firmware_image(struct pp_smumgr *smumgr)
 {
 	int result = 0;
+	struct ellesmere_smumgr *smu_data = (struct ellesmere_smumgr *)(smumgr->backend);
 
 	struct cgs_firmware_info info = {0};
 
-	cgs_get_firmware_info(smumgr->device,
+	if (smu_data->security_hard_key == 1)
+		cgs_get_firmware_info(smumgr->device,
 			ellesmere_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+	else
+		cgs_get_firmware_info(smumgr->device,
+			ellesmere_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
 
 	/* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/
-
 	result = ellesmere_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, ELLESMERE_SMC_SIZE);
 
 	return result;
@@ -798,13 +804,11 @@ static int ellesmere_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
 						SMU_STATUS, SMU_PASS))
 		PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
 
-
 	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
 
 	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
 					SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-
 	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
 					SMC_SYSCON_RESET_CNTL, rst_reg, 0);
 
@@ -860,12 +864,22 @@ static int ellesmere_start_smu(struct pp_smumgr *smumgr)
 	/* Only start SMC if SMC RAM is not running */
 	if (!ellesmere_is_smc_ram_running(smumgr)) {
 		SMU_VFT_INTACT = false;
+		smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
+		smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+
 		/* Check if SMU is running in protected mode */
-		if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE))
+		if (smu_data->protected_mode == 0) {
 			result = ellesmere_start_smu_in_non_protection_mode(smumgr);
-		else
+		} else {
 			result = ellesmere_start_smu_in_protection_mode(smumgr);
 
+			/* If failed, try with different security Key. */
+			if (result != 0) {
+				smu_data->security_hard_key ^= 1;
+				result = ellesmere_start_smu_in_protection_mode(smumgr);
+			}
+		}
+
 		if (result != 0)
 			PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ellesmere_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/ellesmere_smumgr.h
index 3712b32..05d636a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ellesmere_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ellesmere_smumgr.h
@@ -51,6 +51,8 @@ struct ellesmere_smumgr {
 	uint32_t read_drm_straps_mc_address_low;
 	uint32_t acpi_optimization;
 	bool post_initial_boot;
+	uint8_t protected_mode;
+	uint8_t security_hard_key;
 	struct ellesmere_avfs  avfs;
 };
 
-- 
2.5.0
