On 8/18/2023 8:56 AM, Evan Quan wrote:
Pace the wbrf event handling to protect the PMFW from being overloaded.
Signed-off-by: Evan Quan <evan.quan@xxxxxxx>
Reviewed-by: Mario Limonciello <mario.limonciello@xxxxxxx>
---
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 31 +++++++++++++++----
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 7 +++++
2 files changed, 32 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 704442ce1da3..6c8bcdc17a15 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1318,7 +1318,8 @@ static int smu_wbrf_event_handler(struct notifier_block *nb,
switch (action) {
case WBRF_CHANGED:
- smu_wbrf_handle_exclusion_ranges(smu);
+ schedule_delayed_work(&smu->wbrf_delayed_work,
+ msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
break;
default:
return NOTIFY_DONE;
@@ -1327,6 +1328,21 @@ static int smu_wbrf_event_handler(struct notifier_block *nb,
return NOTIFY_OK;
}
+/**
+ * smu_wbrf_delayed_work_handler - callback run when the delayed work timer expires
+ *
+ * @work: struct work_struct pointer
+ *
+ * The flood is considered over and the driver consumes the latest exclusion ranges.
+ */
+static void smu_wbrf_delayed_work_handler(struct work_struct *work)
+{
+ struct smu_context *smu =
+ container_of(work, struct smu_context, wbrf_delayed_work.work);
+
+ smu_wbrf_handle_exclusion_ranges(smu);
+}
+
/**
* smu_wbrf_support_check - check wbrf support
*
@@ -1357,12 +1373,14 @@ static void smu_wbrf_support_check(struct smu_context *smu)
*/
static int smu_wbrf_init(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret;
if (!smu->wbrf_supported)
return 0;
+ INIT_DELAYED_WORK(&smu->wbrf_delayed_work,
+ smu_wbrf_delayed_work_handler);
+
This is one-time initialization and can be part of sw_init instead.
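Roughly like the below (untested sketch; assuming smu_sw_init() in amdgpu_smu.c keeps its current void *handle prototype and that the handler is forward-declared or moved above it):

static void smu_wbrf_delayed_work_handler(struct work_struct *work);

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	...

	/* one-time setup; smu_wbrf_init() then only registers the
	 * notifier and schedules the initial run
	 */
	INIT_DELAYED_WORK(&smu->wbrf_delayed_work,
			  smu_wbrf_delayed_work_handler);

	...
}

smu_wbrf_init() would then only register the notifier and schedule the initial run, and smu_wbrf_fini() keeps the cancel_delayed_work_sync().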
Thanks,
Lijo
smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
ret = wbrf_register_notifier(&smu->wbrf_notifier);
if (ret)
@@ -1373,11 +1391,10 @@ static int smu_wbrf_init(struct smu_context *smu)
* before our driver loaded. To make sure our driver
* is aware of those exclusion ranges.
*/
- ret = smu_wbrf_handle_exclusion_ranges(smu);
- if (ret)
- dev_err(adev->dev, "Failed to handle wbrf exclusion ranges\n");
+ schedule_delayed_work(&smu->wbrf_delayed_work,
+ msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
- return ret;
+ return 0;
}
/**
@@ -1393,6 +1410,8 @@ static void smu_wbrf_fini(struct smu_context *smu)
return;
wbrf_unregister_notifier(&smu->wbrf_notifier);
+
+ cancel_delayed_work_sync(&smu->wbrf_delayed_work);
}
static int smu_smc_hw_setup(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 244297979f92..4d5cb1b511e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -480,6 +480,12 @@ struct stb_context {
#define WORKLOAD_POLICY_MAX 7
+/*
+ * Configure wbrf event handling pace as there can be only one
+ * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms.
+ */
+#define SMU_WBRF_EVENT_HANDLING_PACE 10
+
struct smu_context
{
struct amdgpu_device *adev;
@@ -581,6 +587,7 @@ struct smu_context
/* data structures for wbrf feature support */
bool wbrf_supported;
struct notifier_block wbrf_notifier;
+ struct delayed_work wbrf_delayed_work;
};
struct i2c_adapter;
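One more note on the pacing, as I read it: this relies on schedule_delayed_work() being a no-op while the work is still pending, so a burst of WBRF_CHANGED notifications arms the timer once and is collapsed into a single smu_wbrf_handle_exclusion_ranges() run. Sketch below only annotates the handler from this patch:

	case WBRF_CHANGED:
		/*
		 * schedule_delayed_work() does nothing if the work is
		 * already pending, so events arriving within the
		 * SMU_WBRF_EVENT_HANDLING_PACE window are coalesced into
		 * the single run armed by the first event; at most one
		 * PMFW update happens per window during a flood.
		 */
		schedule_delayed_work(&smu->wbrf_delayed_work,
				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
		break;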