From: Evan Quan <quanliangl@xxxxxxxxxxx>

Handle WBRF_CHANGED events through a delayed work item so that the
exclusion ranges are consumed at most once every
SMU_WBRF_EVENT_HANDLING_PACE ms. This protects the PMFW from being
overloaded by bursts of wifi band change notifications.

Signed-off-by: Evan Quan <quanliangl@xxxxxxxxxxx>
Reviewed-by: Mario Limonciello <mario.limonciello@xxxxxxx>
Signed-off-by: Ma Jun <Jun.Ma2@xxxxxxx>
---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 29 +++++++++++++++----
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |  7 +++++
 2 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 1553e175019c..2dbfcd7d0ef5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1316,7 +1316,8 @@ static int smu_wbrf_event_handler(struct notifier_block *nb,
 
 	switch (action) {
 	case WBRF_CHANGED:
-		smu_wbrf_handle_exclusion_ranges(smu);
+		schedule_delayed_work(&smu->wbrf_delayed_work,
+				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
 		break;
 	default:
 		return NOTIFY_DONE;
@@ -1325,6 +1326,20 @@ static int smu_wbrf_event_handler(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
+/**
+ * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
+ *
+ * @work: struct work_struct pointer
+ *
+ * Flood is over and driver will consume the latest exclusion ranges.
+ */
+static void smu_wbrf_delayed_work_handler(struct work_struct *work)
+{
+	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
+
+	smu_wbrf_handle_exclusion_ranges(smu);
+}
+
 /**
  * smu_wbrf_support_check - check wbrf support
  *
@@ -1354,12 +1369,13 @@ static void smu_wbrf_support_check(struct smu_context *smu)
  */
 static int smu_wbrf_init(struct smu_context *smu)
 {
-	struct amdgpu_device *adev = smu->adev;
 	int ret;
 
 	if (!smu->wbrf_supported)
 		return 0;
 
+	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
+
 	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
 	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
 	if (ret)
@@ -1370,11 +1386,10 @@ static int smu_wbrf_init(struct smu_context *smu)
 	 * before our driver loaded. To make sure our driver
 	 * is awared of those exclusion ranges.
 	 */
-	ret = smu_wbrf_handle_exclusion_ranges(smu);
-	if (ret)
-		dev_err(adev->dev, "Failed to handle wbrf exclusion ranges\n");
+	schedule_delayed_work(&smu->wbrf_delayed_work,
+			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -1390,6 +1405,8 @@ static void smu_wbrf_fini(struct smu_context *smu)
 		return;
 
 	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
+
+	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
 }
 
 static int smu_smc_hw_setup(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3a183e73e9de..5e62b7474c20 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -469,6 +469,12 @@ struct stb_context {
 
 #define WORKLOAD_POLICY_MAX 7
 
+/*
+ * Configure wbrf event handling pace as there can be only one
+ * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms.
+ */
+#define SMU_WBRF_EVENT_HANDLING_PACE 10
+
 struct smu_context {
 	struct amdgpu_device		*adev;
 	struct amdgpu_irq_src		irq_source;
@@ -569,6 +575,7 @@ struct smu_context {
 	/* data structures for wbrf feature support */
 	bool				wbrf_supported;
 	struct notifier_block		wbrf_notifier;
+	struct delayed_work		wbrf_delayed_work;
 };
 
 struct i2c_adapter;
-- 
2.34.1
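
For reference, below is a minimal, self-contained sketch (not part of the
patch) of the debounce pattern the change relies on: bursts of notifier
events only arm a delayed work item, so the expensive handler runs at most
once per pacing interval. Names such as EVENT_PACE_MS, on_event() and
debounce_work are illustrative, not taken from the driver.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define EVENT_PACE_MS	10	/* illustrative pacing interval */

static struct delayed_work debounce_work;

static void debounce_work_handler(struct work_struct *work)
{
	/* The burst has settled; consume the latest state once. */
	pr_info("handling coalesced events\n");
}

/* Called from the (possibly high-frequency) event source. */
static void on_event(void)
{
	/*
	 * If the work is already pending this is a no-op, so events
	 * arriving within the pacing window collapse into a single
	 * run of the handler.
	 */
	schedule_delayed_work(&debounce_work, msecs_to_jiffies(EVENT_PACE_MS));
}

static int __init debounce_init(void)
{
	INIT_DELAYED_WORK(&debounce_work, debounce_work_handler);
	on_event();
	return 0;
}

static void __exit debounce_exit(void)
{
	/* Ensure nothing is queued or running before unload. */
	cancel_delayed_work_sync(&debounce_work);
}

module_init(debounce_init);
module_exit(debounce_exit);
MODULE_LICENSE("GPL");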