Re: [PATCH 1/3] drm/amd/pm: Add APIs for device access checks

<Ping, as the fix in the second patch is a bit urgent>
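
For reference, the call pattern the patch below settles on is roughly the
following sketch (names are taken from the patch itself; the attribute is
only an illustration of a typical sysfs show() after the conversion):

	static ssize_t amdgpu_get_example(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
	{
		struct drm_device *ddev = dev_get_drvdata(dev);
		struct amdgpu_device *adev = drm_to_adev(ddev);
		uint32_t value = 0;
		int ret;

		/* Input validation goes first, then the access check. */
		ret = amdgpu_pm_get_access_if_active(adev);
		if (ret)
			return ret;

		value = amdgpu_dpm_get_sclk_od(adev);

		/* Pairs with the amdgpu_pm_get_access*() call above. */
		amdgpu_pm_put_access(adev);

		return sysfs_emit(buf, "%d\n", value);
	}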

On 2/4/2025 12:08 PM, Lijo Lazar wrote:
> Wrap the checks done before device access in helper functions and use
> them wherever the device is accessed. The general order in the APIs is
> now to validate input arguments first and then check whether device
> access is allowed.
> 
> Signed-off-by: Lijo Lazar <lijo.lazar@xxxxxxx>
> ---
>  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 616 +++++++++++------------------
>  1 file changed, 229 insertions(+), 387 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> index 0aca0803514e..0fe0b798f559 100644
> --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> @@ -97,6 +97,77 @@ const char * const amdgpu_pp_profile_name[] = {
>  	"UNCAPPED",
>  };
>  
> +/**
> + * amdgpu_pm_dev_state_check - Check if device can be accessed.
> + * @adev: Target device.
> + *
> + * Checks the state of @adev for access. Return 0 if the device is
> + * accessible or a negative error code otherwise.
> + */
> +static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev)
> +{
> +	if (amdgpu_in_reset(adev))
> +		return -EPERM;
> +	if (adev->in_suspend && !adev->in_runpm)
> +		return -EPERM;
> +
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
> + * @adev: Target device.
> + *
> + * Checks the state of @adev for access. Use the runtime pm API to resume if
> + * needed. Return 0 if the device is accessible or a negative error code
> + * otherwise.
> + */
> +static int amdgpu_pm_get_access(struct amdgpu_device *adev)
> +{
> +	int ret;
> +
> +	ret = amdgpu_pm_dev_state_check(adev);
> +	if (ret)
> +		return ret;
> +
> +	return pm_runtime_resume_and_get(adev->dev);
> +}
> +
> +/**
> + * amdgpu_pm_get_access_if_active - Check if device is active for access.
> + * @adev: Target device.
> + *
> + * Checks the state of @adev for access. Use the runtime pm API to determine
> + * if the device is active. Allow access only if the device is active. Return
> + * 0 if the device is accessible or a negative error code otherwise.
> + */
> +static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
> +{
> +	int ret;
> +
> +	ret = amdgpu_pm_dev_state_check(adev);
> +	if (ret)
> +		return ret;
> +
> +	ret = pm_runtime_get_if_active(adev->dev);
> +	if (ret <= 0)
> +		return ret ?: -EPERM;
> +
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_pm_put_access - Put the device to autosuspend mode after access.
> + * @adev: Target device.
> + *
> + * Should be paired with amdgpu_pm_get_access* calls.
> + */
> +static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
> +{
> +	pm_runtime_mark_last_busy(adev->dev);
> +	pm_runtime_put_autosuspend(adev->dev);
> +}
> +
>  /**
>   * DOC: power_dpm_state
>   *
> @@ -140,18 +211,13 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
>  	enum amd_pm_state_type pm;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	amdgpu_dpm_get_current_power_state(adev, &pm);
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return sysfs_emit(buf, "%s\n",
>  			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
> @@ -168,11 +234,6 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
>  	enum amd_pm_state_type  state;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	if (strncmp("battery", buf, strlen("battery")) == 0)
>  		state = POWER_STATE_TYPE_BATTERY;
>  	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
> @@ -182,14 +243,13 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
>  	else
>  		return -EINVAL;
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	amdgpu_dpm_set_power_state(adev, state);
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return count;
>  }
> @@ -263,18 +323,13 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
>  	enum amd_dpm_forced_level level = 0xff;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	level = amdgpu_dpm_get_performance_level(adev);
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return sysfs_emit(buf, "%s\n",
>  			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
> @@ -299,11 +354,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
>  	enum amd_dpm_forced_level level;
>  	int ret = 0;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	if (strncmp("low", buf, strlen("low")) == 0) {
>  		level = AMD_DPM_FORCED_LEVEL_LOW;
>  	} else if (strncmp("high", buf, strlen("high")) == 0) {
> @@ -328,14 +378,13 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
>  		return -EINVAL;
>  	}
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
>  	if (amdgpu_dpm_force_performance_level(adev, level)) {
> -		pm_runtime_mark_last_busy(ddev->dev);
> -		pm_runtime_put_autosuspend(ddev->dev);
> +		amdgpu_pm_put_access(adev);
>  		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
>  		return -EINVAL;
>  	}
> @@ -343,8 +392,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
>  	adev->pm.stable_pstate_ctx = NULL;
>  	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return count;
>  }
> @@ -359,19 +407,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
>  	uint32_t i;
>  	int buf_len, ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	if (amdgpu_dpm_get_pp_num_states(adev, &data))
>  		memset(&data, 0, sizeof(data));
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
>  	for (i = 0; i < data.nums; i++)
> @@ -394,20 +437,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
>  	enum amd_pm_state_type pm = 0;
>  	int i = 0, ret = 0;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	amdgpu_dpm_get_current_power_state(adev, &pm);
>  
>  	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (ret)
>  		return ret;
> @@ -429,11 +467,11 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
>  {
>  	struct drm_device *ddev = dev_get_drvdata(dev);
>  	struct amdgpu_device *adev = drm_to_adev(ddev);
> +	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> +	ret = amdgpu_pm_dev_state_check(adev);
> +	if (ret)
> +		return ret;
>  
>  	if (adev->pm.pp_force_state_enabled)
>  		return amdgpu_get_pp_cur_state(dev, attr, buf);
> @@ -453,11 +491,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
>  	unsigned long idx;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	adev->pm.pp_force_state_enabled = false;
>  
>  	if (strlen(buf) == 1)
> @@ -469,7 +502,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
>  
>  	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
> @@ -490,14 +523,13 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
>  		adev->pm.pp_force_state_enabled = true;
>  	}
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return count;
>  
>  err_out:
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
> +
>  	return ret;
>  }
>  
> @@ -521,18 +553,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
>  	char *table = NULL;
>  	int size, ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	size = amdgpu_dpm_get_pp_table(adev, &table);
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (size <= 0)
>  		return size;
> @@ -554,19 +581,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
>  	struct amdgpu_device *adev = drm_to_adev(ddev);
>  	int ret = 0;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (ret)
>  		return ret;
> @@ -735,11 +756,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
>  	const char delimiter[3] = {' ', '\n', '\0'};
>  	uint32_t type;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	if (count > 127 || count == 0)
>  		return -EINVAL;
>  
> @@ -785,7 +801,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
>  			tmp_str++;
>  	}
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
> @@ -806,14 +822,13 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
>  			goto err_out;
>  	}
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return count;
>  
>  err_out:
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
> +
>  	return -EINVAL;
>  }
>  
> @@ -835,14 +850,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
>  	};
>  	uint clk_index;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
>  		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
> @@ -861,7 +871,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
>  	if (size == 0)
>  		size = sysfs_emit(buf, "\n");
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return size;
>  }
> @@ -892,23 +902,17 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
>  	uint64_t featuremask;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	ret = kstrtou64(buf, 0, &featuremask);
>  	if (ret)
>  		return -EINVAL;
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (ret)
>  		return -EINVAL;
> @@ -925,20 +929,15 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
>  	ssize_t size;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
>  	if (size <= 0)
>  		size = sysfs_emit(buf, "\n");
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return size;
>  }
> @@ -991,14 +990,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
>  	int size = 0;
>  	int ret = 0;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
>  	if (ret == -ENOENT)
> @@ -1007,7 +1001,7 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
>  	if (size == 0)
>  		size = sysfs_emit(buf, "\n");
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return size;
>  }
> @@ -1057,23 +1051,17 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
>  	int ret;
>  	uint32_t mask = 0;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	ret = amdgpu_read_mask(buf, count, &mask);
>  	if (ret)
>  		return ret;
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	ret = amdgpu_dpm_force_clock_level(adev, type, mask);
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (ret)
>  		return -EINVAL;
> @@ -1240,18 +1228,13 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
>  	uint32_t value = 0;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	value = amdgpu_dpm_get_sclk_od(adev);
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return sysfs_emit(buf, "%d\n", value);
>  }
> @@ -1266,24 +1249,18 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
>  	int ret;
>  	long int value;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	ret = kstrtol(buf, 0, &value);
>  
>  	if (ret)
>  		return -EINVAL;
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return count;
>  }
> @@ -1297,18 +1274,13 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
>  	uint32_t value = 0;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	value = amdgpu_dpm_get_mclk_od(adev);
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return sysfs_emit(buf, "%d\n", value);
>  }
> @@ -1323,24 +1295,18 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
>  	int ret;
>  	long int value;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	ret = kstrtol(buf, 0, &value);
>  
>  	if (ret)
>  		return -EINVAL;
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return count;
>  }
> @@ -1378,20 +1344,15 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
>  	ssize_t size;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
>  	if (size <= 0)
>  		size = sysfs_emit(buf, "\n");
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return size;
>  }
> @@ -1414,11 +1375,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
>  	long int profile_mode = 0;
>  	const char delimiter[3] = {' ', '\n', '\0'};
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	tmp[0] = *(buf);
>  	tmp[1] = '\0';
>  	ret = kstrtol(tmp, 0, &profile_mode);
> @@ -1445,14 +1401,13 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
>  	}
>  	parameter[parameter_size] = profile_mode;
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (!ret)
>  		return count;
> @@ -1466,19 +1421,14 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
>  {
>  	int r, size = sizeof(uint32_t);
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	r = pm_runtime_get_if_active(adev->dev);
> -	if (r <= 0)
> -		return r ?: -EPERM;
> +	r = amdgpu_pm_get_access_if_active(adev);
> +	if (r)
> +		return r;
>  
>  	/* get the sensor value */
>  	r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
>  
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return r;
>  }
> @@ -1576,24 +1526,19 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
>  	uint64_t count0 = 0, count1 = 0;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	if (adev->flags & AMD_IS_APU)
>  		return -ENODATA;
>  
>  	if (!adev->asic_funcs->get_pcie_usage)
>  		return -ENODATA;
>  
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return sysfs_emit(buf, "%llu %llu %i\n",
>  			  count0, count1, pcie_get_mps(adev->pdev));
> @@ -1615,12 +1560,11 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
>  {
>  	struct drm_device *ddev = dev_get_drvdata(dev);
>  	struct amdgpu_device *adev = drm_to_adev(ddev);
> +	int r;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> +	r = amdgpu_pm_dev_state_check(adev);
> +	if (r)
> +		return r;
>  	if (adev->unique_id)
>  		return sysfs_emit(buf, "%016llx\n", adev->unique_id);
>  
> @@ -1715,9 +1659,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
>  	struct drm_device *ddev = dev_get_drvdata(dev);
>  	struct amdgpu_device *adev = drm_to_adev(ddev);
>  
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
>  	if (!ret)
> @@ -1725,7 +1669,7 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
>  	else
>  		size = sysfs_emit(buf, "failed to get thermal limit\n");
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return size;
>  }
> @@ -1749,20 +1693,18 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
>  		return -EINVAL;
>  	}
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
>  	if (ret) {
> -		pm_runtime_mark_last_busy(ddev->dev);
> -		pm_runtime_put_autosuspend(ddev->dev);
> +		amdgpu_pm_put_access(adev);
>  		dev_err(dev, "failed to update thermal limit\n");
>  		return ret;
>  	}
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return count;
>  }
> @@ -1786,18 +1728,13 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev,
>  	ssize_t size = 0;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
>  
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return size;
>  }
> @@ -1824,14 +1761,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
>  	ssize_t size = 0;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(ddev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
>  	if (size <= 0)
> @@ -1843,7 +1775,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
>  	memcpy(buf, gpu_metrics, size);
>  
>  out:
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return size;
>  }
> @@ -1939,19 +1871,14 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
>  	int r = 0;
>  	int bias = 0;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	r = pm_runtime_resume_and_get(ddev->dev);
> -	if (r < 0)
> -		return r;
> -
>  	r = kstrtoint(buf, 10, &bias);
>  	if (r)
>  		goto out;
>  
> +	r = amdgpu_pm_get_access(adev);
> +	if (r < 0)
> +		return r;
> +
>  	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
>  		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
>  	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
> @@ -1963,8 +1890,8 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
>  	/* TODO: update bias level with SMU message */
>  
>  out:
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
> +
>  	return r;
>  }
>  
> @@ -2218,14 +2145,14 @@ static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
>  	struct drm_device *ddev = dev_get_drvdata(dev);
>  	struct amdgpu_device *adev = drm_to_adev(ddev);
>  	struct amdgpu_pm_policy_attr *policy_attr;
> +	int r;
>  
>  	policy_attr =
>  		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> +	r = amdgpu_pm_dev_state_check(adev);
> +	if (r)
> +		return r;
>  
>  	return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
>  }
> @@ -2243,11 +2170,6 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
>  	char *tmp, *param;
>  	long val;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	count = min(count, sizeof(tmp_buf));
>  	memcpy(tmp_buf, buf, count);
>  	tmp_buf[count - 1] = '\0';
> @@ -2273,14 +2195,13 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
>  	policy_attr =
>  		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
>  
> -	ret = pm_runtime_resume_and_get(ddev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
>  
> -	pm_runtime_mark_last_busy(ddev->dev);
> -	pm_runtime_put_autosuspend(ddev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (ret)
>  		return ret;
> @@ -2704,18 +2625,13 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
>  	u32 pwm_mode = 0;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(adev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
>  
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (ret)
>  		return -EINVAL;
> @@ -2733,11 +2649,6 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
>  	u32 pwm_mode;
>  	int value;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	err = kstrtoint(buf, 10, &value);
>  	if (err)
>  		return err;
> @@ -2751,14 +2662,13 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
>  	else
>  		return -EINVAL;
>  
> -	ret = pm_runtime_resume_and_get(adev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
>  	ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
>  
> -	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (ret)
>  		return -EINVAL;
> @@ -2789,16 +2699,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
>  	u32 value;
>  	u32 pwm_mode;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	err = kstrtou32(buf, 10, &value);
>  	if (err)
>  		return err;
>  
> -	err = pm_runtime_resume_and_get(adev->dev);
> +	err = amdgpu_pm_get_access(adev);
>  	if (err < 0)
>  		return err;
>  
> @@ -2815,8 +2720,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
>  	err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
>  
>  out:
> -	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (err)
>  		return err;
> @@ -2832,18 +2736,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
>  	int err;
>  	u32 speed = 0;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	err = pm_runtime_get_if_active(adev->dev);
> -	if (err <= 0)
> -		return err ?: -EPERM;
> +	err = amdgpu_pm_get_access_if_active(adev);
> +	if (err)
> +		return err;
>  
>  	err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
>  
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (err)
>  		return err;
> @@ -2859,18 +2758,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
>  	int err;
>  	u32 speed = 0;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	err = pm_runtime_get_if_active(adev->dev);
> -	if (err <= 0)
> -		return err ?: -EPERM;
> +	err = amdgpu_pm_get_access_if_active(adev);
> +	if (err)
> +		return err;
>  
>  	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
>  
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (err)
>  		return err;
> @@ -2920,18 +2814,13 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
>  	int err;
>  	u32 rpm = 0;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	err = pm_runtime_get_if_active(adev->dev);
> -	if (err <= 0)
> -		return err ?: -EPERM;
> +	err = amdgpu_pm_get_access_if_active(adev);
> +	if (err)
> +		return err;
>  
>  	err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
>  
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (err)
>  		return err;
> @@ -2948,16 +2837,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
>  	u32 value;
>  	u32 pwm_mode;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	err = kstrtou32(buf, 10, &value);
>  	if (err)
>  		return err;
>  
> -	err = pm_runtime_resume_and_get(adev->dev);
> +	err = amdgpu_pm_get_access(adev);
>  	if (err < 0)
>  		return err;
>  
> @@ -2973,8 +2857,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
>  	err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
>  
>  out:
> -	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (err)
>  		return err;
> @@ -2990,18 +2873,13 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
>  	u32 pwm_mode = 0;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(adev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
>  
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (ret)
>  		return -EINVAL;
> @@ -3019,11 +2897,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
>  	int value;
>  	u32 pwm_mode;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	err = kstrtoint(buf, 10, &value);
>  	if (err)
>  		return err;
> @@ -3035,14 +2908,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
>  	else
>  		return -EINVAL;
>  
> -	err = pm_runtime_resume_and_get(adev->dev);
> +	err = amdgpu_pm_get_access(adev);
>  	if (err < 0)
>  		return err;
>  
>  	err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
>  
> -	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (err)
>  		return -EINVAL;
> @@ -3157,14 +3029,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
>  	ssize_t size;
>  	int r;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	r = pm_runtime_get_if_active(adev->dev);
> -	if (r <= 0)
> -		return r ?: -EPERM;
> +	r = amdgpu_pm_get_access_if_active(adev);
> +	if (r)
> +		return r;
>  
>  	r = amdgpu_dpm_get_power_limit(adev, &limit,
>  				      pp_limit_level, power_type);
> @@ -3174,7 +3041,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
>  	else
>  		size = sysfs_emit(buf, "\n");
>  
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return size;
>  }
> @@ -3235,11 +3102,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
>  	int err;
>  	u32 value;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	if (amdgpu_sriov_vf(adev))
>  		return -EINVAL;
>  
> @@ -3250,14 +3112,13 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
>  	value = value / 1000000; /* convert to Watt */
>  	value |= limit_type << 24;
>  
> -	err = pm_runtime_resume_and_get(adev->dev);
> +	err = amdgpu_pm_get_access(adev);
>  	if (err < 0)
>  		return err;
>  
>  	err = amdgpu_dpm_set_power_limit(adev, value);
>  
> -	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
> -	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	if (err)
>  		return err;
> @@ -3695,20 +3556,15 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
>  	int size = 0;
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	ret = pm_runtime_get_if_active(adev->dev);
> -	if (ret <= 0)
> -		return ret ?: -EPERM;
> +	ret = amdgpu_pm_get_access_if_active(adev);
> +	if (ret)
> +		return ret;
>  
>  	size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
>  	if (size == 0)
>  		size = sysfs_emit(buf, "\n");
>  
> -	pm_runtime_put_autosuspend(adev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return size;
>  }
> @@ -3776,11 +3632,6 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
>  	long parameter[64];
>  	int ret;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
>  	ret = parse_input_od_command_lines(in_buf,
>  					   count,
>  					   &cmd_type,
> @@ -3789,7 +3640,7 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
>  	if (ret)
>  		return ret;
>  
> -	ret = pm_runtime_resume_and_get(adev->dev);
> +	ret = amdgpu_pm_get_access(adev);
>  	if (ret < 0)
>  		return ret;
>  
> @@ -3808,14 +3659,12 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
>  			goto err_out;
>  	}
>  
> -	pm_runtime_mark_last_busy(adev->dev);
> -	pm_runtime_put_autosuspend(adev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return count;
>  
>  err_out:
> -	pm_runtime_mark_last_busy(adev->dev);
> -	pm_runtime_put_autosuspend(adev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return ret;
>  }
> @@ -4785,16 +4634,10 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
>  static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
>  {
>  	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
> -	struct drm_device *dev = adev_to_drm(adev);
>  	u64 flags = 0;
>  	int r;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> -
> -	r = pm_runtime_resume_and_get(dev->dev);
> +	r = amdgpu_pm_get_access(adev);
>  	if (r < 0)
>  		return r;
>  
> @@ -4811,7 +4654,7 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
>  	seq_printf(m, "\n");
>  
>  out:
> -	pm_runtime_put_autosuspend(dev->dev);
> +	amdgpu_pm_put_access(adev);
>  
>  	return r;
>  }
> @@ -4831,10 +4674,9 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
>  	void *smu_prv_buf;
>  	int ret = 0;
>  
> -	if (amdgpu_in_reset(adev))
> -		return -EPERM;
> -	if (adev->in_suspend && !adev->in_runpm)
> -		return -EPERM;
> +	ret = amdgpu_pm_dev_state_check(adev);
> +	if (ret)
> +		return ret;
>  
>  	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
>  	if (ret)



