RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization

Hi Christian,


Thanks for your review. May I use kzalloc to allocate the buffer instead, to avoid the stack problem you mentioned?

The hypervisor driver transfers the message through this page-sized block of memory.
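
For example, a minimal sketch of what I have in mind for amdgpu_virt_get_sclk (an illustration only, not the final patch; it assumes the hypervisor reply fits in one page):

uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
{
	char *buf;
	uint32_t clk;

	/* Allocate a zeroed page on the heap instead of a large stack
	 * array, since the hypervisor transfers its reply through a
	 * page-sized message buffer.
	 */
	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
	clk = parse_clk(buf, lowest);

	kfree(buf);
	return clk;
}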

Best Regards
Yintian Tao

-----Original Message-----
From: Christian König <ckoenig.leichtzumerken@xxxxxxxxx> 
Sent: Wednesday, April 10, 2019 6:32 PM
To: Quan, Evan <Evan.Quan@xxxxxxx>; Tao, Yintian <Yintian.Tao@xxxxxxx>; amd-gfx@xxxxxxxxxxxxxxxxxxxxx
Subject: Re: [PATCH] drm/amdgpu: support dpm level modification under virtualization

On 10.04.19 at 11:58, Quan, Evan wrote:
>
>> -----Original Message-----
>> From: amd-gfx <amd-gfx-bounces@xxxxxxxxxxxxxxxxxxxxx> On Behalf Of 
>> Yintian Tao
>> Sent: April 9, 2019 23:18
>> To: amd-gfx@xxxxxxxxxxxxxxxxxxxxx
>> Cc: Tao, Yintian <Yintian.Tao@xxxxxxx>
>> Subject: [PATCH] drm/amdgpu: support dpm level modification under 
>> virtualization
>>
>> Under Vega10 virtualization, the SMU IP block will not be added.
>> Therefore, we need to add pp clk query and force dpm level functions
>> to amdgpu_virt_ops to support this feature.
>>
>> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
>> Signed-off-by: Yintian Tao <yttao@xxxxxxx>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
>>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78 ++++++++++++++++++++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
>>   7 files changed, 147 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 3ff8899..bb0fd5a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>   	mutex_init(&adev->virt.vf_errors.lock);
>>   	hash_init(adev->mn_hash);
>>   	mutex_init(&adev->lock_reset);
>> +	mutex_init(&adev->virt.dpm_mutex);
>>
>>   	amdgpu_device_check_arguments(adev);
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> index 6190495..1353955 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> @@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
>>   		if (adev->pm.dpm_enabled) {
>>   			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
>>   			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
>> +		} else if (amdgpu_sriov_vf(adev)) {
>> +			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
>> +			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
>>   		} else {
>>   			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
>>   			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> index 5540259..0162d1e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> @@ -380,6 +380,17 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
>>   		goto fail;
>>   	}
>>
>> +        if (amdgpu_sriov_vf(adev)) {
>> +                if (amdgim_is_hwperf(adev) &&
>> +                    adev->virt.ops->force_dpm_level) {
>> +                        mutex_lock(&adev->pm.mutex);
>> +                        adev->virt.ops->force_dpm_level(adev, level);
>> +                        mutex_unlock(&adev->pm.mutex);
>> +                        return count;
>> +                } else
>> +                        return -EINVAL;
>> +        }
>> +
>>   	if (current_level == level)
>>   		return count;
>>
>> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
>>   	struct drm_device *ddev = dev_get_drvdata(dev);
>>   	struct amdgpu_device *adev = ddev->dev_private;
>>
>> +	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
>> +	    adev->virt.ops->get_pp_clk)
>> +		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
>> +
>>   	if (is_support_sw_smu(adev))
>>   		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
>>   	else if (adev->powerplay.pp_funcs->print_clock_levels)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> index 462a04e..ae4b2a1 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> @@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
>>   	}
>>   }
>>
>> +static uint32_t parse_clk(char *buf, bool min)
>> +{
>> +        char *ptr = buf;
>> +        uint32_t clk = 0;
>> +
>> +        do {
>> +                ptr = strchr(ptr, ':');
>> +                if (!ptr)
>> +                        break;
>> +                ptr+=2;
>> +                clk = simple_strtoul(ptr, NULL, 10);
>> +        } while (!min);
>> +
>> +        return clk * 100;
>> +}
>> +
>> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
>> +{
>> +        char buf[512] = {0};
> [Quan, Evan] In xgpu_ai_get_pp_clk, the max buffer size seems to be PAGE_SIZE. Maybe this should be aligned with that.

Well at least on the stack that would be a really bad idea. Where have you seen that?

That said, 512 is a bit large as well, and in addition please stop initializing things with "{0}"; use memset for that.
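
For illustration, the kind of thing I mean (keeping your 512-byte buffer only for the example):

	char buf[512];

	memset(buf, 0, sizeof(buf));	/* explicit zeroing instead of "= {0}" */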

Christian.

>> +
>> +        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> [Quan, Evan] Better to check the existence of adev->virt.ops->get_pp_clk before dereferencing it, as you did in amdgpu_set_dpm_forced_performance_level.
>> +
>> +        return parse_clk(buf, lowest);
>> +}
>> +
>> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
>> +{
>> +        char buf[512] = {0};
>> +
>> +        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
>> +
>> +        return parse_clk(buf, lowest);
>> +}
>>
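
As a side note, to illustrate Evan's point about the unchecked dereference, the two helpers above could guard the call roughly like this (a sketch only, not part of the patch):

	/* Bail out if the host interface is not wired up, instead of
	 * dereferencing a NULL ops table or function pointer.
	 */
	if (!adev->virt.ops || !adev->virt.ops->get_pp_clk)
		return 0;

	adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
	return parse_clk(buf, lowest);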
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> index 722deef..584947b 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
>>   	int (*reset_gpu)(struct amdgpu_device *adev);
>>   	int (*wait_reset)(struct amdgpu_device *adev);
>>   	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
>> +	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
>> +	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
>>   };
>>
>>   /*
>> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
>>   	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
>>   	/* VRAM LOST by GIM */
>>   	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
>> +	/* HW PERF SIM in GIM */
>> +	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
>>   };
>>
>>   struct amd_sriov_msg_pf2vf_info_header {
>> @@ -252,6 +256,8 @@ struct amdgpu_virt {
>>   	struct amdgpu_vf_error_buffer   vf_errors;
>>   	struct amdgpu_virt_fw_reserve	fw_reserve;
>>   	uint32_t gim_feature;
>> +	/* protect DPM events to GIM */
>> +	struct mutex                    dpm_mutex;
>>   };
>>
>>   #define amdgpu_sriov_enabled(adev) \
>> @@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
>>   #endif
>>   }
>>
>> +#define amdgim_is_hwperf(adev) \
>> +	((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
>> +
>>   bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
>>   void amdgpu_virt_init_setting(struct amdgpu_device *adev);
>>   uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
>> @@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
>>   					unsigned int key,
>>   					unsigned int chksum);
>>   void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
>> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
>> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
>>
>>   #endif
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> index 73851eb..8dbad49 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
>>   	xgpu_ai_mailbox_set_valid(adev, false);
>>   }
>>
>> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
>> +{
>> +        int r = 0;
>> +        u32 req, val, size;
>> +
>> +        if (!amdgim_is_hwperf(adev) || buf == NULL)
>> +                return -EBADRQC;
>> +
>> +        switch(type) {
>> +        case PP_SCLK:
>> +                req = IDH_IRQ_GET_PP_SCLK;
>> +                break;
>> +        case PP_MCLK:
>> +                req = IDH_IRQ_GET_PP_MCLK;
>> +                break;
>> +        default:
>> +                return -EBADRQC;
>> +        }
>> +
>> +        mutex_lock(&adev->virt.dpm_mutex);
>> +
>> +        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
>> +        if (!r && adev->fw_vram_usage.va != NULL) {
>> +                val = RREG32_NO_KIQ(
>> +                        SOC15_REG_OFFSET(NBIO, 0,
>> +                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
>> +                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
>> +                                val), PAGE_SIZE);
>> +
>> +                if (size < PAGE_SIZE)
>> +                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
>> +                else
>> +                        size = 0;
>> +
>> +                r = size;
>> +                goto out;
>> +        }
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
>> +        if(r)
>> +                pr_info("%s DPM request failed",
>> +                        (type == PP_SCLK)? "SCLK" : "MCLK");
>> +
>> +out:
>> +        mutex_unlock(&adev->virt.dpm_mutex);
>> +        return r;
>> +}
>> +
>> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
>> +{
>> +        int r = 0;
>> +        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
>> +
>> +        if (!amdgim_is_hwperf(adev))
>> +                return -EBADRQC;
>> +
>> +        mutex_lock(&adev->virt.dpm_mutex);
>> +        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
>> +        if (!r)
>> +                goto out;
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
>> +        if (!r)
>> +                pr_info("DPM request failed");
>> +        else
>> +                pr_info("Mailbox is broken");
>> +
>> +out:
>> +        mutex_unlock(&adev->virt.dpm_mutex);
>> +        return r;
>> +}
>> +
>>   static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
>>   					enum idh_request req)
>>   {
>> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
>>   	.reset_gpu = xgpu_ai_request_reset,
>>   	.wait_reset = NULL,
>>   	.trans_msg = xgpu_ai_mailbox_trans_msg,
>> +	.get_pp_clk = xgpu_ai_get_pp_clk,
>> +	.force_dpm_level = xgpu_ai_force_dpm_level,
>>   };
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> index b4a9cee..39d151b 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> @@ -35,6 +35,10 @@ enum idh_request {
>>   	IDH_REL_GPU_FINI_ACCESS,
>>   	IDH_REQ_GPU_RESET_ACCESS,
>>
>> +	IDH_IRQ_FORCE_DPM_LEVEL = 10,
>> +	IDH_IRQ_GET_PP_SCLK,
>> +	IDH_IRQ_GET_PP_MCLK,
>> +
>>   	IDH_LOG_VF_ERROR       = 200,
>>   };
>>
>> @@ -43,6 +47,8 @@ enum idh_event {
>>   	IDH_READY_TO_ACCESS_GPU,
>>   	IDH_FLR_NOTIFICATION,
>>   	IDH_FLR_NOTIFICATION_CMPL,
>> +	IDH_SUCCESS,
>> +	IDH_FAIL,
>>   	IDH_EVENT_MAX
>>   };
>>
>> --
>> 2.7.4
>>

_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx



