Re: [PATCH] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Some nit-picks inline. Looks good otherwise.

On 2019-12-18 2:04 p.m., Jonathan Kim wrote:
The DF routines to arm xGMI performance will attempt to re-arm both on
performance monitoring start and read on initial failure to arm.

v2: Roll back reset_perfmon_cntr to void return since new perfmon
counters are now safe to write to during DF C-States.  Do single perfmon
controller re-arm when counter is deferred in pmc_count versus multiple
perfmon controller re-arms that could last 1 millisecond. Avoid holding
spin lock during sleep in perfmon_arm_with_retry.  Rename counter arm
defer function to be less confusing.

Signed-off-by: Jonathan Kim <Jonathan.Kim@xxxxxxx>
---
  drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 149 +++++++++++++++++++++++----
  1 file changed, 127 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 4043ebcea5de..56dead3a9718 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
  	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
  }
+/* same as perfmon arm but return status on write value check */
+static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
+					  uint32_t lo_addr, uint32_t lo_val,
+					  uint32_t hi_addr, uint32_t  hi_val)
+{
+	unsigned long flags, address, data;
+	uint32_t lo_val_rb, hi_val_rb;
+
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(address, lo_addr);
+	WREG32(data, lo_val);
+	WREG32(address, hi_addr);
+	WREG32(data, hi_val);
+
+	WREG32(address, lo_addr);
+	lo_val_rb = RREG32(data);
+	WREG32(address, hi_addr);
+	hi_val_rb = RREG32(data);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
+		return -EBUSY;
+
+	return 0;
+}
+
+
+/*
+ * retry arming counters every 100 usecs within 1 millisecond interval.
+ * if retry fails after time out, return error.
+ */
+#define ARM_RETRY_USEC_TIMEOUT	1000
+#define ARM_RETRY_USEC_INTERVAL	100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+					  uint32_t lo_addr, uint32_t lo_val,
+					  uint32_t hi_addr, uint32_t  hi_val)
+{
+	int countdown = ARM_RETRY_USEC_TIMEOUT;
+
+	while (countdown) {
+
+		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
+						     hi_addr, hi_val))
+			break;
+
+		countdown -= ARM_RETRY_USEC_INTERVAL;
+		udelay(ARM_RETRY_USEC_INTERVAL);
+	}
+
+	return countdown > 0 ? 0 : -ETIME;
+}
+
  /* get the number of df counters available */
  static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
  		struct device_attribute *attr,
@@ -334,20 +389,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
  	switch (target_cntr) {
  	case 0:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
  		break;
  	case 1:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
  		break;
  	case 2:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
  		break;
  	case 3:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
  		break;
  	}
@@ -422,6 +477,42 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
  	return -ENOSPC;
  }
+#define DEFERRED_ARM_MASK (1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+				    uint64_t config, bool is_deferred)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	if (target_cntr < 0)
+		return -EINVAL;
+
+	if (is_deferred)
+		adev->df_perfmon_config_assign_mask[target_cntr] |=
+							DEFERRED_ARM_MASK;
+	else
+		adev->df_perfmon_config_assign_mask[target_cntr] &=
+							~DEFERRED_ARM_MASK;
+
+	return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+				    uint64_t config)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	if (target_cntr < 0)
+		return -EINVAL;

This error code won't be returned correctly because the function returns bool.


+
+	return (adev->df_perfmon_config_assign_mask[target_cntr]
+							& DEFERRED_ARM_MASK);
+
+}
+
  /* release performance counter */
  static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
  				     uint64_t config)
@@ -451,29 +542,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
  			     int is_enable)
  {
  	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
-	int ret = 0;
+	int err = 0, ret = 0;
 
  	switch (adev->asic_type) {
  	case CHIP_VEGA20:
+		if (is_enable)
+			return df_v3_6_pmc_add_cntr(adev, config);
  		df_v3_6_reset_perfmon_cntr(adev, config);
 
-		if (is_enable) {
-			ret = df_v3_6_pmc_add_cntr(adev, config);
-		} else {
-			ret = df_v3_6_pmc_get_ctrl_settings(adev,
+		ret = df_v3_6_pmc_get_ctrl_settings(adev,
  					config,
  					&lo_base_addr,
  					&hi_base_addr,
  					&lo_val,
  					&hi_val);
 
-			if (ret)
-				return ret;
+		if (ret)
+			return ret;
 
-			df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
-					hi_base_addr, hi_val);
-		}
+		err = df_v3_6_perfmon_arm_with_retry(adev,
+						     lo_base_addr,
+						     lo_val,
+						     hi_base_addr,
+						     hi_val);
+
+		if (err)
+			ret = df_v3_6_pmc_set_deferred(adev, config, 1);

Pass "true" for the boolean parameter.


 
  		break;
  	default:
@@ -501,7 +596,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
  		if (ret)
  			return ret;
 
-		df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+		df_v3_6_reset_perfmon_cntr(adev, config);
 
  		if (is_disable)
  			df_v3_6_pmc_release_cntr(adev, config);
@@ -518,18 +613,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
  				  uint64_t config,
  				  uint64_t *count)
  {
-	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
  	*count = 0;
 
  	switch (adev->asic_type) {
  	case CHIP_VEGA20:
-
  		df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
  				      &hi_base_addr);
 
  		if ((lo_base_addr == 0) || (hi_base_addr == 0))
  			return;
+
+		/* rearm the counter or throw away count value on failure */
+		if (df_v3_6_pmc_is_deferred(adev, config)) {
+			int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
+							lo_base_addr, lo_val,
+							hi_base_addr, hi_val);
+
+			if (!rearm_err)
+				df_v3_6_pmc_set_deferred(adev, config, 0);

Pass "false" for the boolean parameter.

Also, I find the logic here a bit convoluted. This would be clearer IMO:

    if (rearm_err)
        return;

    df_v3_6_pmc_set_deferred(adev, config, false);
    ...


Regards,
  Felix


+			else
+				return;
+		}
+
  		df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
  				hi_base_addr, &hi_val);
@@ -542,7 +648,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
  			 config, lo_base_addr, hi_base_addr, lo_val, hi_val);
 
  		break;
-
  	default:
  		break;
  	}
_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx




[Index of Archives]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux