Re: [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock

Hi Anshuman,

On 09-10-2023 19:06, Gupta, Anshuman wrote:


On 10/6/2023 10:32 PM, Badal Nilawar wrote:
Take hwmon_lock while accessing hwmon rw attributes. For read-only
attributes it is not required to take the lock, as reads are protected
by the sysfs layer and are therefore sequential.

Cc: Ashutosh Dixit <ashutosh.dixit@xxxxxxxxx>
Cc: Anshuman Gupta <anshuman.gupta@xxxxxxxxx>
Signed-off-by: Badal Nilawar <badal.nilawar@xxxxxxxxx>
---
  drivers/gpu/drm/xe/xe_hwmon.c | 143 +++++++++++++++++-----------------
  1 file changed, 71 insertions(+), 72 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 9ac05994a967..391f9a8dd9d7 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
  struct xe_hwmon {
      struct device *hwmon_dev;
      struct xe_gt *gt;
-    struct mutex hwmon_lock; /* rmw operations*/
+    struct mutex hwmon_lock;    /* For rw attributes */
      int scl_shift_power;
      int scl_shift_energy;
      struct xe_hwmon_energy_info ei;    /*  Energy info for energy1_input */
@@ -95,47 +95,45 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
      return reg.raw;
  }
-static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
-                enum xe_hwmon_reg_operation operation, u32 *value,
-                u32 clr, u32 set)
+static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
+                 enum xe_hwmon_reg_operation operation, u32 *value,
+                 u32 clr, u32 set)
  {
      struct xe_reg reg;
      reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
      if (!reg.raw)
-        return -EOPNOTSUPP;
+        return;
Don't we need to report this as a warning?
What is the possibility of the code path landing here?

A warning is already added in xe_hwmon_get_reg() when the reg is invalid. A warning is not needed when the reg is simply not supported by the platform. At runtime the code path will not land here, because the *_is_visible() functions create the hwmon entries only if the regs are supported. So it doesn't make sense to add a warn here.
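For reference, the gating works roughly like this (a minimal sketch with a simplified, hypothetical callback name; the real checks live in the *_is_visible() helpers further down in this file):

	/*
	 * Sketch only: the visibility callback returns 0 when the register
	 * is absent, so the sysfs attribute is never created and the
	 * read/write paths (and hence xe_hwmon_process_reg()) are never
	 * reached for it at runtime.
	 */
	static umode_t xe_hwmon_power_max_visible(struct xe_hwmon *hwmon)
	{
		return xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? 0664 : 0;
	}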
      switch (operation) {
      case REG_READ:
          *value = xe_mmio_read32(hwmon->gt, reg);
-        return 0;
+        break;
      case REG_WRITE:
          xe_mmio_write32(hwmon->gt, reg, *value);
-        return 0;
+        break;
      case REG_RMW:
          *value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
-        return 0;
+        break;
      default:
          drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg operation: %d\n",
               operation);
-        return -EOPNOTSUPP;
+        break;
      }
  }
-static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
-                       enum xe_hwmon_reg hwmon_reg, u64 *value)
+static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
+                    enum xe_hwmon_reg hwmon_reg, u64 *value)

Isn't it possible to pass the length of the void *value to xe_hwmon_process_reg(),
so we can wrap this function into xe_hwmon_process_reg()?

Yes, it's possible, but I feel it would be more useful if there were regs of variable lengths, i.e. other than 64 or 32 bit. As of now the hwmon regs are 32 and 64 bit, so I preferred two separate functions. If you insist, I will wrap it.
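For the record, the wrapped interface being suggested would look roughly like this (a minimal sketch only, with a hypothetical xe_hwmon_process_reg_read() name and len parameter; it is not part of this patch):

	/*
	 * Sketch of the suggested single read helper: take the length of the
	 * value buffer and dispatch to the 32-bit or 64-bit MMIO read. Names
	 * and shape are illustrative, not the code being merged.
	 */
	static void xe_hwmon_process_reg_read(struct xe_hwmon *hwmon,
					      enum xe_hwmon_reg hwmon_reg,
					      void *value, size_t len)
	{
		struct xe_reg reg;

		reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
		if (!reg.raw)
			return;

		if (len == sizeof(u64))
			*(u64 *)value = xe_mmio_read64_2x32(hwmon->gt, reg);
		else
			*(u32 *)value = xe_mmio_read32(hwmon->gt, reg);
	}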

Regards,
Badal

  {
      struct xe_reg reg;
      reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
      if (!reg.raw)
-        return -EOPNOTSUPP;
+        return;
      *value = xe_mmio_read64_2x32(hwmon->gt, reg);
-
-    return 0;
  }
  #define PL1_DISABLE 0
@@ -146,16 +144,18 @@ static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
    * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
   * clamped values when read.
   */
-static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
  {
      u32 reg_val;
      u64 reg_val64, min, max;
+    mutex_lock(&hwmon->hwmon_lock);
+
      xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val, 0, 0);
      /* Check if PL1 limit is disabled */
      if (!(reg_val & PKG_PWR_LIM_1_EN)) {
          *value = PL1_DISABLE;
-        return 0;
+        goto unlock;
      }
      reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
@@ -169,14 +169,17 @@ static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
      if (min && max)
          *value = clamp_t(u64, *value, min, max);
-
-    return 0;
+unlock:
+    mutex_unlock(&hwmon->hwmon_lock);
  }
  static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
  {
+    int ret = 0;
      u32 reg_val;
+    mutex_lock(&hwmon->hwmon_lock);
+
      /* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
      if (value == PL1_DISABLE) {
          xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
@@ -184,8 +187,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
          xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val,
                       PKG_PWR_LIM_1_EN, 0);
-        if (reg_val & PKG_PWR_LIM_1_EN)
-            return -EOPNOTSUPP;
+        if (reg_val & PKG_PWR_LIM_1_EN) {
+            ret = -EOPNOTSUPP;
+            goto unlock;
+        }
      }
      /* Computation in 64-bits to avoid overflow. Round to nearest. */
@@ -194,19 +199,18 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
      xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
                   PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
-
-    return 0;
+unlock:
+    mutex_unlock(&hwmon->hwmon_lock);
+    return ret;
  }
-static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
  {
      u32 reg_val;
      xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ, &reg_val, 0, 0);
      reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
      *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
-
-    return 0;
  }
  /*
@@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
      struct xe_hwmon_energy_info *ei = &hwmon->ei;
      u32 reg_val;
-    xe_device_mem_access_get(gt_to_xe(hwmon->gt));
-
-    mutex_lock(&hwmon->hwmon_lock);
-
      xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ,
                   &reg_val, 0, 0);
@@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
      *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
                    hwmon->scl_shift_energy);
-
-    mutex_unlock(&hwmon->hwmon_lock);
-
-    xe_device_mem_access_put(gt_to_xe(hwmon->gt));
  }
  static const struct hwmon_channel_info *hwmon_info[] = {
@@ -284,6 +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
                    uval);
  }
+static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long *value, u32 scale_factor)
+{
+    int ret;
+    u32 uval;
+
+    mutex_lock(&hwmon->hwmon_lock);
+
+    ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
+    if (ret)
+        goto unlock;
+
+    *value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
+                 scale_factor, POWER_SETUP_I1_SHIFT);
+unlock:
+    mutex_unlock(&hwmon->hwmon_lock);
+    return ret;
+}
+
+static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u32 scale_factor)
+{
+    int ret;
+    u32 uval;
+
+    mutex_lock(&hwmon->hwmon_lock);
+
+    uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
+    ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+
+    mutex_unlock(&hwmon->hwmon_lock);
+    return ret;
+}
+
  static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
  {
      u32 reg_val;
@@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int chan)
  static int
  xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
  {
-    int ret;
-    u32 uval;
-
      switch (attr) {
      case hwmon_power_max:
-        return xe_hwmon_power_max_read(hwmon, val);
+        xe_hwmon_power_max_read(hwmon, val);
+        return 0;
      case hwmon_power_rated_max:
-        return xe_hwmon_power_rated_max_read(hwmon, val);
-    case hwmon_power_crit:
-        ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
-        if (ret)
-            return ret;
-        if (!(uval & POWER_SETUP_I1_WATTS))
-            return -ENODEV;
-        *val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
-                       SF_POWER, POWER_SETUP_I1_SHIFT);
+        xe_hwmon_power_rated_max_read(hwmon, val);
          return 0;
+    case hwmon_power_crit:
+        return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
      default:
          return -EOPNOTSUPP;
      }
@@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
  static int
  xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan, long val)
  {
-    u32 uval;
-
      switch (attr) {
      case hwmon_power_max:
          return xe_hwmon_power_max_write(hwmon, val);
      case hwmon_power_crit:
-        uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
-        return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+        return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
      default:
          return -EOPNOTSUPP;
      }
@@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr)
  static int
  xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
  {
-    int ret;
-    u32 uval;
-
      switch (attr) {
      case hwmon_curr_crit:
-        ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
-        if (ret)
-            return ret;
-        if (uval & POWER_SETUP_I1_WATTS)
-            return -ENODEV;
-        *val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
-                       SF_CURR, POWER_SETUP_I1_SHIFT);
-        return 0;
+        return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
      default:
          return -EOPNOTSUPP;
      }
@@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
  static int
  xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
  {
-    u32 uval;
-
      switch (attr) {
      case hwmon_curr_crit:
-        uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_CURR);
-        return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+        return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
      default:
          return -EOPNOTSUPP;
      }
@@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
  {
      int ret;
-    xe_device_mem_access_get(gt_to_xe(hwmon->gt));
-
      switch (attr) {
      case hwmon_in_input:
          ret = xe_hwmon_get_voltage(hwmon, val);
@@ -430,8 +432,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
          ret = -EOPNOTSUPP;
      }
-    xe_device_mem_access_put(gt_to_xe(hwmon->gt));
-
      return ret;
  }
@@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
      struct xe_hwmon *hwmon = xe->hwmon;
      long energy;
      u32 val_sku_unit = 0;
-    int ret;
-    ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
      /*
       * The contents of register PKG_POWER_SKU_UNIT do not change,
       * so read it once and store the shift values.
       */
-    if (!ret) {
+    if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
+        xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
          hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
          hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
      }


