Move the sseu helper functions out of intel_device_info.h and remove
the inline qualifier: the subslice helpers move to intel_sseu.c/.h and
the EU mask helpers to intel_device_info.c. Additionally, prefix them
all with intel_sseu_* to match the naming convention of other functions
in i915.
v2: fix spacing from checkpatch warning
v3: squash helper function changes into a single patch
    break 80 character line to fix checkpatch warning
    move get/set_eus helpers to intel_device_info.c
Acked-by: Jani Nikula <jani.nikula@xxxxxxxxx>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@xxxxxxxxx>
Signed-off-by: Stuart Summers <stuart.summers@xxxxxxxxx>
---
drivers/gpu/drm/i915/gt/intel_sseu.c | 17 ++++
drivers/gpu/drm/i915/gt/intel_sseu.h | 10 +--
drivers/gpu/drm/i915/i915_debugfs.c | 4 +-
drivers/gpu/drm/i915/i915_drv.c | 2 +-
drivers/gpu/drm/i915/intel_device_info.c | 103 ++++++++++++++++-------
drivers/gpu/drm/i915/intel_device_info.h | 44 ----------
6 files changed, 97 insertions(+), 83 deletions(-)
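
Note for context (not part of the patch): the EU helpers being moved index
into a byte-packed eu_mask array, one stride of
DIV_ROUND_UP(max_eus_per_subslice, BITS_PER_BYTE) bytes per
(slice, subslice) pair. A minimal user-space sketch of that indexing scheme
follows; the sketch_* names and the toy sizes in main() are hypothetical,
the kernel helpers operate on struct sseu_dev_info instead.

/* Illustrative sketch of the byte-packed EU mask layout. */
#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct sketch_sseu {
	int max_subslices;
	int max_eus_per_subslice;
	uint8_t eu_mask[32]; /* one stride of bytes per (slice, subslice) */
};

/* Byte offset of the EU mask belonging to a given slice/subslice. */
static int sketch_eu_idx(const struct sketch_sseu *sseu, int slice,
			 int subslice)
{
	int subslice_stride = DIV_ROUND_UP(sseu->max_eus_per_subslice,
					   BITS_PER_BYTE);
	int slice_stride = sseu->max_subslices * subslice_stride;

	return slice * slice_stride + subslice * subslice_stride;
}

/* Reassemble one subslice's EU mask from the packed byte array. */
static uint16_t sketch_get_eus(const struct sketch_sseu *sseu, int slice,
			       int subslice)
{
	int i, offset = sketch_eu_idx(sseu, slice, subslice);
	uint16_t eu_mask = 0;

	for (i = 0;
	     i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++)
		eu_mask |= (uint16_t)sseu->eu_mask[offset + i] <<
			   (i * BITS_PER_BYTE);

	return eu_mask;
}

/* Scatter one subslice's EU mask into the packed byte array. */
static void sketch_set_eus(struct sketch_sseu *sseu, int slice, int subslice,
			   uint16_t eu_mask)
{
	int i, offset = sketch_eu_idx(sseu, slice, subslice);

	for (i = 0;
	     i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++)
		sseu->eu_mask[offset + i] =
			(eu_mask >> (i * BITS_PER_BYTE)) & 0xff;
}

int main(void)
{
	struct sketch_sseu sseu = { .max_subslices = 4,
				    .max_eus_per_subslice = 10 };

	sketch_set_eus(&sseu, 1, 2, 0x3ff); /* all 10 EUs enabled */
	printf("slice1/subslice2 EU mask: 0x%x\n",
	       sketch_get_eus(&sseu, 1, 2));
	return 0;
}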
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 7f448f3bea0b..a0756f006f5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -8,6 +8,23 @@
#include "intel_lrc_reg.h"
#include "intel_sseu.h"
+unsigned int
+intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+ unsigned int i, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
+ total += hweight8(sseu->subslice_mask[i]);
+
+ return total;
+}
+
+unsigned int
+intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
+{
+ return hweight8(sseu->subslice_mask[slice]);
+}
+
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
const struct intel_sseu *req_sseu)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index 9618dff46d83..b50d0401a4e2 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -63,11 +63,11 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
return value;
}
-static inline unsigned int
-intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
-{
- return hweight8(sseu->subslice_mask[slice]);
-}
+unsigned int
+intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
+
+unsigned int
+intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
const struct intel_sseu *req_sseu);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dceb32a16c5c..fce3ccd87f76 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4160,7 +4160,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
}
sseu->eu_total = sseu->eu_per_subslice *
- sseu_subslice_total(sseu);
+ intel_sseu_subslice_total(sseu);
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
@@ -4184,7 +4184,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
seq_printf(m, " %s Slice Total: %u\n", type,
hweight8(sseu->slice_mask));
seq_printf(m, " %s Subslice Total: %u\n", type,
- sseu_subslice_total(sseu));
+ intel_sseu_subslice_total(sseu));
for (s = 0; s < fls(sseu->slice_mask); s++) {
seq_printf(m, " %s Slice%i subslices: %u\n", type,
s, intel_sseu_subslices_per_slice(sseu, s));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index dcc872f9c676..c2ea3f0992b2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -382,7 +382,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = i915_cmd_parser_get_version(dev_priv);
break;
case I915_PARAM_SUBSLICE_TOTAL:
- value = sseu_subslice_total(sseu);
+ value = intel_sseu_subslice_total(sseu);
if (!value)
return -ENODEV;
break;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 9d6b9c45bc5e..689702b28e80 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -90,7 +90,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
drm_printf(p, "slice total: %u, mask=%04x\n",
hweight8(sseu->slice_mask), sseu->slice_mask);
- drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
+ drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
for (s = 0; s < sseu->max_slices; s++) {
drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
s, intel_sseu_subslices_per_slice(sseu, s),
@@ -114,6 +114,43 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
info->cs_timestamp_frequency_khz);
}
+static int intel_sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ int subslice_stride = DIV_ROUND_UP(sseu->max_eus_per_subslice,
+ BITS_PER_BYTE);
+ int slice_stride = sseu->max_subslices * subslice_stride;
+
+ return slice * slice_stride + subslice * subslice_stride;
+}
+
+static u16 intel_sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ int i, offset = intel_sseu_eu_idx(sseu, slice, subslice);
+ u16 eu_mask = 0;
+
+ for (i = 0;
+ i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
+ eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
+ (i * BITS_PER_BYTE);
+ }
+
+ return eu_mask;
+}
+
+static void intel_sseu_set_eus(struct sseu_dev_info *sseu, int slice,
+ int subslice, u16 eu_mask)
+{
+ int i, offset = intel_sseu_eu_idx(sseu, slice, subslice);
+
+ for (i = 0;
+ i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
+ sseu->eu_mask[offset + i] =
+ (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+ }
+}
+
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
@@ -130,7 +167,7 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
sseu->subslice_mask[s]);
for (ss = 0; ss < sseu->max_subslices; ss++) {
- u16 enabled_eus = sseu_get_eus(sseu, s, ss);
+ u16 enabled_eus = intel_sseu_get_eus(sseu, s, ss);
drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
ss, hweight16(enabled_eus), enabled_eus);
@@ -180,7 +217,7 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
for (ss = 0; ss < sseu->max_subslices; ss++) {
if (sseu->subslice_mask[s] & BIT(ss))
- sseu_set_eus(sseu, s, ss, eu_en);
+ intel_sseu_set_eus(sseu, s, ss, eu_en);
}
}
}
@@ -222,32 +259,32 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
/* Slice0 */
eu_en = ~I915_READ(GEN8_EU_DISABLE0);
for (ss = 0; ss < sseu->max_subslices; ss++)
- sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
+ intel_sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
/* Slice1 */
- sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
+ intel_sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
eu_en = ~I915_READ(GEN8_EU_DISABLE1);
- sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
+ intel_sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
/* Slice2 */
- sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
- sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
+ intel_sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
+ intel_sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
/* Slice3 */
- sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
+ intel_sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
eu_en = ~I915_READ(GEN8_EU_DISABLE2);
- sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
+ intel_sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
/* Slice4 */
- sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
- sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
+ intel_sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
+ intel_sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
/* Slice5 */
- sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
+ intel_sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
eu_en = ~I915_READ(GEN10_EU_DISABLE3);
- sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
+ intel_sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
/* Do a second pass where we mark the subslices disabled if all their
* eus are off.
*/
for (s = 0; s < sseu->max_slices; s++) {
for (ss = 0; ss < sseu->max_subslices; ss++) {
- if (sseu_get_eus(sseu, s, ss) == 0)
+ if (intel_sseu_get_eus(sseu, s, ss) == 0)
sseu->subslice_mask[s] &= ~BIT(ss);
}
}
@@ -260,9 +297,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
* EU in any one subslice may be fused off for die
* recovery.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
DIV_ROUND_UP(sseu->eu_total,
- sseu_subslice_total(sseu)) : 0;
+ intel_sseu_subslice_total(sseu)) :
+ 0;
/* No restrictions on Power Gating */
sseu->has_slice_pg = 1;
@@ -290,7 +328,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
sseu->subslice_mask[0] |= BIT(0);
- sseu_set_eus(sseu, 0, 0, ~disabled_mask);
+ intel_sseu_set_eus(sseu, 0, 0, ~disabled_mask);
}
if (!(fuse & CHV_FGT_DISABLE_SS1)) {
@@ -301,7 +339,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
sseu->subslice_mask[0] |= BIT(1);
- sseu_set_eus(sseu, 0, 1, ~disabled_mask);
+ intel_sseu_set_eus(sseu, 0, 1, ~disabled_mask);
}
sseu->eu_total = compute_eu_total(sseu);
@@ -310,8 +348,9 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
* CHV expected to always have a uniform distribution of EU
* across subslices.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
- sseu->eu_total / sseu_subslice_total(sseu) :
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+ sseu->eu_total /
+ intel_sseu_subslice_total(sseu) :
0;
/*
* CHV supports subslice power gating on devices with more than
@@ -319,7 +358,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
* more than one EU pair per subslice.
*/
sseu->has_slice_pg = 0;
- sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
+ sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
@@ -369,7 +408,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
- sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+ intel_sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
eu_per_ss = sseu->max_eus_per_subslice -
hweight8(eu_disabled_mask);
@@ -393,9 +432,10 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
* recovery. BXT is expected to be perfectly uniform in EU
* distribution.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
DIV_ROUND_UP(sseu->eu_total,
- sseu_subslice_total(sseu)) : 0;
+ intel_sseu_subslice_total(sseu)) :
+ 0;
/*
* SKL+ supports slice power gating on devices with more than
* one slice, and supports EU power gating on devices with
@@ -407,7 +447,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_slice_pg =
!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
sseu->has_subslice_pg =
- IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
+ IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_GEN9_LP(dev_priv)) {
@@ -477,7 +517,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
eu_disabled_mask =
eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
- sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+ intel_sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
n_disabled = hweight8(eu_disabled_mask);
@@ -496,9 +536,10 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* subslices with the exception that any one EU in any one subslice may
* be fused off for die recovery.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
DIV_ROUND_UP(sseu->eu_total,
- sseu_subslice_total(sseu)) : 0;
+ intel_sseu_subslice_total(sseu)) :
+ 0;
/*
* BDW supports slice power gating on devices with more than
@@ -561,8 +602,8 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
for (s = 0; s < sseu->max_slices; s++) {
for (ss = 0; ss < sseu->max_subslices; ss++) {
- sseu_set_eus(sseu, s, ss,
- (1UL << sseu->eu_per_subslice) - 1);
+ intel_sseu_set_eus(sseu, s, ss,
+ (1UL << sseu->eu_per_subslice) - 1);
}
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 9d43f7edfd63..6412a9c72898 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -218,50 +218,6 @@ struct intel_driver_caps {
bool has_logical_contexts:1;
};
-static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
-{
- unsigned int i, total = 0;
-
- for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
- total += hweight8(sseu->subslice_mask[i]);
-
- return total;
-}
-
-static inline int sseu_eu_idx(const struct sseu_dev_info *sseu,
- int slice, int subslice)
-{
- int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
- int slice_stride = sseu->max_subslices * subslice_stride;
-
- return slice * slice_stride + subslice * subslice_stride;
-}
-
-static inline u16 sseu_get_eus(const struct sseu_dev_info *sseu,
- int slice, int subslice)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
- u16 eu_mask = 0;
-
- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
- eu_mask |= ((u16) sseu->eu_mask[offset + i]) <<
- (i * BITS_PER_BYTE);
- }
-
- return eu_mask;
-}
-
-static inline void sseu_set_eus(struct sseu_dev_info *sseu,
- int slice, int subslice, u16 eu_mask)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
-
- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
- sseu->eu_mask[offset + i] =
- (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
- }
-}
-
const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);