From: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx> Unify our approach to figuring out how many wm levels are supported by having dev_priv->wm.num_levels. This replaces the older dev_priv->wm.max_level which was used on some of the platforms. I think num_levels is less confusing than max_level in most places. The +/-1 is now mostly isolated to the memory latency init code. v2: Rebase Signed-off-by: Ville Syrjälä <ville.syrjala@xxxxxxxxxxxxxxx> --- drivers/gpu/drm/i915/i915_debugfs.c | 24 +---- drivers/gpu/drm/i915/i915_drv.h | 4 +- drivers/gpu/drm/i915/intel_display.c | 6 +- drivers/gpu/drm/i915/intel_pm.c | 131 +++++++++++++-------------- 4 files changed, 69 insertions(+), 96 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 8fd783aa226e..d3250c674eb8 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3789,17 +3789,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) { struct drm_i915_private *dev_priv = m->private; struct drm_device *dev = &dev_priv->drm; - int level; - int num_levels; - - if (IS_CHERRYVIEW(dev_priv)) - num_levels = 3; - else if (IS_VALLEYVIEW(dev_priv)) - num_levels = 1; - else if (IS_G4X(dev_priv)) - num_levels = 3; - else - num_levels = ilk_wm_max_level(dev_priv) + 1; + int level, num_levels = dev_priv->wm.num_levels; drm_modeset_lock_all(dev); @@ -3880,20 +3870,10 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, struct drm_i915_private *dev_priv = m->private; struct drm_device *dev = &dev_priv->drm; uint16_t new[8] = { 0 }; - int num_levels; - int level; + int level, num_levels = dev_priv->wm.num_levels; int ret; char tmp[32]; - if (IS_CHERRYVIEW(dev_priv)) - num_levels = 3; - else if (IS_VALLEYVIEW(dev_priv)) - num_levels = 1; - else if (IS_G4X(dev_priv)) - num_levels = 3; - else - num_levels = ilk_wm_max_level(dev_priv) + 1; - if (len >= sizeof(tmp)) return -EINVAL; diff --git 
a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 406f4123eab6..e6d6ec2e0e71 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1902,8 +1902,6 @@ struct drm_i915_private { struct g4x_wm_values g4x; }; - uint8_t max_level; - /* * Should be held around atomic WM register writing; also * protects * intel_crtc->wm.active and @@ -1911,6 +1909,8 @@ struct drm_i915_private { */ struct mutex wm_mutex; + u8 num_levels; + /* * Set during HW readout of watermarks/DDB. Some platforms * need to know when we're still using BIOS-provided values diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fe045abb6472..f4dd297d4804 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11802,7 +11802,7 @@ static void verify_wm_state(struct drm_crtc *crtc, struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); const enum pipe pipe = intel_crtc->pipe; - int plane, level, max_level = ilk_wm_max_level(dev_priv); + int plane, level, num_levels = dev_priv->wm.num_levels; if (INTEL_GEN(dev_priv) < 9 || !new_state->active) return; @@ -11824,7 +11824,7 @@ static void verify_wm_state(struct drm_crtc *crtc, sw_plane_wm = &sw_wm->planes[plane]; /* Watermarks */ - for (level = 0; level <= max_level; level++) { + for (level = 0; level < num_levels; level++) { if (skl_wm_level_equals(&hw_plane_wm->wm[level], &sw_plane_wm->wm[level])) continue; @@ -11874,7 +11874,7 @@ static void verify_wm_state(struct drm_crtc *crtc, sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; /* Watermarks */ - for (level = 0; level <= max_level; level++) { + for (level = 0; level < num_levels; level++) { if (skl_wm_level_equals(&hw_plane_wm->wm[level], &sw_plane_wm->wm[level])) continue; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 22cbb8e25f5b..4ffaa17fc4e0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ 
b/drivers/gpu/drm/i915/intel_pm.c @@ -796,23 +796,18 @@ static bool is_enabling(int old, int new, int threshold) return old < threshold && new >= threshold; } -static int intel_wm_num_levels(struct drm_i915_private *dev_priv) -{ - return dev_priv->wm.max_level + 1; -} - static void intel_print_wm_latency(struct drm_i915_private *dev_priv, const char *name, const uint16_t wm[8]) { - int level, max_level = ilk_wm_max_level(dev_priv); + int level, num_levels = dev_priv->wm.num_levels; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < num_levels; level++) { unsigned int latency = wm[level]; if (latency == 0) { - DRM_DEBUG_KMS("%s WM%d latency not provided\n", - name, level); + DRM_ERROR("%s WM%d latency not provided\n", + name, level); continue; } @@ -1080,7 +1075,7 @@ static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12 * 10; dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35 * 10; - dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL; + dev_priv->wm.num_levels = G4X_WM_LEVEL_HPLL + 1; memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, sizeof(dev_priv->wm.pri_latency)); @@ -1204,7 +1199,7 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); bool dirty = false; - for (; level < intel_wm_num_levels(dev_priv); level++) { + for (; level < dev_priv->wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; dirty |= raw->plane[plane_id] != value; @@ -1223,7 +1218,7 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, /* NORMAL level doesn't have an FBC watermark */ level = max(level, G4X_WM_LEVEL_SR); - for (; level < intel_wm_num_levels(dev_priv); level++) { + for (; level < dev_priv->wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; dirty |= raw->fbc != value; @@ -1241,7 +1236,8 @@ static bool 
g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->base.plane); - int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + int num_levels = dev_priv->wm.num_levels; enum plane_id plane_id = plane->id; bool dirty = false; int level; @@ -1321,7 +1317,7 @@ static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - if (level > dev_priv->wm.max_level) + if (level >= dev_priv->wm.num_levels) return false; return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && @@ -1634,13 +1630,13 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) /* all latencies in .1 usec */ dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3 * 10; - dev_priv->wm.max_level = VLV_WM_LEVEL_PM2; + dev_priv->wm.num_levels = VLV_WM_LEVEL_PM2 + 1; if (IS_CHERRYVIEW(dev_priv)) { dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12 * 10; dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33 * 10; - dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS; + dev_priv->wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1; } memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, @@ -1783,7 +1779,7 @@ static void vlv_invalidate_wms(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - for (; level < intel_wm_num_levels(dev_priv); level++) { + for (; level < dev_priv->wm.num_levels; level++) { enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) @@ -1810,7 +1806,7 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, int level, enum plane_id plane_id, u16 value) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - int num_levels = intel_wm_num_levels(dev_priv); + int num_levels = dev_priv->wm.num_levels; bool dirty = false; for (; level < 
num_levels; level++) { @@ -1827,8 +1823,9 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; - int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); + int num_levels = dev_priv->wm.num_levels; int level; bool dirty = false; @@ -1942,7 +1939,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) } /* initially allow all levels */ - wm_state->num_levels = intel_wm_num_levels(dev_priv); + wm_state->num_levels = dev_priv->wm.num_levels; /* * Note that enabling cxsr with no primary/sprite planes * enabled can wedge the pipe. Hence we only allow cxsr @@ -2143,7 +2140,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, struct intel_crtc *crtc; int num_active_crtcs = 0; - wm->level = dev_priv->wm.max_level; + wm->level = dev_priv->wm.num_levels - 1; wm->cxsr = true; for_each_intel_crtc(&dev_priv->drm, crtc) { @@ -2663,7 +2660,7 @@ static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) } /* Calculate the maximum primary/sprite plane watermark */ -static unsigned int ilk_plane_wm_max(const struct drm_device *dev, +static unsigned int ilk_plane_wm_max(struct drm_device *dev, int level, const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, @@ -2705,7 +2702,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev, } /* Calculate the maximum cursor plane watermark */ -static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, +static unsigned int ilk_cursor_wm_max(struct drm_device *dev, int level, const struct intel_wm_config *config) { @@ -2717,7 +2714,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, return ilk_cursor_wm_reg_max(to_i915(dev), level); } -static void ilk_compute_wm_maximums(const 
struct drm_device *dev, +static void ilk_compute_wm_maximums(struct drm_device *dev, int level, const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, @@ -2837,7 +2834,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) >= 9) { uint32_t val; int ret, i; - int level, max_level = ilk_wm_max_level(dev_priv); + int level, num_levels = dev_priv->wm.num_levels; /* read the first set of memory latencies[0:3] */ val = 0; /* data0 to be programmed to 0 for first set */ @@ -2885,9 +2882,9 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, * need to be disabled. We make sure to sanitize the values out * of the punit to satisfy this requirement. */ - for (level = 1; level <= max_level; level++) { + for (level = 1; level < num_levels; level++) { if (wm[level] == 0) { - for (i = level + 1; i <= max_level; i++) + for (i = level + 1; i < num_levels; i++) wm[i] = 0; break; } @@ -2902,7 +2899,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, */ if (wm[0] == 0) { wm[0] += 2; - for (level = 1; level <= max_level; level++) { + for (level = 1; level < num_levels; level++) { if (wm[level] == 0) break; wm[level] += 2; @@ -2966,30 +2963,17 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, static void ilk_fixup_wm_latency_units(struct drm_i915_private *dev_priv, u16 wm[5]) { - int level, num_levels = ilk_wm_max_level(dev_priv) + 1; + int level, num_levels = dev_priv->wm.num_levels; /* convert .5 usec to .1 usec units */ for (level = 1; level < num_levels; level++) wm[level] *= 5; } -int ilk_wm_max_level(const struct drm_i915_private *dev_priv) -{ - /* how many WM levels are we expecting */ - if (INTEL_GEN(dev_priv) >= 9) - return 7; - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - return 4; - else if (INTEL_GEN(dev_priv) >= 6) - return 3; - else - return 2; -} - static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, 
u16 wm[5], u16 min) { - int level, max_level = ilk_wm_max_level(dev_priv); + int level, num_levels = dev_priv->wm.num_levels; if (wm[0] >= min) return false; @@ -2998,7 +2982,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, /* WM1+ latencies must be multiples of .5 usec */ min = roundup(min, 5); - for (level = 1; level <= max_level; level++) + for (level = 1; level < num_levels; level++) wm[level] = max(wm[level], min); return true; @@ -3027,6 +3011,13 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) { + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + dev_priv->wm.num_levels = 5; + else if (INTEL_GEN(dev_priv) >= 6) + dev_priv->wm.num_levels = 4; + else + dev_priv->wm.num_levels = 3; + intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); ilk_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency); @@ -3050,7 +3041,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv, u16 wm[8]) { - int level, num_levels = ilk_wm_max_level(dev_priv) + 1; + int level, num_levels = dev_priv->wm.num_levels; /* convert usec to .1 usec units */ for (level = 0; level < num_levels; level++) @@ -3059,6 +3050,8 @@ static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv, static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) { + dev_priv->wm.num_levels = 8; + intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); skl_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency); @@ -3109,7 +3102,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) const struct intel_plane_state *pristate = NULL; const struct intel_plane_state *sprstate = NULL; const struct intel_plane_state *curstate = NULL; - int level, max_level = ilk_wm_max_level(dev_priv), usable_level; + int level, num_levels = dev_priv->wm.num_levels, usable_level; 
struct ilk_wm_maximums max; pipe_wm = &cstate->wm.ilk.optimal; @@ -3133,7 +3126,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16); } - usable_level = max_level; + usable_level = num_levels - 1; /* ILK/SNB: LP2+ watermarks only w/o sprites */ if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled) @@ -3184,13 +3177,14 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, struct intel_crtc *intel_crtc, struct intel_crtc_state *newstate) { + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; struct intel_atomic_state *intel_state = to_intel_atomic_state(newstate->base.state); const struct intel_crtc_state *oldstate = intel_atomic_get_old_crtc_state(intel_state, intel_crtc); const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal; - int level, max_level = ilk_wm_max_level(to_i915(dev)); + int level, num_levels = dev_priv->wm.num_levels; /* * Start with the final, target watermarks, then combine with the @@ -3205,7 +3199,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, a->sprites_enabled |= b->sprites_enabled; a->sprites_scaled |= b->sprites_scaled; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < num_levels; level++) { struct intel_wm_level *a_wm = &a->wm[level]; const struct intel_wm_level *b_wm = &b->wm[level]; @@ -3277,8 +3271,8 @@ static void ilk_wm_merge(struct drm_device *dev, struct intel_pipe_wm *merged) { struct drm_i915_private *dev_priv = to_i915(dev); - int level, max_level = ilk_wm_max_level(dev_priv); - int last_enabled_level = max_level; + int level, num_levels = dev_priv->wm.num_levels; + int last_enabled_level = num_levels - 1; /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && @@ -3289,7 +3283,7 @@ static void ilk_wm_merge(struct drm_device *dev, merged->fbc_wm_enabled = 
INTEL_GEN(dev_priv) >= 6; /* merge each WM1+ level */ - for (level = 1; level <= max_level; level++) { + for (level = 1; level < num_levels; level++) { struct intel_wm_level *wm = &merged->wm[level]; ilk_merge_wm_level(dev, level, wm); @@ -3319,7 +3313,7 @@ static void ilk_wm_merge(struct drm_device *dev, */ if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled && intel_fbc_is_active(dev_priv)) { - for (level = 2; level <= max_level; level++) { + for (level = 2; level < num_levels; level++) { struct intel_wm_level *wm = &merged->wm[level]; wm->enable = false; @@ -3419,10 +3413,11 @@ static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev, struct intel_pipe_wm *r1, struct intel_pipe_wm *r2) { - int level, max_level = ilk_wm_max_level(to_i915(dev)); + struct drm_i915_private *dev_priv = to_i915(dev); + int level, num_levels = dev_priv->wm.num_levels; int level1 = 0, level2 = 0; - for (level = 1; level <= max_level; level++) { + for (level = 1; level < num_levels; level++) { if (r1->wm[level].enable) level1 = level; if (r2->wm[level].enable) @@ -3798,7 +3793,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) continue; /* Find the highest enabled wm level for this plane */ - for (level = ilk_wm_max_level(dev_priv); + for (level = dev_priv->wm.num_levels - 1; !wm->wm[level].plane_en; --level) { } @@ -4900,14 +4895,14 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, struct skl_plane_wm *wm, struct skl_wm_level *levels) { - int level, max_level = ilk_wm_max_level(dev_priv); + int level, num_levels = dev_priv->wm.num_levels; struct skl_wm_level *result_prev = &levels[0]; int ret; if (WARN_ON(!intel_pstate->base.fb)) return -EINVAL; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < num_levels; level++) { struct skl_wm_level *result = &levels[level]; ret = skl_compute_plane_wm(dev_priv, @@ -5181,10 +5176,10 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, struct drm_crtc *crtc = 
&intel_crtc->base; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); - int level, max_level = ilk_wm_max_level(dev_priv); + int level, num_levels = dev_priv->wm.num_levels; enum pipe pipe = intel_crtc->pipe; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < num_levels; level++) { skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), &wm->wm[level]); } @@ -5212,10 +5207,10 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); - int level, max_level = ilk_wm_max_level(dev_priv); + int level, num_levels = dev_priv->wm.num_levels; enum pipe pipe = intel_crtc->pipe; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < num_levels; level++) { skl_write_wm_level(dev_priv, CUR_WM(pipe, level), &wm->wm[level]); } @@ -5691,16 +5686,14 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; - int level, max_level; + int level, num_levels = dev_priv->wm.num_levels; enum plane_id plane_id; uint32_t val; - max_level = ilk_wm_max_level(dev_priv); - for_each_plane_id_on_crtc(intel_crtc, plane_id) { struct skl_plane_wm *wm = &out->planes[plane_id]; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < num_levels; level++) { if (plane_id != PLANE_CURSOR) val = I915_READ(PLANE_WM(pipe, plane_id, level)); else @@ -5794,14 +5787,14 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; active->linetime = hw->wm_linetime[pipe]; } else { - int level, max_level = ilk_wm_max_level(dev_priv); + int level, num_levels = dev_priv->wm.num_levels; /* * For inactive pipes, all watermark levels * should be marked as enabled but zeroed, * which is 
what we'd compute them to. */ - for (level = 0; level <= max_level; level++) + for (level = 0; level < num_levels; level++) active->wm[level].enable = true; } @@ -6092,7 +6085,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { DRM_DEBUG_KMS("Punit not acking DDR DVFS request, " "assuming DDR DVFS is disabled\n"); - dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; + dev_priv->wm.num_levels = VLV_WM_LEVEL_PM5 + 1; } else { val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); if ((val & FORCE_DDR_HIGH_FREQ) == 0) -- 2.18.1 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx