Re: [PATCH] gpu/drm/i915: globally replace dev_priv with i915

On 12/06/2019 17:29, Lucas De Marchi wrote:
We are slowly converting dev_priv to i915 everywhere, spread into
smaller series. While this is good to avoid unrelated breakages to other
inflight patches, it's bad because inflight patches on nearby paths keep
breaking. Paired with other code moves and refactors, this is becoming a
nightmare.

Now that I915_{READ,WRITE} are gone and implicit access to dev_priv no

Who removed I915_READ/WRITE? In fact I see some in this patch. Well, colour me confused... how did the patch compile?

Plus it's not just I915_READ/WRITE - some display macros have a hidden dependency on implicit dev_priv as well.
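
(Illustrative aside, not part of the patch: a quick way to spot those implicit users is to grep the headers for a bare dev_priv, e.g.

	grep -nw dev_priv drivers/gpu/drm/i915/i915_reg.h | head

Any macro there that expands to dev_priv only compiles because the caller has a local with exactly that name in scope, which is why the sed below needs to run over the headers as well as the .c files.)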

longer exists, we can simply sed all files and be done with the
conversion. This was generated with the following commands with no
additional fixups:

	git ls-files --full-name -z -- drivers/gpu/drm/i915/ | \
		xargs -0 sed -i 's/\bdev_priv\b/i915/g'

Any pending series can apply the same search and replace when rebasing.
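
(Two asides, sketches rather than anything prescribed in this thread. First, the \b word boundaries are what keep the substitution from touching identifiers that merely contain the string; with GNU sed,

	printf 'dev_priv->drm\nmy_dev_priv->drm\n' | sed 's/\bdev_priv\b/i915/g'

rewrites only the first line, since "_" counts as a word character. Second, one mechanical way a pending series could fold the same rename into each of its patches while replaying them - untested, and conflicts in the surrounding context will still stop the rebase for manual resolution - is something like

	git rebase -x 'git ls-files --full-name -z -- drivers/gpu/drm/i915/ | \
		xargs -0 sed -i "s/\bdev_priv\b/i915/g" && \
		git commit --amend -a --no-edit' <renamed-base>

with <renamed-base> standing in for whatever tree this patch lands in.)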

I am in two minds. Converting piecemeal is easier for some use cases, and converting all at once is easier for some others. Don't know.

Regards,

Tvrtko


Signed-off-by: Lucas De Marchi <lucas.demarchi@xxxxxxxxx>
---
  drivers/gpu/drm/i915/dvo_ch7017.c             |    6 +-
  drivers/gpu/drm/i915/dvo_ch7xxx.c             |   10 +-
  drivers/gpu/drm/i915/dvo_ivch.c               |   14 +-
  drivers/gpu/drm/i915/dvo_ns2501.c             |   14 +-
  drivers/gpu/drm/i915/dvo_sil164.c             |   10 +-
  drivers/gpu/drm/i915/dvo_tfp410.c             |   10 +-
  drivers/gpu/drm/i915/gem/i915_gem_context.c   |   48 +-
  drivers/gpu/drm/i915/gem/i915_gem_context.h   |    6 +-
  .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |    4 +-
  drivers/gpu/drm/i915/gem/i915_gem_object.c    |    4 +-
  drivers/gpu/drm/i915/gem/i915_gem_shmem.c     |    4 +-
  drivers/gpu/drm/i915/gem/i915_gem_stolen.c    |  168 +-
  drivers/gpu/drm/i915/gem/i915_gem_tiling.c    |    8 +-
  drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |   40 +-
  .../gpu/drm/i915/gem/selftests/huge_pages.c   |   48 +-
  .../drm/i915/gem/selftests/i915_gem_context.c |    6 +-
  drivers/gpu/drm/i915/gt/intel_engine.h        |    2 +-
  drivers/gpu/drm/i915/gt/intel_engine_cs.c     |   66 +-
  drivers/gpu/drm/i915/gt/intel_hangcheck.c     |   36 +-
  drivers/gpu/drm/i915/gt/intel_mocs.c          |   24 +-
  drivers/gpu/drm/i915/gt/intel_mocs.h          |    2 +-
  drivers/gpu/drm/i915/gt/intel_reset.c         |    6 +-
  drivers/gpu/drm/i915/gt/intel_ringbuffer.c    |   52 +-
  drivers/gpu/drm/i915/gvt/aperture_gm.c        |   60 +-
  drivers/gpu/drm/i915/gvt/cfg_space.c          |    4 +-
  drivers/gpu/drm/i915/gvt/cmd_parser.c         |   34 +-
  drivers/gpu/drm/i915/gvt/debugfs.c            |   12 +-
  drivers/gpu/drm/i915/gvt/display.c            |   48 +-
  drivers/gpu/drm/i915/gvt/dmabuf.c             |   12 +-
  drivers/gpu/drm/i915/gvt/edid.c               |    6 +-
  drivers/gpu/drm/i915/gvt/execlist.c           |   14 +-
  drivers/gpu/drm/i915/gvt/fb_decoder.c         |   12 +-
  drivers/gpu/drm/i915/gvt/firmware.c           |   16 +-
  drivers/gpu/drm/i915/gvt/gtt.c                |   44 +-
  drivers/gpu/drm/i915/gvt/gvt.c                |   26 +-
  drivers/gpu/drm/i915/gvt/gvt.h                |   20 +-
  drivers/gpu/drm/i915/gvt/handlers.c           |   92 +-
  drivers/gpu/drm/i915/gvt/interrupt.c          |    6 +-
  drivers/gpu/drm/i915/gvt/kvmgt.c              |    6 +-
  drivers/gpu/drm/i915/gvt/mmio.c               |    2 +-
  drivers/gpu/drm/i915/gvt/mmio_context.c       |   38 +-
  drivers/gpu/drm/i915/gvt/sched_policy.c       |   12 +-
  drivers/gpu/drm/i915/gvt/scheduler.c          |   72 +-
  drivers/gpu/drm/i915/gvt/vgpu.c               |    6 +-
  drivers/gpu/drm/i915/i915_cmd_parser.c        |    6 +-
  drivers/gpu/drm/i915/i915_debugfs.c           |  952 +++---
  drivers/gpu/drm/i915/i915_debugfs.h           |    4 +-
  drivers/gpu/drm/i915/i915_drv.c               | 1092 +++---
  drivers/gpu/drm/i915/i915_drv.h               |  590 ++--
  drivers/gpu/drm/i915/i915_gem.c               |  356 +-
  drivers/gpu/drm/i915/i915_gem_evict.c         |    6 +-
  drivers/gpu/drm/i915/i915_gem_gtt.c           |  214 +-
  drivers/gpu/drm/i915/i915_gem_gtt.h           |   20 +-
  drivers/gpu/drm/i915/i915_gpu_error.c         |   44 +-
  drivers/gpu/drm/i915/i915_gpu_error.h         |    6 +-
  drivers/gpu/drm/i915/i915_irq.c               | 1808 +++++-----
  drivers/gpu/drm/i915/i915_irq.h               |   86 +-
  drivers/gpu/drm/i915/i915_memcpy.c            |    2 +-
  drivers/gpu/drm/i915/i915_oa_bdw.c            |   32 +-
  drivers/gpu/drm/i915/i915_oa_bdw.h            |    2 +-
  drivers/gpu/drm/i915/i915_oa_bxt.c            |   32 +-
  drivers/gpu/drm/i915/i915_oa_bxt.h            |    2 +-
  drivers/gpu/drm/i915/i915_oa_cflgt2.c         |   32 +-
  drivers/gpu/drm/i915/i915_oa_cflgt2.h         |    2 +-
  drivers/gpu/drm/i915/i915_oa_cflgt3.c         |   32 +-
  drivers/gpu/drm/i915/i915_oa_cflgt3.h         |    2 +-
  drivers/gpu/drm/i915/i915_oa_chv.c            |   32 +-
  drivers/gpu/drm/i915/i915_oa_chv.h            |    2 +-
  drivers/gpu/drm/i915/i915_oa_cnl.c            |   32 +-
  drivers/gpu/drm/i915/i915_oa_cnl.h            |    2 +-
  drivers/gpu/drm/i915/i915_oa_glk.c            |   32 +-
  drivers/gpu/drm/i915/i915_oa_glk.h            |    2 +-
  drivers/gpu/drm/i915/i915_oa_hsw.c            |   32 +-
  drivers/gpu/drm/i915/i915_oa_hsw.h            |    2 +-
  drivers/gpu/drm/i915/i915_oa_icl.c            |   32 +-
  drivers/gpu/drm/i915/i915_oa_icl.h            |    2 +-
  drivers/gpu/drm/i915/i915_oa_kblgt2.c         |   32 +-
  drivers/gpu/drm/i915/i915_oa_kblgt2.h         |    2 +-
  drivers/gpu/drm/i915/i915_oa_kblgt3.c         |   32 +-
  drivers/gpu/drm/i915/i915_oa_kblgt3.h         |    2 +-
  drivers/gpu/drm/i915/i915_oa_sklgt2.c         |   32 +-
  drivers/gpu/drm/i915/i915_oa_sklgt2.h         |    2 +-
  drivers/gpu/drm/i915/i915_oa_sklgt3.c         |   32 +-
  drivers/gpu/drm/i915/i915_oa_sklgt3.h         |    2 +-
  drivers/gpu/drm/i915/i915_oa_sklgt4.c         |   32 +-
  drivers/gpu/drm/i915/i915_oa_sklgt4.h         |    2 +-
  drivers/gpu/drm/i915/i915_perf.c              |  814 ++---
  drivers/gpu/drm/i915/i915_pmu.c               |   42 +-
  drivers/gpu/drm/i915/i915_query.c             |   10 +-
  drivers/gpu/drm/i915/i915_reg.h               |  420 +--
  drivers/gpu/drm/i915/i915_suspend.c           |  106 +-
  drivers/gpu/drm/i915/i915_sysfs.c             |  172 +-
  drivers/gpu/drm/i915/i915_trace.h             |   46 +-
  drivers/gpu/drm/i915/i915_vgpu.c              |   30 +-
  drivers/gpu/drm/i915/i915_vgpu.h              |   16 +-
  drivers/gpu/drm/i915/i915_vma.c               |    4 +-
  drivers/gpu/drm/i915/icl_dsi.c                |  122 +-
  drivers/gpu/drm/i915/intel_atomic.c           |   28 +-
  drivers/gpu/drm/i915/intel_atomic.h           |    2 +-
  drivers/gpu/drm/i915/intel_audio.c            |  222 +-
  drivers/gpu/drm/i915/intel_audio.h            |    6 +-
  drivers/gpu/drm/i915/intel_bios.c             |  552 ++--
  drivers/gpu/drm/i915/intel_bios.h             |   18 +-
  drivers/gpu/drm/i915/intel_bw.c               |   68 +-
  drivers/gpu/drm/i915/intel_bw.h               |    8 +-
  drivers/gpu/drm/i915/intel_cdclk.c            |  882 ++---
  drivers/gpu/drm/i915/intel_cdclk.h            |   14 +-
  drivers/gpu/drm/i915/intel_color.c            |  176 +-
  drivers/gpu/drm/i915/intel_combo_phy.c        |   64 +-
  drivers/gpu/drm/i915/intel_combo_phy.h        |    6 +-
  drivers/gpu/drm/i915/intel_connector.c        |   12 +-
  drivers/gpu/drm/i915/intel_crt.c              |  172 +-
  drivers/gpu/drm/i915/intel_crt.h              |    4 +-
  drivers/gpu/drm/i915/intel_csr.c              |  144 +-
  drivers/gpu/drm/i915/intel_ddi.c              |  592 ++--
  drivers/gpu/drm/i915/intel_ddi.h              |    6 +-
  drivers/gpu/drm/i915/intel_device_info.c      |  164 +-
  drivers/gpu/drm/i915/intel_device_info.h      |    6 +-
  drivers/gpu/drm/i915/intel_display.c          | 2916 ++++++++---------
  drivers/gpu/drm/i915/intel_display.h          |    6 +-
  drivers/gpu/drm/i915/intel_display_power.c    | 1020 +++---
  drivers/gpu/drm/i915/intel_display_power.h    |   62 +-
  drivers/gpu/drm/i915/intel_dp.c               |  822 ++---
  drivers/gpu/drm/i915/intel_dp.h               |   18 +-
  drivers/gpu/drm/i915/intel_dp_aux_backlight.c |    4 +-
  drivers/gpu/drm/i915/intel_dp_mst.c           |   32 +-
  drivers/gpu/drm/i915/intel_dpio_phy.c         |  276 +-
  drivers/gpu/drm/i915/intel_dpio_phy.h         |   12 +-
  drivers/gpu/drm/i915/intel_dpll_mgr.c         |  356 +-
  drivers/gpu/drm/i915/intel_dpll_mgr.h         |   20 +-
  drivers/gpu/drm/i915/intel_drv.h              |   72 +-
  drivers/gpu/drm/i915/intel_dsi.c              |    6 +-
  drivers/gpu/drm/i915/intel_dsi.h              |    6 +-
  .../gpu/drm/i915/intel_dsi_dcs_backlight.c    |    4 +-
  drivers/gpu/drm/i915/intel_dsi_vbt.c          |   76 +-
  drivers/gpu/drm/i915/intel_dvo.c              |   32 +-
  drivers/gpu/drm/i915/intel_dvo.h              |    2 +-
  drivers/gpu/drm/i915/intel_dvo_dev.h          |    2 +-
  drivers/gpu/drm/i915/intel_fbc.c              |  410 +--
  drivers/gpu/drm/i915/intel_fbc.h              |   20 +-
  drivers/gpu/drm/i915/intel_fbdev.c            |   46 +-
  drivers/gpu/drm/i915/intel_fbdev.h            |    8 +-
  drivers/gpu/drm/i915/intel_fifo_underrun.c    |  160 +-
  drivers/gpu/drm/i915/intel_fifo_underrun.h    |   12 +-
  drivers/gpu/drm/i915/intel_frontbuffer.c      |   82 +-
  drivers/gpu/drm/i915/intel_frontbuffer.h      |    6 +-
  drivers/gpu/drm/i915/intel_gmbus.c            |  230 +-
  drivers/gpu/drm/i915/intel_gmbus.h            |   10 +-
  drivers/gpu/drm/i915/intel_guc.c              |   48 +-
  drivers/gpu/drm/i915/intel_guc_ads.c          |   12 +-
  drivers/gpu/drm/i915/intel_guc_fw.c           |   20 +-
  drivers/gpu/drm/i915/intel_guc_log.c          |   18 +-
  drivers/gpu/drm/i915/intel_guc_submission.c   |   56 +-
  drivers/gpu/drm/i915/intel_gvt.c              |   36 +-
  drivers/gpu/drm/i915/intel_gvt.h              |   16 +-
  drivers/gpu/drm/i915/intel_hdcp.c             |  286 +-
  drivers/gpu/drm/i915/intel_hdcp.h             |    6 +-
  drivers/gpu/drm/i915/intel_hdmi.c             |  316 +-
  drivers/gpu/drm/i915/intel_hdmi.h             |    2 +-
  drivers/gpu/drm/i915/intel_hotplug.c          |  210 +-
  drivers/gpu/drm/i915/intel_hotplug.h          |   16 +-
  drivers/gpu/drm/i915/intel_huc.c              |    6 +-
  drivers/gpu/drm/i915/intel_huc_fw.c           |   18 +-
  drivers/gpu/drm/i915/intel_lpe_audio.c        |   96 +-
  drivers/gpu/drm/i915/intel_lpe_audio.h        |    8 +-
  drivers/gpu/drm/i915/intel_lspcon.c           |    4 +-
  drivers/gpu/drm/i915/intel_lvds.c             |   96 +-
  drivers/gpu/drm/i915/intel_lvds.h             |    8 +-
  drivers/gpu/drm/i915/intel_opregion.c         |  114 +-
  drivers/gpu/drm/i915/intel_opregion.h         |   28 +-
  drivers/gpu/drm/i915/intel_overlay.c          |   98 +-
  drivers/gpu/drm/i915/intel_overlay.h          |    8 +-
  drivers/gpu/drm/i915/intel_panel.c            |  238 +-
  drivers/gpu/drm/i915/intel_pipe_crc.c         |  120 +-
  drivers/gpu/drm/i915/intel_pipe_crc.h         |    4 +-
  drivers/gpu/drm/i915/intel_pm.c               | 2300 ++++++-------
  drivers/gpu/drm/i915/intel_pm.h               |   74 +-
  drivers/gpu/drm/i915/intel_psr.c              |  394 +--
  drivers/gpu/drm/i915/intel_psr.h              |   14 +-
  drivers/gpu/drm/i915/intel_quirks.h           |    2 +-
  drivers/gpu/drm/i915/intel_runtime_pm.h       |    8 +-
  drivers/gpu/drm/i915/intel_sdvo.c             |  128 +-
  drivers/gpu/drm/i915/intel_sdvo.h             |    4 +-
  drivers/gpu/drm/i915/intel_sprite.c           |  218 +-
  drivers/gpu/drm/i915/intel_sprite.h           |    8 +-
  drivers/gpu/drm/i915/intel_tv.c               |   72 +-
  drivers/gpu/drm/i915/intel_tv.h               |    2 +-
  drivers/gpu/drm/i915/intel_uc.c               |    4 +-
  drivers/gpu/drm/i915/intel_uc.h               |   22 +-
  drivers/gpu/drm/i915/intel_uc_fw.c            |   12 +-
  drivers/gpu/drm/i915/intel_uc_fw.h            |    2 +-
  drivers/gpu/drm/i915/intel_uncore.c           |   26 +-
  drivers/gpu/drm/i915/intel_uncore.h           |    4 +-
  drivers/gpu/drm/i915/intel_vbt_defs.h         |    2 +-
  drivers/gpu/drm/i915/intel_vdsc.c             |   10 +-
  drivers/gpu/drm/i915/intel_wopcm.c            |   14 +-
  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |   26 +-
  drivers/gpu/drm/i915/selftests/intel_guc.c    |   46 +-
  drivers/gpu/drm/i915/selftests/intel_uncore.c |   10 +-
  drivers/gpu/drm/i915/vlv_dsi.c                |  222 +-
  drivers/gpu/drm/i915/vlv_dsi_pll.c            |   80 +-
  201 files changed, 12020 insertions(+), 12020 deletions(-)

diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 602380fe74f3..df670c8d5c31 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -210,7 +210,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
  		return false;
dvo->i2c_bus = adapter;
-	dvo->dev_priv = priv;
+	dvo->i915 = priv;
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
  		goto fail;
@@ -395,11 +395,11 @@ do {							\
static void ch7017_destroy(struct intel_dvo_device *dvo)
  {
-	struct ch7017_priv *priv = dvo->dev_priv;
+	struct ch7017_priv *priv = dvo->i915;
if (priv) {
  		kfree(priv);
-		dvo->dev_priv = NULL;
+		dvo->i915 = NULL;
  	}
  }
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index e070bebee7b5..e076801d032f 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -135,7 +135,7 @@ static char *ch7xxx_get_did(u8 did)
  /** Reads an 8 bit register */
  static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
  {
-	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+	struct ch7xxx_priv *ch7xxx = dvo->i915;
  	struct i2c_adapter *adapter = dvo->i2c_bus;
  	u8 out_buf[2];
  	u8 in_buf[2];
@@ -173,7 +173,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
  /** Writes an 8 bit register */
  static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
  {
-	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+	struct ch7xxx_priv *ch7xxx = dvo->i915;
  	struct i2c_adapter *adapter = dvo->i2c_bus;
  	u8 out_buf[2];
  	struct i2c_msg msg = {
@@ -210,7 +210,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
  		return false;
dvo->i2c_bus = adapter;
-	dvo->dev_priv = ch7xxx;
+	dvo->i915 = ch7xxx;
  	ch7xxx->quiet = true;
if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
@@ -347,11 +347,11 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
  {
-	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+	struct ch7xxx_priv *ch7xxx = dvo->i915;
if (ch7xxx) {
  		kfree(ch7xxx);
-		dvo->dev_priv = NULL;
+		dvo->i915 = NULL;
  	}
  }
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 09dba35f3ffa..c99c16211567 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -191,7 +191,7 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
   */
  static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
  {
-	struct ivch_priv *priv = dvo->dev_priv;
+	struct ivch_priv *priv = dvo->i915;
  	struct i2c_adapter *adapter = dvo->i2c_bus;
  	u8 out_buf[1];
  	u8 in_buf[2];
@@ -234,7 +234,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
  /* Writes a 16-bit register on the ivch */
  static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
  {
-	struct ivch_priv *priv = dvo->dev_priv;
+	struct ivch_priv *priv = dvo->i915;
  	struct i2c_adapter *adapter = dvo->i2c_bus;
  	u8 out_buf[3];
  	struct i2c_msg msg = {
@@ -272,7 +272,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
  		return false;
dvo->i2c_bus = adapter;
-	dvo->dev_priv = priv;
+	dvo->i915 = priv;
  	priv->quiet = true;
if (!ivch_read(dvo, VR00, &temp))
@@ -328,7 +328,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
   */
  static void ivch_reset(struct intel_dvo_device *dvo)
  {
-	struct ivch_priv *priv = dvo->dev_priv;
+	struct ivch_priv *priv = dvo->i915;
  	int i;
DRM_DEBUG_KMS("Resetting the IVCH registers\n");
@@ -398,7 +398,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
  			  const struct drm_display_mode *mode,
  			  const struct drm_display_mode *adjusted_mode)
  {
-	struct ivch_priv *priv = dvo->dev_priv;
+	struct ivch_priv *priv = dvo->i915;
  	u16 vr40 = 0;
  	u16 vr01 = 0;
  	u16 vr10;
@@ -483,11 +483,11 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
static void ivch_destroy(struct intel_dvo_device *dvo)
  {
-	struct ivch_priv *priv = dvo->dev_priv;
+	struct ivch_priv *priv = dvo->i915;
if (priv) {
  		kfree(priv);
-		dvo->dev_priv = NULL;
+		dvo->i915 = NULL;
  	}
  }
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index c83a5d88d62b..af04fcaa98b8 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -392,7 +392,7 @@ struct ns2501_priv {
  */
  static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
  {
-	struct ns2501_priv *ns = dvo->dev_priv;
+	struct ns2501_priv *ns = dvo->i915;
  	struct i2c_adapter *adapter = dvo->i2c_bus;
  	u8 out_buf[2];
  	u8 in_buf[2];
@@ -437,7 +437,7 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
  */
  static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
  {
-	struct ns2501_priv *ns = dvo->dev_priv;
+	struct ns2501_priv *ns = dvo->i915;
  	struct i2c_adapter *adapter = dvo->i2c_bus;
  	u8 out_buf[2];
@@ -481,7 +481,7 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
  		return false;
dvo->i2c_bus = adapter;
-	dvo->dev_priv = ns;
+	dvo->i915 = ns;
  	ns->quiet = true;
if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
@@ -551,7 +551,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
  			    const struct drm_display_mode *adjusted_mode)
  {
  	const struct ns2501_configuration *conf;
-	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->i915);
  	int mode_idx, i;
DRM_DEBUG_KMS
@@ -655,7 +655,7 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
  /* set the NS2501 power state */
  static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
  {
-	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->i915);
DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable); @@ -691,11 +691,11 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) static void ns2501_destroy(struct intel_dvo_device *dvo)
  {
-	struct ns2501_priv *ns = dvo->dev_priv;
+	struct ns2501_priv *ns = dvo->i915;
if (ns) {
  		kfree(ns);
-		dvo->dev_priv = NULL;
+		dvo->i915 = NULL;
  	}
  }
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 04698eaeb632..a452dcba179c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -68,7 +68,7 @@ struct sil164_priv {
static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
  {
-	struct sil164_priv *sil = dvo->dev_priv;
+	struct sil164_priv *sil = dvo->i915;
  	struct i2c_adapter *adapter = dvo->i2c_bus;
  	u8 out_buf[2];
  	u8 in_buf[2];
@@ -105,7 +105,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
  {
-	struct sil164_priv *sil = dvo->dev_priv;
+	struct sil164_priv *sil = dvo->i915;
  	struct i2c_adapter *adapter = dvo->i2c_bus;
  	u8 out_buf[2];
  	struct i2c_msg msg = {
@@ -142,7 +142,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
  		return false;
dvo->i2c_bus = adapter;
-	dvo->dev_priv = sil;
+	dvo->i915 = sil;
  	sil->quiet = true;
if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
@@ -260,11 +260,11 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
static void sil164_destroy(struct intel_dvo_device *dvo)
  {
-	struct sil164_priv *sil = dvo->dev_priv;
+	struct sil164_priv *sil = dvo->i915;
if (sil) {
  		kfree(sil);
-		dvo->dev_priv = NULL;
+		dvo->i915 = NULL;
  	}
  }
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 623114ee73cd..8ffed8914b49 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -93,7 +93,7 @@ struct tfp410_priv {
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
  {
-	struct tfp410_priv *tfp = dvo->dev_priv;
+	struct tfp410_priv *tfp = dvo->i915;
  	struct i2c_adapter *adapter = dvo->i2c_bus;
  	u8 out_buf[2];
  	u8 in_buf[2];
@@ -130,7 +130,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
  {
-	struct tfp410_priv *tfp = dvo->dev_priv;
+	struct tfp410_priv *tfp = dvo->i915;
  	struct i2c_adapter *adapter = dvo->i2c_bus;
  	u8 out_buf[2];
  	struct i2c_msg msg = {
@@ -178,7 +178,7 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
  		return false;
dvo->i2c_bus = adapter;
-	dvo->dev_priv = tfp;
+	dvo->i915 = tfp;
  	tfp->quiet = true;
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
@@ -299,11 +299,11 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
static void tfp410_destroy(struct intel_dvo_device *dvo)
  {
-	struct tfp410_priv *tfp = dvo->dev_priv;
+	struct tfp410_priv *tfp = dvo->i915;
if (tfp) {
  		kfree(tfp);
-		dvo->dev_priv = NULL;
+		dvo->i915 = NULL;
  	}
  }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index c86ca9f21532..2fa6747c7b6e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -495,27 +495,27 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
  }
static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
+i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
  {
  	struct i915_gem_context *ctx;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
-	    !HAS_EXECLISTS(dev_priv))
+	    !HAS_EXECLISTS(i915))
  		return ERR_PTR(-EINVAL);
/* Reap the most stale context */
-	contexts_free_first(dev_priv);
+	contexts_free_first(i915);
- ctx = __create_context(dev_priv);
+	ctx = __create_context(i915);
  	if (IS_ERR(ctx))
  		return ctx;
- if (HAS_FULL_PPGTT(dev_priv)) {
+	if (HAS_FULL_PPGTT(i915)) {
  		struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv);
+		ppgtt = i915_ppgtt_create(i915);
  		if (IS_ERR(ppgtt)) {
  			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
  					 PTR_ERR(ppgtt));
@@ -530,7 +530,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
  	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
  		struct i915_timeline *timeline;
- timeline = i915_timeline_create(dev_priv, NULL);
+		timeline = i915_timeline_create(i915, NULL);
  		if (IS_ERR(timeline)) {
  			context_close(ctx);
  			return ERR_CAST(timeline);
@@ -649,19 +649,19 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
  	return HAS_EXECLISTS(i915);
  }
-int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
+int i915_gem_contexts_init(struct drm_i915_private *i915)
  {
  	struct i915_gem_context *ctx;
/* Reassure ourselves we are only called once */
-	GEM_BUG_ON(dev_priv->kernel_context);
-	GEM_BUG_ON(dev_priv->preempt_context);
+	GEM_BUG_ON(i915->kernel_context);
+	GEM_BUG_ON(i915->preempt_context);
- intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
-	init_contexts(dev_priv);
+	intel_engine_init_ctx_wa(i915->engine[RCS0]);
+	init_contexts(i915);
/* lowest priority; idle task */
-	ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
+	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
  	if (IS_ERR(ctx)) {
  		DRM_ERROR("Failed to create default global context\n");
  		return PTR_ERR(ctx);
@@ -675,31 +675,31 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
  	 */
  	GEM_BUG_ON(ctx->hw_id);
  	GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
-	dev_priv->kernel_context = ctx;
+	i915->kernel_context = ctx;
/* highest priority; preempting task */
-	if (needs_preempt_context(dev_priv)) {
-		ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
+	if (needs_preempt_context(i915)) {
+		ctx = i915_gem_context_create_kernel(i915, INT_MAX);
  		if (!IS_ERR(ctx))
-			dev_priv->preempt_context = ctx;
+			i915->preempt_context = ctx;
  		else
  			DRM_ERROR("Failed to create preempt context; disabling preemption\n");
  	}
DRM_DEBUG_DRIVER("%s context support initialized\n",
-			 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+			 DRIVER_CAPS(i915)->has_logical_contexts ?
  			 "logical" : "fake");
  	return 0;
  }
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
+void i915_gem_contexts_lost(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
- for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		intel_engine_lost_context(engine);
  }
@@ -2368,7 +2368,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
  int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
  				       void *data, struct drm_file *file)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_i915_reset_stats *args = data;
  	struct i915_gem_context *ctx;
  	int ret;
@@ -2390,7 +2390,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
  	 */
if (capable(CAP_SYS_ADMIN))
-		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+		args->reset_count = i915_reset_count(&i915->gpu_error);
  	else
  		args->reset_count = 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 630392c77e48..f55cb8eae87a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -133,9 +133,9 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
  }
/* i915_gem_context.c */
-int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_contexts_init(struct drm_i915_private *i915);
+void i915_gem_contexts_lost(struct drm_i915_private *i915);
+void i915_gem_contexts_fini(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
  			  struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 528eea44dccf..83d64580a2f2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2099,7 +2099,7 @@ static int eb_submit(struct i915_execbuffer *eb)
   * The engine index is returned.
   */
  static unsigned int
-gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
+gen8_dispatch_bsd_engine(struct drm_i915_private *i915,
  			 struct drm_file *file)
  {
  	struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2107,7 +2107,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
  	/* Check whether the file_priv has already selected one ring. */
  	if ((int)file_priv->bsd_engine < 0)
  		file_priv->bsd_engine = atomic_fetch_xor(1,
-			 &dev_priv->mm.bsd_engine_dispatch_index);
+			 &i915->mm.bsd_engine_dispatch_index);
return file_priv->bsd_engine;
  }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 36b76c6a0a9d..a37f0adac16e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -367,7 +367,7 @@ void
  i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
  				   unsigned int flush_domains)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
  	struct i915_vma *vma;
assert_object_held(obj);
@@ -377,7 +377,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
switch (obj->write_domain) {
  	case I915_GEM_DOMAIN_GTT:
-		i915_gem_flush_ggtt_writes(dev_priv);
+		i915_gem_flush_ggtt_writes(i915);
intel_fb_obj_flush(obj,
  				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 19d9ecdb2894..eb4aae65f6e4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -521,7 +521,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
/* Allocate a new GEM object and fill it with the supplied data */
  struct drm_i915_gem_object *
-i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
+i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
  				       const void *data, size_t size)
  {
  	struct drm_i915_gem_object *obj;
@@ -529,7 +529,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
  	size_t offset;
  	int err;
- obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
+	obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
  	if (IS_ERR(obj))
  		return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index de1fab2058ec..24e174f069f3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -24,48 +24,48 @@
   * for is a boon.
   */
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
  					 struct drm_mm_node *node, u64 size,
  					 unsigned alignment, u64 start, u64 end)
  {
  	int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+	if (!drm_mm_initialized(&i915->mm.stolen))
  		return -ENODEV;
/* WaSkipStolenMemoryFirstPage:bdw+ */
-	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
+	if (INTEL_GEN(i915) >= 8 && start < 4096)
  		start = 4096;
- mutex_lock(&dev_priv->mm.stolen_lock);
-	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+	mutex_lock(&i915->mm.stolen_lock);
+	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
  					  size, alignment, 0,
  					  start, end, DRM_MM_INSERT_BEST);
-	mutex_unlock(&dev_priv->mm.stolen_lock);
+	mutex_unlock(&i915->mm.stolen_lock);
return ret;
  }
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
  				struct drm_mm_node *node, u64 size,
  				unsigned alignment)
  {
-	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
+	return i915_gem_stolen_insert_node_in_range(i915, node, size,
  						    alignment, 0, U64_MAX);
  }
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
  				 struct drm_mm_node *node)
  {
-	mutex_lock(&dev_priv->mm.stolen_lock);
+	mutex_lock(&i915->mm.stolen_lock);
  	drm_mm_remove_node(node);
-	mutex_unlock(&dev_priv->mm.stolen_lock);
+	mutex_unlock(&i915->mm.stolen_lock);
  }
-static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
+static int i915_adjust_stolen(struct drm_i915_private *i915,
  			      struct resource *dsm)
  {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	struct resource *r;
if (dsm->start == 0 || dsm->end <= dsm->start)
@@ -77,14 +77,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
  	 */
/* Make sure we don't clobber the GTT if it's within stolen memory */
-	if (INTEL_GEN(dev_priv) <= 4 &&
-	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
+	if (INTEL_GEN(i915) <= 4 &&
+	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
  		struct resource stolen[2] = {*dsm, *dsm};
  		struct resource ggtt_res;
  		resource_size_t ggtt_start;
ggtt_start = I915_READ(PGTBL_CTL);
-		if (IS_GEN(dev_priv, 4))
+		if (IS_GEN(i915, 4))
  			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
  				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
  		else
@@ -118,7 +118,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
  	 * kernel. So if the region is already marked as busy, something
  	 * is seriously wrong.
  	 */
-	r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
+	r = devm_request_mem_region(i915->drm.dev, dsm->start,
  				    resource_size(dsm),
  				    "Graphics Stolen Memory");
  	if (r == NULL) {
@@ -131,14 +131,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
  		 * reservation starting from 1 instead of 0.
  		 * There's also BIOS with off-by-one on the other end.
  		 */
-		r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
+		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
  					    resource_size(dsm) - 2,
  					    "Graphics Stolen Memory");
  		/*
  		 * GEN3 firmware likes to smash pci bridges into the stolen
  		 * range. Apparently this works.
  		 */
-		if (r == NULL && !IS_GEN(dev_priv, 3)) {
+		if (r == NULL && !IS_GEN(i915, 3)) {
  			DRM_ERROR("conflict detected with stolen region: %pR\n",
  				  dsm);
@@ -149,25 +149,25 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
  	return 0;
  }
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
  {
-	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+	if (!drm_mm_initialized(&i915->mm.stolen))
  		return;
- drm_mm_takedown(&dev_priv->mm.stolen);
+	drm_mm_takedown(&i915->mm.stolen);
  }
-static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
  				    resource_size_t *base,
  				    resource_size_t *size)
  {
-	u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
+	u32 reg_val = I915_READ(IS_GM45(i915) ?
  				CTG_STOLEN_RESERVED :
  				ELK_STOLEN_RESERVED);
-	resource_size_t stolen_top = dev_priv->dsm.end + 1;
+	resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
-			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);
+			 IS_GM45(i915) ? "CTG" : "ELK", reg_val);
if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
  		return;
@@ -176,7 +176,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
  	 * Whether ILK really reuses the ELK register for this is unclear.
  	 * Let's see if we catch anyone with this supposedly enabled on ILK.
  	 */
-	WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
+	WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
  	     reg_val);
if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
@@ -188,7 +188,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
  	*size = stolen_top - *base;
  }
-static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
  				     resource_size_t *base,
  				     resource_size_t *size)
  {
@@ -220,12 +220,12 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
  	}
  }
-static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
  				    resource_size_t *base,
  				    resource_size_t *size)
  {
  	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
-	resource_size_t stolen_top = dev_priv->dsm.end + 1;
+	resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); @@ -248,7 +248,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
  	*base = stolen_top - *size;
  }
-static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
  				     resource_size_t *base,
  				     resource_size_t *size)
  {
@@ -274,7 +274,7 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
  	}
  }
-static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void chv_get_stolen_reserved(struct drm_i915_private *i915,
  				    resource_size_t *base,
  				    resource_size_t *size)
  {
@@ -306,12 +306,12 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
  	}
  }
-static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
  				    resource_size_t *base,
  				    resource_size_t *size)
  {
  	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
-	resource_size_t stolen_top = dev_priv->dsm.end + 1;
+	resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); @@ -354,19 +354,19 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
  	}
  }
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+int i915_gem_init_stolen(struct drm_i915_private *i915)
  {
  	resource_size_t reserved_base, stolen_top;
  	resource_size_t reserved_total, reserved_size;
- mutex_init(&dev_priv->mm.stolen_lock);
+	mutex_init(&i915->mm.stolen_lock);
- if (intel_vgpu_active(dev_priv)) {
+	if (intel_vgpu_active(i915)) {
  		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
  		return 0;
  	}
- if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
+	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
  		DRM_INFO("DMAR active, disabling use of stolen memory\n");
  		return 0;
  	}
@@ -374,55 +374,55 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
  	if (resource_size(&intel_graphics_stolen_res) == 0)
  		return 0;
- dev_priv->dsm = intel_graphics_stolen_res;
+	i915->dsm = intel_graphics_stolen_res;
- if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
+	if (i915_adjust_stolen(i915, &i915->dsm))
  		return 0;
- GEM_BUG_ON(dev_priv->dsm.start == 0);
-	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);
+	GEM_BUG_ON(i915->dsm.start == 0);
+	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);
- stolen_top = dev_priv->dsm.end + 1;
+	stolen_top = i915->dsm.end + 1;
  	reserved_base = stolen_top;
  	reserved_size = 0;
- switch (INTEL_GEN(dev_priv)) {
+	switch (INTEL_GEN(i915)) {
  	case 2:
  	case 3:
  		break;
  	case 4:
-		if (!IS_G4X(dev_priv))
+		if (!IS_G4X(i915))
  			break;
  		/* fall through */
  	case 5:
-		g4x_get_stolen_reserved(dev_priv,
+		g4x_get_stolen_reserved(i915,
  					&reserved_base, &reserved_size);
  		break;
  	case 6:
-		gen6_get_stolen_reserved(dev_priv,
+		gen6_get_stolen_reserved(i915,
  					 &reserved_base, &reserved_size);
  		break;
  	case 7:
-		if (IS_VALLEYVIEW(dev_priv))
-			vlv_get_stolen_reserved(dev_priv,
+		if (IS_VALLEYVIEW(i915))
+			vlv_get_stolen_reserved(i915,
  						&reserved_base, &reserved_size);
  		else
-			gen7_get_stolen_reserved(dev_priv,
+			gen7_get_stolen_reserved(i915,
  						 &reserved_base, &reserved_size);
  		break;
  	case 8:
  	case 9:
  	case 10:
-		if (IS_LP(dev_priv))
-			chv_get_stolen_reserved(dev_priv,
+		if (IS_LP(i915))
+			chv_get_stolen_reserved(i915,
  						&reserved_base, &reserved_size);
  		else
-			bdw_get_stolen_reserved(dev_priv,
+			bdw_get_stolen_reserved(i915,
  						&reserved_base, &reserved_size);
  		break;
  	case 11:
  	default:
-		icl_get_stolen_reserved(dev_priv, &reserved_base,
+		icl_get_stolen_reserved(i915, &reserved_base,
  					&reserved_size);
  		break;
  	}
@@ -439,12 +439,12 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
  		reserved_size = 0;
  	}
- dev_priv->dsm_reserved =
+	i915->dsm_reserved =
  		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);
- if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
+	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
  		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
-			  &dev_priv->dsm_reserved, &dev_priv->dsm);
+			  &i915->dsm_reserved, &i915->dsm);
  		return 0;
  	}
@@ -453,14 +453,14 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
  	reserved_total = stolen_top - reserved_base;
DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
-			 (u64)resource_size(&dev_priv->dsm) >> 10,
-			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
+			 (u64)resource_size(&i915->dsm) >> 10,
+			 ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
- dev_priv->stolen_usable_size =
-		resource_size(&dev_priv->dsm) - reserved_total;
+	i915->stolen_usable_size =
+		resource_size(&i915->dsm) - reserved_total;
/* Basic memrange allocator for stolen space. */
-	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);
+	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
return 0;
  }
@@ -469,11 +469,11 @@ static struct sg_table *
  i915_pages_create_for_stolen(struct drm_device *dev,
  			     resource_size_t offset, resource_size_t size)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct sg_table *st;
  	struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));
+	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
/* We hide that we have no struct page backing our stolen object
  	 * by wrapping the contiguous physical allocation with a fake
@@ -493,7 +493,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
  	sg->offset = 0;
  	sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
+	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
  	sg_dma_len(sg) = size;
return st;
@@ -524,14 +524,14 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
  static void
  i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
  	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
__i915_gem_object_unpin_pages(obj);
-	i915_gem_stolen_remove_node(dev_priv, stolen);
+	i915_gem_stolen_remove_node(i915, stolen);
  	kfree(stolen);
  }
@@ -542,7 +542,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
  };
static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+_i915_gem_object_create_stolen(struct drm_i915_private *i915,
  			       struct drm_mm_node *stolen)
  {
  	struct drm_i915_gem_object *obj;
@@ -552,12 +552,12 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
  	if (obj == NULL)
  		return NULL;
- drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
+	drm_gem_private_object_init(&i915->drm, &obj->base, stolen->size);
  	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->stolen = stolen;
  	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
-	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
+	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
  	i915_gem_object_set_cache_coherency(obj, cache_level);
if (i915_gem_object_pin_pages(obj))
@@ -571,14 +571,14 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
  }
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
  			      resource_size_t size)
  {
  	struct drm_i915_gem_object *obj;
  	struct drm_mm_node *stolen;
  	int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+	if (!drm_mm_initialized(&i915->mm.stolen))
  		return NULL;
if (size == 0)
@@ -588,37 +588,37 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
  	if (!stolen)
  		return NULL;
- ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
+	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
  	if (ret) {
  		kfree(stolen);
  		return NULL;
  	}
- obj = _i915_gem_object_create_stolen(dev_priv, stolen);
+	obj = _i915_gem_object_create_stolen(i915, stolen);
  	if (obj)
  		return obj;
- i915_gem_stolen_remove_node(dev_priv, stolen);
+	i915_gem_stolen_remove_node(i915, stolen);
  	kfree(stolen);
  	return NULL;
  }
struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
  					       resource_size_t stolen_offset,
  					       resource_size_t gtt_offset,
  					       resource_size_t size)
  {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	struct drm_i915_gem_object *obj;
  	struct drm_mm_node *stolen;
  	struct i915_vma *vma;
  	int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+	if (!drm_mm_initialized(&i915->mm.stolen))
  		return NULL;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
  			 &stolen_offset, &gtt_offset, &size);
@@ -635,19 +635,19 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
stolen->start = stolen_offset;
  	stolen->size = size;
-	mutex_lock(&dev_priv->mm.stolen_lock);
-	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
-	mutex_unlock(&dev_priv->mm.stolen_lock);
+	mutex_lock(&i915->mm.stolen_lock);
+	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
+	mutex_unlock(&i915->mm.stolen_lock);
  	if (ret) {
  		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
  		kfree(stolen);
  		return NULL;
  	}
- obj = _i915_gem_object_create_stolen(dev_priv, stolen);
+	obj = _i915_gem_object_create_stolen(i915, stolen);
  	if (obj == NULL) {
  		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
-		i915_gem_stolen_remove_node(dev_priv, stolen);
+		i915_gem_stolen_remove_node(i915, stolen);
  		kfree(stolen);
  		return NULL;
  	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ca0c2f451742..46b523d4ed84 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -398,7 +398,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
  			  struct drm_file *file)
  {
  	struct drm_i915_gem_get_tiling *args = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_i915_gem_object *obj;
  	int err = -ENOENT;
@@ -415,10 +415,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
	switch (args->tiling_mode) {
  	case I915_TILING_X:
-		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+		args->swizzle_mode = i915->mm.bit_6_swizzle_x;
  		break;
  	case I915_TILING_Y:
-		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+		args->swizzle_mode = i915->mm.bit_6_swizzle_y;
  		break;
  	default:
  	case I915_TILING_NONE:
@@ -427,7 +427,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
  	}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
-	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
  		args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
  	else
  		args->phys_swizzle_mode = args->swizzle_mode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 528b61678334..f22e10e8895a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -327,12 +327,12 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
  #endif
static struct i915_mm_struct *
-__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+__i915_mm_struct_find(struct drm_i915_private *i915, struct mm_struct *real)
  {
  	struct i915_mm_struct *mm;
- /* Protected by dev_priv->mm_lock */
-	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+	/* Protected by i915->mm_lock */
+	hash_for_each_possible(i915->mm_structs, mm, node, (unsigned long)real)
  		if (mm->mm == real)
  			return mm;
@@ -342,7 +342,7 @@ __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
  static int
  i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
  	struct i915_mm_struct *mm;
  	int ret = 0;
@@ -356,8 +356,8 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
  	 * struct_mutex, i.e. we need to schedule a worker to do the clean
  	 * up.
  	 */
-	mutex_lock(&dev_priv->mm_lock);
-	mm = __i915_mm_struct_find(dev_priv, current->mm);
+	mutex_lock(&i915->mm_lock);
+	mm = __i915_mm_struct_find(i915, current->mm);
  	if (mm == NULL) {
  		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
  		if (mm == NULL) {
@@ -373,15 +373,15 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
mm->mn = NULL;
-		/* Protected by dev_priv->mm_lock */
-		hash_add(dev_priv->mm_structs,
+		/* Protected by i915->mm_lock */
+		hash_add(i915->mm_structs,
  			 &mm->node, (unsigned long)mm->mm);
  	} else
  		kref_get(&mm->kref);
obj->userptr.mm = mm;
  out:
-	mutex_unlock(&dev_priv->mm_lock);
+	mutex_unlock(&i915->mm_lock);
  	return ret;
  }
@@ -399,7 +399,7 @@ __i915_mm_struct_free(struct kref *kref)
  {
  	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
- /* Protected by dev_priv->mm_lock */
+	/* Protected by i915->mm_lock */
  	hash_del(&mm->node);
  	mutex_unlock(&mm->i915->mm_lock);
@@ -741,13 +741,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
  		       void *data,
  		       struct drm_file *file)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_i915_gem_userptr *args = data;
  	struct drm_i915_gem_object *obj;
  	int ret;
  	u32 handle;
- if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
+	if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) {
  		/* We cannot support coherent userptr objects on hw without
  		 * LLC and broken snooping.
  		 */
@@ -774,7 +774,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
  		 * On almost all of the older hw, we cannot tell the GPU that
  		 * a page is readonly.
  		 */
-		vm = dev_priv->kernel_context->vm;
+		vm = i915->kernel_context->vm;
  		if (!vm || !vm->has_read_only)
  			return -ENODEV;
  	}
@@ -812,22 +812,22 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
  	return 0;
  }
-int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
+int i915_gem_init_userptr(struct drm_i915_private *i915)
  {
-	mutex_init(&dev_priv->mm_lock);
-	hash_init(dev_priv->mm_structs);
+	mutex_init(&i915->mm_lock);
+	hash_init(i915->mm_structs);
- dev_priv->mm.userptr_wq =
+	i915->mm.userptr_wq =
  		alloc_workqueue("i915-userptr-acquire",
  				WQ_HIGHPRI | WQ_UNBOUND,
  				0);
-	if (!dev_priv->mm.userptr_wq)
+	if (!i915->mm.userptr_wq)
  		return -ENOMEM;
return 0;
  }
-void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
+void i915_gem_cleanup_userptr(struct drm_i915_private *i915)
  {
-	destroy_workqueue(dev_priv->mm.userptr_wq);
+	destroy_workqueue(i915->mm.userptr_wq);
  }
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 73e667b31cc4..78797699feb0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1415,8 +1415,8 @@ static int igt_ppgtt_gemfs_huge(void *arg)
  static int igt_ppgtt_pin_update(void *arg)
  {
  	struct i915_gem_context *ctx = arg;
-	struct drm_i915_private *dev_priv = ctx->i915;
-	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
+	struct drm_i915_private *i915 = ctx->i915;
+	unsigned long supported = INTEL_INFO(i915)->page_sizes;
  	struct i915_address_space *vm = ctx->vm;
  	struct drm_i915_gem_object *obj;
  	struct i915_vma *vma;
@@ -1443,7 +1443,7 @@ static int igt_ppgtt_pin_update(void *arg)
  	for_each_set_bit_from(first, &supported, last + 1) {
  		unsigned int page_size = BIT(first);
- obj = i915_gem_object_create_internal(dev_priv, page_size);
+		obj = i915_gem_object_create_internal(i915, page_size);
  		if (IS_ERR(obj))
  			return PTR_ERR(obj);
@@ -1497,7 +1497,7 @@ static int igt_ppgtt_pin_update(void *arg)
  		i915_gem_object_put(obj);
  	}
- obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
+	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
  	if (IS_ERR(obj))
  		return PTR_ERR(obj);
@@ -1518,7 +1518,7 @@ static int igt_ppgtt_pin_update(void *arg)
  	 * land in the now stale 2M page.
  	 */
- err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
+	err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
  	if (err)
  		goto out_unpin;
@@ -1682,20 +1682,20 @@ int i915_gem_huge_page_mock_selftests(void)
  		SUBTEST(igt_mock_ppgtt_huge_fill),
  		SUBTEST(igt_mock_ppgtt_64K),
  	};
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
  	struct i915_ppgtt *ppgtt;
  	int err;
- dev_priv = mock_gem_device();
-	if (!dev_priv)
+	i915 = mock_gem_device();
+	if (!i915)
  		return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
-	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
-	mkwrite_device_info(dev_priv)->ppgtt_size = 48;
+	mkwrite_device_info(i915)->ppgtt_type = INTEL_PPGTT_FULL;
+	mkwrite_device_info(i915)->ppgtt_size = 48;
- mutex_lock(&dev_priv->drm.struct_mutex);
-	ppgtt = i915_ppgtt_create(dev_priv);
+	mutex_lock(&i915->drm.struct_mutex);
+	ppgtt = i915_ppgtt_create(i915);
  	if (IS_ERR(ppgtt)) {
  		err = PTR_ERR(ppgtt);
  		goto out_unlock;
@@ -1720,13 +1720,13 @@ int i915_gem_huge_page_mock_selftests(void)
  	i915_vm_put(&ppgtt->vm);
out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	drm_dev_put(&dev_priv->drm);
+	mutex_unlock(&i915->drm.struct_mutex);
+	drm_dev_put(&i915->drm);
return err;
  }
-int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
+int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
  {
  	static const struct i915_subtest tests[] = {
  		SUBTEST(igt_shrink_thp),
@@ -1741,22 +1741,22 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
  	intel_wakeref_t wakeref;
  	int err;
- if (!HAS_PPGTT(dev_priv)) {
+	if (!HAS_PPGTT(i915)) {
  		pr_info("PPGTT not supported, skipping live-selftests\n");
  		return 0;
  	}
- if (i915_terminally_wedged(dev_priv))
+	if (i915_terminally_wedged(i915))
  		return 0;
- file = mock_file(dev_priv);
+	file = mock_file(i915);
  	if (IS_ERR(file))
  		return PTR_ERR(file);
- mutex_lock(&dev_priv->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(dev_priv);
+	mutex_lock(&i915->drm.struct_mutex);
+	wakeref = intel_runtime_pm_get(i915);
- ctx = live_context(dev_priv, file);
+	ctx = live_context(i915, file);
  	if (IS_ERR(ctx)) {
  		err = PTR_ERR(ctx);
  		goto out_unlock;
@@ -1768,10 +1768,10 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
  	err = i915_subtests(tests, ctx);
out_unlock:
-	intel_runtime_pm_put(dev_priv, wakeref);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_runtime_pm_put(i915, wakeref);
+	mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(dev_priv, file);
+	mock_file_free(i915, file);
return err;
  }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 74b0e5871c4b..2fdcaa1667bf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1748,7 +1748,7 @@ int i915_gem_context_mock_selftests(void)
  	return err;
  }
-int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
+int i915_gem_context_live_selftests(struct drm_i915_private *i915)
  {
  	static const struct i915_subtest tests[] = {
  		SUBTEST(live_nop_switch),
@@ -1759,8 +1759,8 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
  		SUBTEST(igt_vm_isolation),
  	};
- if (i915_terminally_wedged(dev_priv))
+	if (i915_terminally_wedged(i915))
  		return 0;
- return i915_subtests(tests, dev_priv);
+	return i915_subtests(tests, i915);
  }
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 1439fa4093ac..74f9ad6648eb 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -465,7 +465,7 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
  }
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
+bool intel_engines_are_idle(struct drm_i915_private *i915);
void intel_engine_lost_context(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index c0d986db5a75..a048b8743ce6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -160,7 +160,7 @@ static const struct engine_info intel_engines[] = {
/**
   * intel_engine_context_size() - return the size of the context for an engine
- * @dev_priv: i915 device private
+ * @i915: i915 device private
   * @class: engine class
   *
   * Each engine class may require a different amount of space for a context
@@ -172,7 +172,7 @@ static const struct engine_info intel_engines[] = {
   * in LRC mode, but does not include the "shared data page" used with
   * GuC submission. The caller should account for this if using the GuC.
   */
-u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
+u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class)
  {
  	u32 cxt_size;
@@ -180,9 +180,9 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
  	switch (class) {
  	case RENDER_CLASS:
-		switch (INTEL_GEN(dev_priv)) {
+		switch (INTEL_GEN(i915)) {
  		default:
-			MISSING_CASE(INTEL_GEN(dev_priv));
+			MISSING_CASE(INTEL_GEN(i915));
  			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
  		case 11:
  			return GEN11_LR_CONTEXT_RENDER_SIZE;
@@ -193,7 +193,7 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
  		case 8:
  			return GEN8_LR_CONTEXT_RENDER_SIZE;
  		case 7:
-			if (IS_HASWELL(dev_priv))
+			if (IS_HASWELL(i915))
  				return HSW_CXT_TOTAL_SIZE;
cxt_size = I915_READ(GEN7_CXT_SIZE);
@@ -217,7 +217,7 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
  			 */
  			cxt_size = I915_READ(CXT_SIZE) + 1;
  			DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
-					 INTEL_GEN(dev_priv),
+					 INTEL_GEN(i915),
  					 cxt_size * 64,
  					 cxt_size - 1);
  			return round_up(cxt_size * 64, PAGE_SIZE);
@@ -234,7 +234,7 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
  	case VIDEO_DECODE_CLASS:
  	case VIDEO_ENHANCEMENT_CLASS:
  	case COPY_ENGINE_CLASS:
-		if (INTEL_GEN(dev_priv) < 8)
+		if (INTEL_GEN(i915) < 8)
  			return 0;
  		return GEN8_LR_CONTEXT_OTHER_SIZE;
  	}
@@ -284,7 +284,7 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
  }
static int
-intel_engine_setup(struct drm_i915_private *dev_priv,
+intel_engine_setup(struct drm_i915_private *i915,
  		   enum intel_engine_id id)
  {
  	const struct engine_info *info = &intel_engines[id];
@@ -301,10 +301,10 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
  	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
  		return -EINVAL;
- if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+	if (GEM_DEBUG_WARN_ON(i915->engine_class[info->class][info->instance]))
  		return -EINVAL;
- GEM_BUG_ON(dev_priv->engine[id]);
+	GEM_BUG_ON(i915->engine[id]);
  	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
  	if (!engine)
  		return -ENOMEM;
@@ -313,11 +313,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->id = id;
  	engine->mask = BIT(id);
-	engine->i915 = dev_priv;
-	engine->uncore = &dev_priv->uncore;
+	engine->i915 = i915;
+	engine->uncore = &i915->uncore;
  	__sprint_engine_name(engine->name, info);
  	engine->hw_id = engine->guc_id = info->hw_id;
-	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
+	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
  	engine->class = info->class;
  	engine->instance = info->instance;
@@ -329,12 +329,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
  	engine->uabi_class = intel_engine_classes[info->class].uabi_class;
-	engine->context_size = intel_engine_context_size(dev_priv,
+	engine->context_size = intel_engine_context_size(i915,
  							 engine->class);
  	if (WARN_ON(engine->context_size > BIT(20)))
  		engine->context_size = 0;
  	if (engine->context_size)
-		DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
+		DRIVER_CAPS(i915)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
  	engine->schedule = NULL;
@@ -346,8 +346,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
  	/* Scrub mmio state on takeover */
  	intel_engine_sanitize_mmio(engine);
- dev_priv->engine_class[info->class][info->instance] = engine;
-	dev_priv->engine[id] = engine;
+	i915->engine_class[info->class][info->instance] = engine;
+	i915->engine[id] = engine;
  	return 0;
  }
@@ -953,17 +953,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
  	}
  }
-u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *i915)
  {
-	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
  	u32 mcr_s_ss_select;
  	u32 slice = fls(sseu->slice_mask);
  	u32 subslice = fls(sseu->subslice_mask[slice]);
- if (IS_GEN(dev_priv, 10))
+	if (IS_GEN(i915, 10))
  		mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
  				  GEN8_MCR_SUBSLICE(subslice);
-	else if (INTEL_GEN(dev_priv) >= 11)
+	else if (INTEL_GEN(i915) >= 11)
  		mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
  				  GEN11_MCR_SUBSLICE(subslice);
  	else
@@ -1095,7 +1095,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
static bool ring_is_idle(struct intel_engine_cs *engine)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
  	intel_wakeref_t wakeref;
  	bool idle = true;
@@ -1103,7 +1103,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
  		return true;
/* If the whole device is asleep, the engine must be idle */
-	wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+	wakeref = intel_runtime_pm_get_if_in_use(i915);
  	if (!wakeref)
  		return true;
@@ -1113,11 +1113,11 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
  		idle = false;
/* No bit for gen2, so assume the CS parser is idle */
-	if (INTEL_GEN(dev_priv) > 2 &&
+	if (INTEL_GEN(i915) > 2 &&
  	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
  		idle = false;
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return idle;
  }
@@ -1321,12 +1321,12 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
  static void intel_engine_print_registers(const struct intel_engine_cs *engine,
  					 struct drm_printer *m)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
  	const struct intel_engine_execlists * const execlists =
  		&engine->execlists;
  	u64 addr;
- if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
+	if (engine->id == RCS0 && IS_GEN_RANGE(i915, 4, 7))
  		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
  	drm_printf(m, "\tRING_START: 0x%08x\n",
  		   ENGINE_READ(engine, RING_START));
@@ -1343,7 +1343,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
  			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
  	}
- if (INTEL_GEN(dev_priv) >= 6) {
+	if (INTEL_GEN(i915) >= 6) {
  		drm_printf(m, "\tRING_IMR: %08x\n",
  			   ENGINE_READ(engine, RING_IMR));
  	}
@@ -1354,15 +1354,15 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
  	addr = intel_engine_get_last_batch_head(engine);
  	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
  		   upper_32_bits(addr), lower_32_bits(addr));
-	if (INTEL_GEN(dev_priv) >= 8)
+	if (INTEL_GEN(i915) >= 8)
  		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
-	else if (INTEL_GEN(dev_priv) >= 4)
+	else if (INTEL_GEN(i915) >= 4)
  		addr = ENGINE_READ(engine, RING_DMA_FADD);
  	else
  		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
  	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
  		   upper_32_bits(addr), lower_32_bits(addr));
-	if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		drm_printf(m, "\tIPEIR: 0x%08x\n",
  			   ENGINE_READ(engine, RING_IPEIR));
  		drm_printf(m, "\tIPEHR: 0x%08x\n",
@@ -1372,7 +1372,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
  		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
  	}
- if (HAS_EXECLISTS(dev_priv)) {
+	if (HAS_EXECLISTS(i915)) {
  		const u32 *hws =
  			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
  		const u8 num_entries = execlists->csb_size;
@@ -1426,7 +1426,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
  		}
  		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
  		rcu_read_unlock();
-	} else if (INTEL_GEN(dev_priv) > 6) {
+	} else if (INTEL_GEN(i915) > 6) {
  		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
  			   ENGINE_READ(engine, RING_PP_DIR_BASE));
  		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
index 174bb0a60309..948423c5f67d 100644
--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
@@ -50,7 +50,7 @@ static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
static bool subunits_stuck(struct intel_engine_cs *engine)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
  	struct intel_instdone instdone;
  	struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
  	bool stuck;
@@ -72,7 +72,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
  	stuck &= instdone_unchanged(instdone.slice_common,
  				    &accu_instdone->slice_common);
- for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+	for_each_instdone_slice_subslice(i915, slice, subslice) {
  		stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
  					    &accu_instdone->sampler[slice][subslice]);
  		stuck &= instdone_unchanged(instdone.row[slice][subslice],
@@ -103,7 +103,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
  static enum intel_engine_hangcheck_action
  engine_stuck(struct intel_engine_cs *engine, u64 acthd)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
  	enum intel_engine_hangcheck_action ha;
  	u32 tmp;
@@ -111,7 +111,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
  	if (ha != ENGINE_DEAD)
  		return ha;
- if (IS_GEN(dev_priv, 2))
+	if (IS_GEN(i915, 2))
  		return ENGINE_DEAD;
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -121,7 +121,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
  	 */
  	tmp = ENGINE_READ(engine, RING_CTL);
  	if (tmp & RING_WAIT) {
-		i915_handle_error(dev_priv, engine->mask, 0,
+		i915_handle_error(i915, engine->mask, 0,
  				  "stuck wait on %s", engine->name);
  		ENGINE_WRITE(engine, RING_CTL, tmp);
  		return ENGINE_WAIT_KICK;
@@ -256,8 +256,8 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
   */
  static void i915_hangcheck_elapsed(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv),
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915),
  			     gpu_error.hangcheck_work.work);
  	intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
  	struct intel_engine_cs *engine;
@@ -267,13 +267,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
  	if (!i915_modparams.enable_hangcheck)
  		return;
- if (!READ_ONCE(dev_priv->gt.awake))
+	if (!READ_ONCE(i915->gt.awake))
  		return;
- if (i915_terminally_wedged(dev_priv))
+	if (i915_terminally_wedged(i915))
  		return;
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+	wakeref = intel_runtime_pm_get_if_in_use(i915);
  	if (!wakeref)
  		return;
@@ -281,9 +281,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
  	 * periodically arm the mmio checker to see if we are triggering
  	 * any invalid access.
  	 */
-	intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
+	intel_uncore_arm_unclaimed_mmio_detection(&i915->uncore);
- for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		struct hangcheck hc;
intel_engine_signal_breadcrumbs(engine);
@@ -305,7 +305,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
  	if (GEM_SHOW_DEBUG() && (hung | stuck)) {
  		struct drm_printer p = drm_debug_printer("hangcheck");
- for_each_engine(engine, dev_priv, id) {
+		for_each_engine(engine, i915, id) {
  			if (intel_engine_is_idle(engine))
  				continue;
@@ -314,20 +314,20 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
  	}
if (wedged) {
-		dev_err(dev_priv->drm.dev,
+		dev_err(i915->drm.dev,
  			"GPU recovery timed out,"
  			" cancelling all in-flight rendering.\n");
  		GEM_TRACE_DUMP();
-		i915_gem_set_wedged(dev_priv);
+		i915_gem_set_wedged(i915);
  	}
if (hung)
-		hangcheck_declare_hang(dev_priv, hung, stuck);
+		hangcheck_declare_hang(i915, hung, stuck);
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
/* Reset timer in case GPU hangs without another request being added */
-	i915_queue_hangcheck(dev_priv);
+	i915_queue_hangcheck(i915);
  }
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 79df66022d3a..9d6f0893f528 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -239,7 +239,7 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
/**
   * get_mocs_settings()
- * @dev_priv:	i915 device.
+ * @i915:	i915 device.
   * @table:      Output table that will be made to point at appropriate
   *	      MOCS values for the device.
   *
@@ -249,33 +249,33 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
   *
   * Return: true if there are applicable MOCS settings for the device.
   */
-static bool get_mocs_settings(struct drm_i915_private *dev_priv,
+static bool get_mocs_settings(struct drm_i915_private *i915,
  			      struct drm_i915_mocs_table *table)
  {
  	bool result = false;
- if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
  		table->size  = ARRAY_SIZE(icelake_mocs_table);
  		table->table = icelake_mocs_table;
  		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
  		result = true;
-	} else if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+	} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
  		table->size  = ARRAY_SIZE(skylake_mocs_table);
  		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
  		table->table = skylake_mocs_table;
  		result = true;
-	} else if (IS_GEN9_LP(dev_priv)) {
+	} else if (IS_GEN9_LP(i915)) {
  		table->size  = ARRAY_SIZE(broxton_mocs_table);
  		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
  		table->table = broxton_mocs_table;
  		result = true;
  	} else {
-		WARN_ONCE(INTEL_GEN(dev_priv) >= 9,
+		WARN_ONCE(INTEL_GEN(i915) >= 9,
  			  "Platform that should have a MOCS table does not.\n");
  	}
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
-	if (IS_GEN(dev_priv, 9)) {
+	if (IS_GEN(i915, 9)) {
  		int i;
for (i = 0; i < table->size; i++)
@@ -330,12 +330,12 @@ static u32 get_entry_control(const struct drm_i915_mocs_table *table,
   */
  void intel_mocs_init_engine(struct intel_engine_cs *engine)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
  	struct drm_i915_mocs_table table;
  	unsigned int index;
  	u32 unused_value;
- if (!get_mocs_settings(dev_priv, &table))
+	if (!get_mocs_settings(i915, &table))
  		return;
/* Set unused values to PTE */
@@ -482,7 +482,7 @@ static int emit_mocs_l3cc_table(struct i915_request *rq,
/**
   * intel_mocs_init_l3cc_table() - program the mocs control table
- * @dev_priv:      i915 device private
+ * @i915:      i915 device private
   *
   * This function simply programs the mocs registers for the given table
   * starting at the given address. This register set is  programmed in pairs.
@@ -494,13 +494,13 @@ static int emit_mocs_l3cc_table(struct i915_request *rq,
   *
   * Return: Nothing.
   */
-void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
+void intel_mocs_init_l3cc_table(struct drm_i915_private *i915)
  {
  	struct drm_i915_mocs_table table;
  	unsigned int i;
  	u16 unused_value;
- if (!get_mocs_settings(dev_priv, &table))
+	if (!get_mocs_settings(i915, &table))
  		return;
/* Set unused values to PTE */
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 0913704a1af2..6f8973fbcb3e 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -54,7 +54,7 @@ struct i915_request;
  struct intel_engine_cs;
int intel_rcs_context_init_mocs(struct i915_request *rq);
-void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
+void intel_mocs_init_l3cc_table(struct drm_i915_private *i915);
  void intel_mocs_init_engine(struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 6368b37f26d1..d5cd3cccb407 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1208,13 +1208,13 @@ static void clear_error_registers(struct drm_i915_private *i915,
  	}
  }
-static void gen6_check_faults(struct drm_i915_private *dev_priv)
+static void gen6_check_faults(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
  	u32 fault;
- for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		fault = GEN6_RING_FAULT_REG_READ(engine);
  		if (fault & RING_FAULT_VALID) {
  			DRM_DEBUG_DRIVER("Unexpected fault\n"
@@ -1230,7 +1230,7 @@ static void gen6_check_faults(struct drm_i915_private *dev_priv)
  	}
  }
-static void gen8_check_faults(struct drm_i915_private *dev_priv)
+static void gen8_check_faults(struct drm_i915_private *i915)
  {
  	u32 fault = I915_READ(GEN8_RING_FAULT_REG);
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index b3bf47e8162f..8ec2d332e206 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -489,11 +489,11 @@ static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
  	u32 addr;
addr = lower_32_bits(phys);
-	if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		addr |= (phys >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
@@ -515,14 +515,14 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
  	i915_reg_t hwsp;
/*
  	 * The ring status page addresses are no longer next to the rest of
  	 * the ring registers as of gen7.
  	 */
-	if (IS_GEN(dev_priv, 7)) {
+	if (IS_GEN(i915, 7)) {
  		switch (engine->id) {
  		/*
  		 * No more rings exist on Gen7. Default case is only to shut up
@@ -544,7 +544,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
  			hwsp = VEBOX_HWS_PGA_GEN7;
  			break;
  		}
-	} else if (IS_GEN(dev_priv, 6)) {
+	} else if (IS_GEN(i915, 6)) {
  		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
  	} else {
  		hwsp = RING_HWS_PGA(engine->mmio_base);
@@ -556,9 +556,9 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
static void flush_cs_tlb(struct intel_engine_cs *engine)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
- if (!IS_GEN_RANGE(dev_priv, 6, 7))
+	if (!IS_GEN_RANGE(i915, 6, 7))
  		return;
/* ring should be idle before issuing a sync flush*/
@@ -585,9 +585,9 @@ static void ring_setup_status_page(struct intel_engine_cs *engine)
static bool stop_ring(struct intel_engine_cs *engine)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(dev_priv) > 2) {
+	if (INTEL_GEN(i915) > 2) {
  		ENGINE_WRITE(engine,
  			     RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
  		if (intel_wait_for_register(engine->uncore,
@@ -622,7 +622,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
static int xcs_resume(struct intel_engine_cs *engine)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
  	struct intel_ring *ring = engine->buffer;
  	int ret = 0;
@@ -654,7 +654,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
  		}
  	}
- if (HWS_NEEDS_PHYSICAL(dev_priv))
+	if (HWS_NEEDS_PHYSICAL(i915))
  		ring_setup_phys_status_page(engine);
  	else
  		ring_setup_status_page(engine);
@@ -705,7 +705,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
  		goto out;
  	}
- if (INTEL_GEN(dev_priv) > 2)
+	if (INTEL_GEN(i915) > 2)
  		ENGINE_WRITE(engine,
  			     RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
@@ -815,7 +815,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
  static int rcs_resume(struct intel_engine_cs *engine)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
/*
  	 * Disable CONSTANT_BUFFER before it is loaded from the context
@@ -827,12 +827,12 @@ static int rcs_resume(struct intel_engine_cs *engine)
  	 * they are already accustomed to from before contexts were
  	 * enabled.
  	 */
-	if (IS_GEN(dev_priv, 4))
+	if (IS_GEN(i915, 4))
  		I915_WRITE(ECOSKPD,
  			   _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-	if (IS_GEN_RANGE(dev_priv, 4, 6))
+	if (IS_GEN_RANGE(i915, 4, 6))
  		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
@@ -841,22 +841,22 @@ static int rcs_resume(struct intel_engine_cs *engine)
  	 *
  	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
  	 */
-	if (IS_GEN_RANGE(dev_priv, 6, 7))
+	if (IS_GEN_RANGE(i915, 6, 7))
  		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
  	/* WaEnableFlushTlbInvalidationMode:snb */
-	if (IS_GEN(dev_priv, 6))
+	if (IS_GEN(i915, 6))
  		I915_WRITE(GFX_MODE,
  			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-	if (IS_GEN(dev_priv, 7))
+	if (IS_GEN(i915, 7))
  		I915_WRITE(GFX_MODE_GEN7,
  			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
  			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (IS_GEN(dev_priv, 6)) {
+	if (IS_GEN(i915, 6)) {
  		/* From the Sandybridge PRM, volume 1 part 3, page 24:
  		 * "If this bit is set, STCunit will have LRA as replacement
  		 *  policy. [...] This bit must be reset.  LRA replacement
@@ -866,7 +866,7 @@ static int rcs_resume(struct intel_engine_cs *engine)
  			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
  	}
- if (IS_GEN_RANGE(dev_priv, 6, 7))
+	if (IS_GEN_RANGE(i915, 6, 7))
  		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return xcs_resume(engine);
@@ -1227,15 +1227,15 @@ void intel_ring_unpin(struct intel_ring *ring)
  }
static struct i915_vma *
-intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
+intel_ring_create_vma(struct drm_i915_private *i915, int size)
  {
-	struct i915_address_space *vm = &dev_priv->ggtt.vm;
+	struct i915_address_space *vm = &i915->ggtt.vm;
  	struct drm_i915_gem_object *obj;
  	struct i915_vma *vma;
- obj = i915_gem_object_create_stolen(dev_priv, size);
+	obj = i915_gem_object_create_stolen(i915, size);
  	if (!obj)
-		obj = i915_gem_object_create_internal(dev_priv, size);
+		obj = i915_gem_object_create_internal(i915, size);
  	if (IS_ERR(obj))
  		return ERR_CAST(obj);
@@ -2115,9 +2115,9 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
  static void ring_destroy(struct intel_engine_cs *engine)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
- WARN_ON(INTEL_GEN(dev_priv) > 2 &&
+	WARN_ON(INTEL_GEN(i915) > 2 &&
  		(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_ring_unpin(engine->buffer);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 1fa2f65c3cd1..8807de566361 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -40,7 +40,7 @@
  static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
  {
  	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	unsigned int flags;
  	u64 start, end, size;
  	struct drm_mm_node *node;
@@ -60,14 +60,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
  		flags = PIN_MAPPABLE;
  	}
- mutex_lock(&dev_priv->drm.struct_mutex);
-	mmio_hw_access_pre(dev_priv);
-	ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+	mutex_lock(&i915->drm.struct_mutex);
+	mmio_hw_access_pre(i915);
+	ret = i915_gem_gtt_insert(&i915->ggtt.vm, node,
  				  size, I915_GTT_PAGE_SIZE,
  				  I915_COLOR_UNEVICTABLE,
  				  start, end, flags);
-	mmio_hw_access_post(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mmio_hw_access_post(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
  	if (ret)
  		gvt_err("fail to alloc %s gm space from host\n",
  			high_gm ? "high" : "low");
@@ -78,7 +78,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
  static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
  {
  	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	int ret;
ret = alloc_gm(vgpu, false);
@@ -97,20 +97,20 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
return 0;
  out_free_aperture:
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
  	drm_mm_remove_node(&vgpu->gm.low_gm_node);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
  	return ret;
  }
static void free_vgpu_gm(struct intel_vgpu *vgpu)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
- mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
  	drm_mm_remove_node(&vgpu->gm.low_gm_node);
  	drm_mm_remove_node(&vgpu->gm.high_gm_node);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
  }
/**
@@ -127,11 +127,11 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
  		u32 fence, u64 value)
  {
  	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	struct drm_i915_fence_reg *reg;
  	i915_reg_t fence_reg_lo, fence_reg_hi;
- assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(i915);
if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
  		return;
@@ -162,41 +162,41 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
  static void free_vgpu_fence(struct intel_vgpu *vgpu)
  {
  	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	struct drm_i915_fence_reg *reg;
  	u32 i;
if (WARN_ON(!vgpu_fence_sz(vgpu)))
  		return;
- intel_runtime_pm_get(dev_priv);
+	intel_runtime_pm_get(i915);
- mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
  	_clear_vgpu_fence(vgpu);
  	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
  		reg = vgpu->fence.regs[i];
  		i915_unreserve_fence(reg);
  		vgpu->fence.regs[i] = NULL;
  	}
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+	intel_runtime_pm_put_unchecked(i915);
  }
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
  {
  	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	struct drm_i915_fence_reg *reg;
  	int i;
- intel_runtime_pm_get(dev_priv);
+	intel_runtime_pm_get(i915);
/* Request fences from host */
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
-		reg = i915_reserve_fence(dev_priv);
+		reg = i915_reserve_fence(i915);
  		if (IS_ERR(reg))
  			goto out_free_fence;
@@ -205,8 +205,8 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
  	_clear_vgpu_fence(vgpu);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	intel_runtime_pm_put_unchecked(dev_priv);
+	mutex_unlock(&i915->drm.struct_mutex);
+	intel_runtime_pm_put_unchecked(i915);
  	return 0;
  out_free_fence:
  	gvt_vgpu_err("Failed to alloc fences\n");
@@ -218,8 +218,8 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
  		i915_unreserve_fence(reg);
  		vgpu->fence.regs[i] = NULL;
  	}
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	intel_runtime_pm_put_unchecked(dev_priv);
+	mutex_unlock(&i915->drm.struct_mutex);
+	intel_runtime_pm_put_unchecked(i915);
  	return -ENOSPC;
  }
@@ -313,11 +313,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
   */
  void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
- intel_runtime_pm_get(dev_priv);
+	intel_runtime_pm_get(i915);
  	_clear_vgpu_fence(vgpu);
-	intel_runtime_pm_put_unchecked(dev_priv);
+	intel_runtime_pm_put_unchecked(i915);
  }
/**
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 19cf1bbe059d..d3a744e7c959 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -391,9 +391,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
  	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
-				pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+				pci_resource_len(gvt->i915->drm.pdev, 0);
  	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
-				pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+				pci_resource_len(gvt->i915->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
  }
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6ea88270c818..2b09acc0502f 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -819,7 +819,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
  	unsigned int data;
  	u32 ring_base;
  	u32 nopid;
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = s->vgpu->gvt->i915;
if (!strcmp(cmd, "lri"))
  		data = cmd_val(s, index + 1);
@@ -829,7 +829,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
  		return -EINVAL;
  	}
- ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+	ring_base = i915->engine[s->ring_id]->mmio_base;
  	nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
@@ -905,7 +905,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
  	 * update reg values in it into vregs, so LRIs in workload with
  	 * inhibit context will restore with correct values
  	 */
-	if (IS_GEN(gvt->dev_priv, 9) &&
+	if (IS_GEN(gvt->i915, 9) &&
  			intel_gvt_mmio_is_in_ctx(gvt, offset) &&
  			!strncmp(cmd, "lri", 3)) {
  		intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -946,7 +946,7 @@ static int cmd_handler_lri(struct parser_exec_state *s)
  	struct intel_gvt *gvt = s->vgpu->gvt;
for (i = 1; i < cmd_len; i += 2) {
-		if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
+		if (IS_BROADWELL(gvt->i915) && s->ring_id != RCS0) {
  			if (s->ring_id == BCS0 &&
  			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
  				ret |= 0;
@@ -968,7 +968,7 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
  	int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len; i += 2) {
-		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+		if (IS_BROADWELL(s->vgpu->gvt->i915))
  			ret |= ((cmd_reg_inhibit(s, i) ||
  					(cmd_reg_inhibit(s, i + 1)))) ?
  				-EBADRQC : 0;
@@ -996,7 +996,7 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
  	int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len;) {
-		if (IS_BROADWELL(gvt->dev_priv))
+		if (IS_BROADWELL(gvt->i915))
  			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
  		if (ret)
  			break;
@@ -1180,7 +1180,7 @@ struct plane_code_mapping {
  static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
  		struct mi_display_flip_command_info *info)
  {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = s->vgpu->gvt->i915;
  	struct plane_code_mapping gen8_plane_code[] = {
  		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
  		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1226,7 +1226,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
  static int skl_decode_mi_display_flip(struct parser_exec_state *s,
  		struct mi_display_flip_command_info *info)
  {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = s->vgpu->gvt->i915;
  	struct intel_vgpu *vgpu = s->vgpu;
  	u32 dword0 = cmd_val(s, 0);
  	u32 dword1 = cmd_val(s, 1);
@@ -1285,13 +1285,13 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
  static int gen8_check_mi_display_flip(struct parser_exec_state *s,
  		struct mi_display_flip_command_info *info)
  {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = s->vgpu->gvt->i915;
  	u32 stride, tile;
if (!info->async_flip)
  		return 0;
- if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
  		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
  				GENMASK(12, 10)) >> 10;
@@ -1314,12 +1314,12 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
  		struct parser_exec_state *s,
  		struct mi_display_flip_command_info *info)
  {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = s->vgpu->gvt->i915;
  	struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
  		      info->surf_val << 12);
-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
  			      info->stride_val);
  		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1345,11 +1345,11 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
  static int decode_mi_display_flip(struct parser_exec_state *s,
  		struct mi_display_flip_command_info *info)
  {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = s->vgpu->gvt->i915;
- if (IS_BROADWELL(dev_priv))
+	if (IS_BROADWELL(i915))
  		return gen8_decode_mi_display_flip(s, info);
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1756,7 +1756,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
  	if (bb->ppgtt)
  		start_offset = gma & ~I915_GTT_PAGE_MASK;
- bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
+	bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->i915,
  					       round_up(bb_size + start_offset,
  							PAGE_SIZE));
  	if (IS_ERR(bb->obj)) {
@@ -2830,7 +2830,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
  	int ret = 0;
  	void *map;
- obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
+	obj = i915_gem_object_create_shmem(workload->vgpu->gvt->i915,
  					   roundup(ctx_size + CACHELINE_BYTES,
  						   PAGE_SIZE));
  	if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2fb7b73b260d..1ff7f11c89a2 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -58,7 +58,7 @@ static int mmio_offset_compare(void *priv,
  static inline int mmio_diff_handler(struct intel_gvt *gvt,
  				    u32 offset, void *data)
  {
-	struct drm_i915_private *i915 = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	struct mmio_diff_param *param = data;
  	struct diff_mmio *node;
  	u32 preg, vreg;
@@ -98,10 +98,10 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
  	mutex_lock(&gvt->lock);
  	spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->dev_priv);
+	mmio_hw_access_pre(gvt->i915);
  	/* Recognize all the diff mmios to list. */
  	intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
-	mmio_hw_access_post(gvt->dev_priv);
+	mmio_hw_access_post(gvt->i915);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
  	mutex_unlock(&gvt->lock);
@@ -142,7 +142,7 @@ static int
  vgpu_scan_nonprivbb_set(void *data, u64 val)
  {
  	struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	enum intel_engine_id id;
  	char buf[128], *s;
  	int len;
@@ -164,7 +164,7 @@ vgpu_scan_nonprivbb_set(void *data, u64 val)
  	for (id = 0; id < I915_NUM_ENGINES; id++) {
  		struct intel_engine_cs *engine;
- engine = dev_priv->engine[id];
+		engine = i915->engine[id];
  		if (engine && (val & (1 << id))) {
  			len = snprintf(s, 4, "%d, ", engine->id);
  			s += len;
@@ -240,7 +240,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
   */
  int intel_gvt_debugfs_init(struct intel_gvt *gvt)
  {
-	struct drm_minor *minor = gvt->dev_priv->drm.primary;
+	struct drm_minor *minor = gvt->i915->drm.primary;
  	struct dentry *ent;
gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e1c313da6c00..b037aeed2f40 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -57,7 +57,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
  		return 0;
@@ -69,7 +69,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
  		return -EINVAL;
@@ -168,10 +168,10 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	int pipe;
- if (IS_BROXTON(dev_priv)) {
+	if (IS_BROXTON(i915)) {
  		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
  			BXT_DE_PORT_HP_DDIB |
  			BXT_DE_PORT_HP_DDIC);
@@ -198,8 +198,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
  			SDE_PORTC_HOTPLUG_CPT |
  			SDE_PORTD_HOTPLUG_CPT);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
-	    IS_COFFEELAKE(dev_priv)) {
+	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+	    IS_COFFEELAKE(i915)) {
  		vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
  				SDE_PORTE_HOTPLUG_SPT);
  		vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
@@ -223,7 +223,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
  			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
  			(PORT_B << TRANS_DDI_PORT_SHIFT) |
  			TRANS_DDI_FUNC_ENABLE);
-		if (IS_BROADWELL(dev_priv)) {
+		if (IS_BROADWELL(i915)) {
  			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) &=
  				~PORT_CLK_SEL_MASK;
  			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |=
@@ -243,7 +243,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
  			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
  			(PORT_C << TRANS_DDI_PORT_SHIFT) |
  			TRANS_DDI_FUNC_ENABLE);
-		if (IS_BROADWELL(dev_priv)) {
+		if (IS_BROADWELL(i915)) {
  			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) &=
  				~PORT_CLK_SEL_MASK;
  			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |=
@@ -263,7 +263,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
  			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
  			(PORT_D << TRANS_DDI_PORT_SHIFT) |
  			TRANS_DDI_FUNC_ENABLE);
-		if (IS_BROADWELL(dev_priv)) {
+		if (IS_BROADWELL(i915)) {
  			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) &=
  				~PORT_CLK_SEL_MASK;
  			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |=
@@ -274,14 +274,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
  		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
  	}
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
-	     IS_COFFEELAKE(dev_priv)) &&
+	if ((IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+	     IS_COFFEELAKE(i915)) &&
  			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
  		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
  	}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
-		if (IS_BROADWELL(dev_priv))
+		if (IS_BROADWELL(i915))
  			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
  				GEN8_PORT_DP_A_HOTPLUG;
  		else
@@ -291,11 +291,11 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
  	}
/* Clear host CRT status, so guest couldn't detect this host CRT. */
-	if (IS_BROADWELL(dev_priv))
+	if (IS_BROADWELL(i915))
  		vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
/* Disable Primary/Sprite/Cursor plane */
-	for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
  		vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
  		vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
@@ -389,7 +389,7 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	struct intel_vgpu_irq *irq = &vgpu->irq;
  	int vblank_event[] = {
  		[PIPE_A] = PIPE_A_VBLANK,
@@ -421,7 +421,7 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
  	int pipe;
mutex_lock(&vgpu->vgpu_lock);
-	for_each_pipe(vgpu->gvt->dev_priv, pipe)
+	for_each_pipe(vgpu->gvt->i915, pipe)
  		emulate_vblank_on_pipe(vgpu, pipe);
  	mutex_unlock(&vgpu->vgpu_lock);
  }
@@ -454,10 +454,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
   */
  void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
/* TODO: add more platforms support */
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915)) {
  		if (connected) {
  			vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
  				SFUSE_STRAP_DDID_DETECTED;
@@ -483,10 +483,10 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
   */
  void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
-	    IS_COFFEELAKE(dev_priv))
+	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+	    IS_COFFEELAKE(i915))
  		clean_virtual_dp_monitor(vgpu, PORT_D);
  	else
  		clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -505,12 +505,12 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
   */
  int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	intel_vgpu_init_i2c_edid(vgpu);
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
-	    IS_COFFEELAKE(dev_priv))
+	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+	    IS_COFFEELAKE(i915))
  		return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
  						resolution);
  	else
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 41c8ebc60c63..523038d224a1 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -39,7 +39,7 @@
  static int vgpu_gem_get_pages(
  		struct drm_i915_gem_object *obj)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
  	struct sg_table *st;
  	struct scatterlist *sg;
  	int i, ret;
@@ -61,7 +61,7 @@ static int vgpu_gem_get_pages(
  		kfree(st);
  		return ret;
  	}
-	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+	gtt_entries = (gen8_pte_t __iomem *)i915->ggtt.gsm +
  		(fb_info->start >> PAGE_SHIFT);
  	for_each_sg(st->sgl, sg, page_num, i) {
  		sg->offset = 0;
@@ -152,7 +152,7 @@ static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
  static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
  		struct intel_vgpu_fb_info *info)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_i915_gem_object *obj;
obj = i915_gem_object_alloc();
@@ -165,7 +165,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
obj->read_domains = I915_GEM_DOMAIN_GTT;
  	obj->write_domain = 0;
-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		unsigned int tiling_mode = 0;
  		unsigned int stride = 0;
@@ -360,7 +360,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
  int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
  {
-	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+	struct drm_device *dev = &vgpu->gvt->i915->drm;
  	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
  	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
  	struct intel_vgpu_fb_info fb_info;
@@ -466,7 +466,7 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
  /* To associate an exposed dmabuf with the dmabuf_obj */
  int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
  {
-	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+	struct drm_device *dev = &vgpu->gvt->i915->drm;
  	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
  	struct drm_i915_gem_object *obj;
  	struct dma_buf *dmabuf;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 1fe6124918f1..9e45cbac5eed 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -135,7 +135,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
  static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
  			unsigned int offset, void *p_data, unsigned int bytes)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -147,9 +147,9 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
  	if (pin_select == 0)
  		return 0;
- if (IS_BROXTON(dev_priv))
+	if (IS_BROXTON(i915))
  		port = bxt_get_port_from_gmbus0(pin_select);
-	else if (IS_COFFEELAKE(dev_priv))
+	else if (IS_COFFEELAKE(i915))
  		port = cnp_get_port_from_gmbus0(pin_select);
  	else
  		port = get_port_from_gmbus0(pin_select);
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f21b8fb5b37e..7a7a925565a0 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -40,7 +40,7 @@
  #define _EL_OFFSET_STATUS_PTR   0x3A0
#define execlist_ring_mmio(gvt, ring_id, offset) \
-	(gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+	(gvt->i915->engine[ring_id]->mmio_base + (offset))
#define valid_context(ctx) ((ctx)->valid)
  #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
@@ -133,7 +133,7 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
  	u32 write_pointer;
  	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
  	unsigned long hwsp_gpa;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
  			_EL_OFFSET_STATUS_PTR);
@@ -169,7 +169,7 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
  			status, 8);
  		intel_gvt_hypervisor_write_gpa(vgpu,
  			hwsp_gpa +
-			intel_hws_csb_write_index(dev_priv) * 4,
+			intel_hws_csb_write_index(i915) * 4,
  			&write_pointer, 4);
  	}
@@ -529,12 +529,12 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
  static void clean_execlist(struct intel_vgpu *vgpu,
  			   intel_engine_mask_t engine_mask)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	struct intel_engine_cs *engine;
  	struct intel_vgpu_submission *s = &vgpu->submission;
  	intel_engine_mask_t tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+	for_each_engine_masked(engine, i915, engine_mask, tmp) {
  		kfree(s->ring_scan_buffer[engine->id]);
  		s->ring_scan_buffer[engine->id] = NULL;
  		s->ring_scan_buffer_size[engine->id] = 0;
@@ -544,11 +544,11 @@ static void clean_execlist(struct intel_vgpu *vgpu,
  static void reset_execlist(struct intel_vgpu *vgpu,
  			   intel_engine_mask_t engine_mask)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	struct intel_engine_cs *engine;
  	intel_engine_mask_t tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+	for_each_engine_masked(engine, i915, engine_mask, tmp)
  		init_vgpu_execlist(vgpu, engine->id);
  }
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 65e847392aea..76c346eecda3 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -146,12 +146,12 @@ static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
  static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
  	u32 tiled, int stride_mask, int bpp)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
  	u32 stride = stride_reg;
- if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		switch (tiled) {
  		case PLANE_CTL_TILED_LINEAR:
  			stride = stride_reg * 64;
@@ -203,7 +203,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
  	struct intel_vgpu_primary_plane_format *plane)
  {
  	u32 val, fmt;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	int pipe;
pipe = get_active_pipe(vgpu);
@@ -215,7 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
  	if (!plane->enabled)
  		return -ENODEV;
- if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		plane->tiled = val & PLANE_CTL_TILED_MASK;
  		fmt = skl_format_to_drm(
  			val & PLANE_CTL_FORMAT_MASK,
@@ -256,7 +256,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
  	}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
-		(INTEL_GEN(dev_priv) >= 9) ?
+		(INTEL_GEN(i915) >= 9) ?
  			(_PRI_PLANE_STRIDE_MASK >> 6) :
  				_PRI_PLANE_STRIDE_MASK, plane->bpp);
@@ -334,7 +334,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
  {
  	u32 val, mode, index;
  	u32 alpha_plane, alpha_force;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	int pipe;
pipe = get_active_pipe(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 049775e8e350..efdc9df4f731 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -68,7 +68,7 @@ static struct bin_attribute firmware_attr = {
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
  {
-	struct drm_i915_private *i915 = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
*(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
  							    _MMIO(offset));
@@ -78,7 +78,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
  static int expose_firmware_sysfs(struct intel_gvt *gvt)
  {
  	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+	struct pci_dev *pdev = gvt->i915->drm.pdev;
  	struct gvt_firmware_header *h;
  	void *firmware;
  	void *p;
@@ -129,7 +129,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
static void clean_firmware_sysfs(struct intel_gvt *gvt)
  {
-	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+	struct pci_dev *pdev = gvt->i915->drm.pdev;
device_remove_bin_file(&pdev->dev, &firmware_attr);
  	vfree(firmware_attr.private);
@@ -153,8 +153,8 @@ static int verify_firmware(struct intel_gvt *gvt,
  			   const struct firmware *fw)
  {
  	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = gvt->i915;
+	struct pci_dev *pdev = i915->drm.pdev;
  	struct gvt_firmware_header *h;
  	unsigned long id, crc32_start;
  	const void *mem;
@@ -208,8 +208,8 @@ static int verify_firmware(struct intel_gvt *gvt,
  int intel_gvt_load_firmware(struct intel_gvt *gvt)
  {
  	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = gvt->i915;
+	struct pci_dev *pdev = i915->drm.pdev;
  	struct intel_gvt_firmware *firmware = &gvt->firmware;
  	struct gvt_firmware_header *h;
  	const struct firmware *fw;
@@ -244,7 +244,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
gvt_dbg_core("request hw state firmware %s...\n", path); - ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+	ret = request_firmware(&fw, path, &i915->drm.pdev->dev);
  	kfree(path);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 53115bdae12b..390562fe3223 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -275,24 +275,24 @@ static inline int get_pse_type(int type)
  	return gtt_type_table[type].pse_entry_type;
  }
-static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+static u64 read_pte64(struct drm_i915_private *i915, unsigned long index)
  {
-	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+	void __iomem *addr = (gen8_pte_t __iomem *)i915->ggtt.gsm + index;
return readq(addr);
  }
-static void ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct drm_i915_private *i915)
  {
-	mmio_hw_access_pre(dev_priv);
+	mmio_hw_access_pre(i915);
  	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-	mmio_hw_access_post(dev_priv);
+	mmio_hw_access_post(i915);
  }
-static void write_pte64(struct drm_i915_private *dev_priv,
+static void write_pte64(struct drm_i915_private *i915,
  		unsigned long index, u64 pte)
  {
-	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+	void __iomem *addr = (gen8_pte_t __iomem *)i915->ggtt.gsm + index;
writeq(pte, addr);
  }
@@ -315,7 +315,7 @@ static inline int gtt_get_entry64(void *pt,
  		if (WARN_ON(ret))
  			return ret;
  	} else if (!pt) {
-		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+		e->val64 = read_pte64(vgpu->gvt->i915, index);
  	} else {
  		e->val64 = *((u64 *)pt + index);
  	}
@@ -340,7 +340,7 @@ static inline int gtt_set_entry64(void *pt,
  		if (WARN_ON(ret))
  			return ret;
  	} else if (!pt) {
-		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+		write_pte64(vgpu->gvt->i915, index, e->val64);
  	} else {
  		*((u64 *)pt + index) = e->val64;
  	}
@@ -734,7 +734,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
  {
-	struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *kdev = &spt->vgpu->gvt->i915->drm.pdev->dev;
  	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
@@ -819,7 +819,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
  static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
  		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
  {
-	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *kdev = &vgpu->gvt->i915->drm.pdev->dev;
  	struct intel_vgpu_ppgtt_spt *spt = NULL;
  	dma_addr_t daddr;
  	int ret;
@@ -1044,14 +1044,14 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
- if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
+	if (INTEL_GEN(i915) == 9 || INTEL_GEN(i915) == 10) {
  		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
  			GAMW_ECO_ENABLE_64K_IPS_FIELD;
return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
-	} else if (INTEL_GEN(dev_priv) >= 11) {
+	} else if (INTEL_GEN(i915) >= 11) {
  		/* 64K paging only controlled by IPS bit in PTE now. */
  		return true;
  	} else
@@ -1153,7 +1153,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
  	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
  	unsigned long pfn;
- if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+	if (!HAS_PAGE_SIZES(vgpu->gvt->i915, I915_GTT_PAGE_SIZE_2M))
  		return 0;
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
@@ -2301,7 +2301,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
  	ggtt_invalidate_pte(vgpu, &e);
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
-	ggtt_invalidate(gvt->dev_priv);
+	ggtt_invalidate(gvt->i915);
  	return 0;
  }
@@ -2340,7 +2340,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
  				vgpu->gvt->device_info.gtt_entry_size_shift;
  	void *scratch_pt;
  	int i;
-	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &vgpu->gvt->i915->drm.pdev->dev;
  	dma_addr_t daddr;
if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
@@ -2397,7 +2397,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
  static int release_scratch_page_tree(struct intel_vgpu *vgpu)
  {
  	int i;
-	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &vgpu->gvt->i915->drm.pdev->dev;
  	dma_addr_t daddr;
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
@@ -2669,7 +2669,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  {
  	int ret;
  	void *page;
-	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &gvt->i915->drm.pdev->dev;
  	dma_addr_t daddr;
gvt_dbg_core("init gtt\n");
@@ -2718,7 +2718,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
   */
  void intel_gvt_clean_gtt(struct intel_gvt *gvt)
  {
-	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &gvt->i915->drm.pdev->dev;
  	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
  					I915_GTT_PAGE_SHIFT);
@@ -2766,7 +2766,7 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
  void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
  {
  	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
  	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
  	struct intel_gvt_gtt_entry old_entry;
@@ -2796,7 +2796,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
  		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
  	}
- ggtt_invalidate(dev_priv);
+	ggtt_invalidate(i915);
  }
/**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 43f4242062dd..b84cdd8d8c98 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -52,7 +52,7 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
  	int i;
  	struct intel_vgpu_type *t;
  	const char *driver_name = dev_driver_string(
-			&gvt->dev_priv->drm.pdev->dev);
+			&gvt->i915->drm.pdev->dev);
for (i = 0; i < gvt->num_types; i++) {
  		t = &gvt->types[i];
@@ -191,7 +191,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
  static void init_device_info(struct intel_gvt *gvt)
  {
  	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+	struct pci_dev *pdev = gvt->i915->drm.pdev;
info->max_support_vgpus = 8;
  	info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
@@ -257,15 +257,15 @@ static int init_service_thread(struct intel_gvt *gvt)
/**
   * intel_gvt_clean_device - clean a GVT device
- * @dev_priv: i915 private
+ * @i915: i915 private
   *
   * This function is called at the driver unloading stage, to free the
   * resources owned by a GVT device.
   *
   */
-void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+void intel_gvt_clean_device(struct drm_i915_private *i915)
  {
-	struct intel_gvt *gvt = to_gvt(dev_priv);
+	struct intel_gvt *gvt = to_gvt(i915);
if (WARN_ON(!gvt))
  		return;
@@ -285,13 +285,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
  	intel_gvt_clean_mmio_info(gvt);
  	idr_destroy(&gvt->vgpu_idr);
- kfree(dev_priv->gvt);
-	dev_priv->gvt = NULL;
+	kfree(i915->gvt);
+	i915->gvt = NULL;
  }
/**
   * intel_gvt_init_device - initialize a GVT device
- * @dev_priv: drm i915 private data
+ * @i915: drm i915 private data
   *
   * This function is called at the initialization stage, to initialize
   * necessary GVT components.
@@ -300,13 +300,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
   * Zero on success, negative error code if failed.
   *
   */
-int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+int intel_gvt_init_device(struct drm_i915_private *i915)
  {
  	struct intel_gvt *gvt;
  	struct intel_vgpu *vgpu;
  	int ret;
- if (WARN_ON(dev_priv->gvt))
+	if (WARN_ON(i915->gvt))
  		return -EEXIST;
gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
@@ -319,7 +319,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
  	spin_lock_init(&gvt->scheduler.mmio_context_lock);
  	mutex_init(&gvt->lock);
  	mutex_init(&gvt->sched_lock);
-	gvt->dev_priv = dev_priv;
+	gvt->i915 = i915;
  	init_device_info(gvt);
@@ -380,8 +380,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
  		gvt_err("debugfs registration failed, go on.\n");
gvt_dbg_core("gvt device initialization is done\n");
-	dev_priv->gvt = gvt;
-	intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+	i915->gvt = gvt;
+	intel_gvt_host.dev = &i915->drm.pdev->dev;
  	intel_gvt_host.initialized = true;
  	return 0;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b54f2bdc13a4..fbebe0ae4096 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -306,7 +306,7 @@ struct intel_gvt {
  	/* scheduler scope lock, protect gvt and vgpu schedule related data */
  	struct mutex sched_lock;
- struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
  	struct idr vgpu_idr;	/* vGPU IDR pool */
struct intel_gvt_device_info device_info;
@@ -373,12 +373,12 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
  #define HOST_FENCE 4
/* Aperture/GM space definitions for GVT device */
-#define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
+#define gvt_aperture_sz(gvt)	  (gvt->i915->ggtt.mappable_end)
+#define gvt_aperture_pa_base(gvt) (gvt->i915->ggtt.gmadr.start)
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
+#define gvt_ggtt_gm_sz(gvt)	  (gvt->i915->ggtt.vm.total)
  #define gvt_ggtt_sz(gvt) \
-	((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
+	((gvt->i915->ggtt.vm.total >> PAGE_SHIFT) << 3)
  #define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
@@ -390,7 +390,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
  #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
  				   + gvt_hidden_sz(gvt) - 1)
-#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+#define gvt_fence_sz(gvt) (gvt->i915->num_fence_regs)
/* Aperture/GM space definitions for vGPU */
  #define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
@@ -582,14 +582,14 @@ enum {
  	GVT_FAILSAFE_GUEST_ERR,
  };
-static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_pre(struct drm_i915_private *i915)
  {
-	intel_runtime_pm_get(dev_priv);
+	intel_runtime_pm_get(i915);
  }
-static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_post(struct drm_i915_private *i915)
  {
-	intel_runtime_pm_put_unchecked(dev_priv);
+	intel_runtime_pm_put_unchecked(i915);
  }
/**
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a6ade66349bd..9acb01b0b7c7 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -49,15 +49,15 @@
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
  {
-	if (IS_BROADWELL(gvt->dev_priv))
+	if (IS_BROADWELL(gvt->i915))
  		return D_BDW;
-	else if (IS_SKYLAKE(gvt->dev_priv))
+	else if (IS_SKYLAKE(gvt->i915))
  		return D_SKL;
-	else if (IS_KABYLAKE(gvt->dev_priv))
+	else if (IS_KABYLAKE(gvt->i915))
  		return D_KBL;
-	else if (IS_BROXTON(gvt->dev_priv))
+	else if (IS_BROXTON(gvt->i915))
  		return D_BXT;
-	else if (IS_COFFEELAKE(gvt->dev_priv))
+	else if (IS_COFFEELAKE(gvt->i915))
  		return D_CFL;
return 0;
@@ -156,7 +156,7 @@ int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
  	struct intel_engine_cs *engine;
offset &= ~GENMASK(11, 0);
-	for_each_engine(engine, gvt->dev_priv, id) {
+	for_each_engine(engine, gvt->i915, id) {
  		if (engine->mmio_base == offset)
  			return id;
  	}
@@ -217,7 +217,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
  {
  	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
- if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+	if (INTEL_GEN(vgpu->gvt->i915) <= 10) {
  		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
  			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
  		else if (!ips)
@@ -253,7 +253,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
  static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
  		void *p_data, unsigned int bytes)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	unsigned int fence_num = offset_to_fence_num(off);
  	int ret;
@@ -262,10 +262,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
  		return ret;
  	write_vreg(vgpu, off, p_data, bytes);
- mmio_hw_access_pre(dev_priv);
+	mmio_hw_access_pre(i915);
  	intel_vgpu_write_fence(vgpu, fence_num,
  			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
-	mmio_hw_access_post(dev_priv);
+	mmio_hw_access_post(i915);
  	return 0;
  }
@@ -283,7 +283,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
  	old = vgpu_vreg(vgpu, offset);
  	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
+	if (INTEL_GEN(vgpu->gvt->i915)  >=  9) {
  		switch (offset) {
  		case FORCEWAKE_RENDER_GEN9_REG:
  			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -341,7 +341,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
  			engine_mask |= BIT(VCS1);
  		}
-		engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
+		engine_mask &= INTEL_INFO(vgpu->gvt->i915)->engine_mask;
  	}
/* vgpu_lock already hold by emulate mmio r/w */
@@ -511,7 +511,7 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
  	u32 reg_nonpriv = *(u32 *)p_data;
  	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
  	u32 ring_base;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	int ret = -EINVAL;
if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
@@ -520,7 +520,7 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
  		return ret;
  	}
- ring_base = dev_priv->engine[ring_id]->mmio_base;
+	ring_base = i915->engine[ring_id]->mmio_base;
if (in_whitelist(reg_nonpriv) ||
  		reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
@@ -751,7 +751,7 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
  static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  		void *p_data, unsigned int bytes)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	u32 pipe = DSPSURF_TO_PIPE(offset);
  	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
@@ -792,7 +792,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
  			       unsigned int offset, void *p_data,
  			       unsigned int bytes)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	enum pipe pipe = REG_50080_TO_PIPE(offset);
  	enum plane_id plane = REG_50080_TO_PLANE(offset);
  	int event = SKL_FLIP_EVENT(pipe, plane);
@@ -816,7 +816,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
  static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
  		unsigned int reg)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	enum intel_gvt_event_type event;
if (reg == _DPA_AUX_CH_CTL)
@@ -916,11 +916,11 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
  	write_vreg(vgpu, offset, p_data, bytes);
  	data = vgpu_vreg(vgpu, offset);
- if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
+	if ((INTEL_GEN(vgpu->gvt->i915) >= 9)
  		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
  		/* SKL DPB/C/D aux ctl register changed */
  		return 0;
-	} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+	} else if (IS_BROADWELL(vgpu->gvt->i915) &&
  		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
  		/* write to the data registers */
  		return 0;
@@ -1236,8 +1236,8 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
+	struct kobject *kobj = &i915->drm.primary->kdev->kobj;
  	char *env[3] = {NULL, NULL, NULL};
  	char vmid_str[20];
  	char display_ready_str[20];
@@ -1416,9 +1416,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) {
  	case GEN9_PCODE_READ_MEM_LATENCY:
-		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-			 || IS_KABYLAKE(vgpu->gvt->dev_priv)
-			 || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
+		if (IS_SKYLAKE(vgpu->gvt->i915)
+			 || IS_KABYLAKE(vgpu->gvt->i915)
+			 || IS_COFFEELAKE(vgpu->gvt->i915)) {
  			/**
  			 * "Read memory latency" command on gen9.
  			 * Below memory latency values are read
@@ -1428,7 +1428,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
  				*data0 = 0x1e1a1100;
  			else
  				*data0 = 0x61514b3d;
-		} else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+		} else if (IS_BROXTON(vgpu->gvt->i915)) {
  			/**
  			 * "Read memory latency" command on gen9.
  			 * Below memory latency values are read
@@ -1441,9 +1441,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
  		}
  		break;
  	case SKL_PCODE_CDCLK_CONTROL:
-		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-			 || IS_KABYLAKE(vgpu->gvt->dev_priv)
-			 || IS_COFFEELAKE(vgpu->gvt->dev_priv))
+		if (IS_SKYLAKE(vgpu->gvt->i915)
+			 || IS_KABYLAKE(vgpu->gvt->i915)
+			 || IS_COFFEELAKE(vgpu->gvt->i915))
  			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
  		break;
  	case GEN6_PCODE_READ_RC6VIDS:
@@ -1496,7 +1496,7 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
  {
  	u32 v = *(u32 *)p_data;
- if (IS_BROXTON(vgpu->gvt->dev_priv))
+	if (IS_BROXTON(vgpu->gvt->i915))
  		v &= (1 << 31) | (1 << 29);
  	else
  		v &= (1 << 31) | (1 << 29) | (1 << 9) |
@@ -1633,7 +1633,7 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
  		unsigned int offset, void *p_data, unsigned int bytes)
  {
  	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	int ring_id;
  	u32 ring_base;
@@ -1645,14 +1645,14 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
  	 * c. the offset is ring time stamp mmio
  	 */
  	if (ring_id >= 0)
-		ring_base = dev_priv->engine[ring_id]->mmio_base;
+		ring_base = i915->engine[ring_id]->mmio_base;
if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
  	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
  	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
-		mmio_hw_access_pre(dev_priv);
+		mmio_hw_access_pre(i915);
  		vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
-		mmio_hw_access_post(dev_priv);
+		mmio_hw_access_post(i915);
  	}
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -1693,7 +1693,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  	int ret;
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
-	if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+	if (IS_COFFEELAKE(vgpu->gvt->i915))
  		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
  	write_vreg(vgpu, offset, p_data, bytes);
@@ -1702,7 +1702,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  		return 0;
  	}
- if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+	if (IS_COFFEELAKE(vgpu->gvt->i915) &&
  	    data & _MASKED_BIT_ENABLE(2)) {
  		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
  		return 0;
@@ -1834,7 +1834,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
  	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
  	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
  	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
-	if (HAS_ENGINE(dev_priv, VCS1)) \
+	if (HAS_ENGINE(i915, VCS1)) \
  		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
  } while (0)
@@ -1855,7 +1855,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
  static int init_generic_mmio_info(struct intel_gvt *gvt)
  {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
@@ -2670,7 +2670,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
static int init_broadwell_mmio_info(struct intel_gvt *gvt)
  {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	int ret;
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2859,7 +2859,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
static int init_skl_mmio_info(struct intel_gvt *gvt)
  {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
@@ -3108,7 +3108,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
static int init_bxt_mmio_info(struct intel_gvt *gvt)
  {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	int ret;
MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
@@ -3344,7 +3344,7 @@ static struct gvt_mmio_block mmio_blocks[] = {
  int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
  {
  	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
  	int ret;
@@ -3356,20 +3356,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
  	if (ret)
  		goto err;
- if (IS_BROADWELL(dev_priv)) {
+	if (IS_BROADWELL(i915)) {
  		ret = init_broadwell_mmio_info(gvt);
  		if (ret)
  			goto err;
-	} else if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_COFFEELAKE(dev_priv)) {
+	} else if (IS_SKYLAKE(i915)
+		|| IS_KABYLAKE(i915)
+		|| IS_COFFEELAKE(i915)) {
  		ret = init_broadwell_mmio_info(gvt);
  		if (ret)
  			goto err;
  		ret = init_skl_mmio_info(gvt);
  		if (ret)
  			goto err;
-	} else if (IS_BROXTON(dev_priv)) {
+	} else if (IS_BROXTON(i915)) {
  		ret = init_broadwell_mmio_info(gvt);
  		if (ret)
  			goto err;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 951681813230..37e2e72bfacc 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -536,7 +536,7 @@ static void gen8_init_irq(
  	SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
  	SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
+	if (HAS_ENGINE(gvt->i915, VCS1)) {
  		SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
  			INTEL_GVT_IRQ_INFO_GT1);
  		SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
@@ -568,7 +568,7 @@ static void gen8_init_irq(
  	SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
  	SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
- if (IS_BROADWELL(gvt->dev_priv)) {
+	if (IS_BROADWELL(gvt->i915)) {
  		SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
  		SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
  		SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
@@ -581,7 +581,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
  		SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-	} else if (INTEL_GEN(gvt->dev_priv) >= 9) {
+	} else if (INTEL_GEN(gvt->i915) >= 9) {
  		SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
  		SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
  		SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 144301b778df..615a7959b762 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -187,7 +187,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
  static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
  		dma_addr_t *dma_addr, unsigned long size)
  {
-	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &vgpu->gvt->i915->drm.pdev->dev;
  	struct page *page = NULL;
  	int ret;
@@ -210,7 +210,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
  static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
  		dma_addr_t dma_addr, unsigned long size)
  {
-	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &vgpu->gvt->i915->drm.pdev->dev;
dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
  	gvt_unpin_guest_page(vgpu, gfn, size);
@@ -913,7 +913,7 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
  		return -EINVAL;
  	}
- aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+	aperture_va = io_mapping_map_wc(&vgpu->gvt->i915->ggtt.iomap,
  					ALIGN_DOWN(off, PAGE_SIZE),
  					count + offset_in_page(off));
  	if (!aperture_va)
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index a55178884d67..572b8328af61 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -245,7 +245,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
  		/* set the bit 0:2(Core C-State ) to C0 */
  		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
- if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+		if (IS_BROXTON(vgpu->gvt->i915)) {
  			vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
  				    ~(BIT(0) | BIT(1));
  			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 2998999e8568..44fd6a0a698c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -148,7 +148,7 @@ static struct {
  	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
  } gen9_render_mocs;
-static void load_render_mocs(struct drm_i915_private *dev_priv)
+static void load_render_mocs(struct drm_i915_private *i915)
  {
  	i915_reg_t offset;
  	u32 regs[] = {
@@ -161,7 +161,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
  	int ring_id, i;
for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
-		if (!HAS_ENGINE(dev_priv, ring_id))
+		if (!HAS_ENGINE(i915, ring_id))
  			continue;
  		offset.reg = regs[ring_id];
  		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
@@ -329,8 +329,8 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
+	struct intel_uncore *uncore = &i915->uncore;
  	struct intel_vgpu_submission *s = &vgpu->submission;
  	enum forcewake_domains fw;
  	i915_reg_t reg;
@@ -357,7 +357,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
  	 */
  	fw = intel_uncore_forcewake_for_reg(uncore, reg,
  					    FW_REG_READ | FW_REG_WRITE);
-	if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
+	if (ring_id == RCS0 && INTEL_GEN(i915) >= 9)
  		fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(uncore, fw);
@@ -377,7 +377,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
  static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
  			int ring_id)
  {
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
  	i915_reg_t offset, l3_offset;
  	u32 old_v, new_v;
@@ -390,15 +390,15 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
  	};
  	int i;
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+	i915 = pre ? pre->gvt->i915 : next->gvt->i915;
  	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
  		return;
- if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
+	if (ring_id == RCS0 && IS_GEN(i915, 9))
  		return;
if (!pre && !gen9_render_mocs.initialized)
-		load_render_mocs(dev_priv);
+		load_render_mocs(i915);
offset.reg = regs[ring_id];
  	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
@@ -454,16 +454,16 @@ static void switch_mmio(struct intel_vgpu *pre,
  			struct intel_vgpu *next,
  			int ring_id)
  {
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
  	struct intel_vgpu_submission *s;
  	struct engine_mmio *mmio;
  	u32 old_v, new_v;
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-	if (INTEL_GEN(dev_priv) >= 9)
+	i915 = pre ? pre->gvt->i915 : next->gvt->i915;
+	if (INTEL_GEN(i915) >= 9)
  		switch_mocs(pre, next, ring_id);
- for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
+	for (mmio = i915->gvt->engine_mmio_list.mmio;
  	     i915_mmio_reg_valid(mmio->reg); mmio++) {
  		if (mmio->ring_id != ring_id)
  			continue;
@@ -472,7 +472,7 @@ static void switch_mmio(struct intel_vgpu *pre,
  		 * state image on gen9, it's initialized by lri command and
  		 * save or restore with context together.
  		 */
-		if (IS_GEN(dev_priv, 9) && mmio->in_context)
+		if (IS_GEN(i915, 9) && mmio->in_context)
  			continue;
// save
@@ -536,7 +536,7 @@ static void switch_mmio(struct intel_vgpu *pre,
  void intel_gvt_switch_mmio(struct intel_vgpu *pre,
  			   struct intel_vgpu *next, int ring_id)
  {
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
if (WARN_ON(!pre && !next))
  		return;
@@ -544,16 +544,16 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
  	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
  		       pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+	i915 = pre ? pre->gvt->i915 : next->gvt->i915;
/**
  	 * We are using raw mmio access wrapper to improve the
  	 * performace for batch mmio read/write, so we need
  	 * handle forcewake mannually.
  	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
  	switch_mmio(pre, next, ring_id);
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
/**
@@ -565,7 +565,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
  {
  	struct engine_mmio *mmio;
- if (INTEL_GEN(gvt->dev_priv) >= 9)
+	if (INTEL_GEN(gvt->i915) >= 9)
  		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
  	else
  		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 1c763a27a412..64a5d4688c89 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -39,7 +39,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
  	enum intel_engine_id i;
  	struct intel_engine_cs *engine;
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+	for_each_engine(engine, vgpu->gvt->i915, i) {
  		if (!list_empty(workload_q_head(vgpu, i)))
  			return true;
  	}
@@ -152,7 +152,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
  	scheduler->need_reschedule = true;
/* still have uncompleted workload? */
-	for_each_engine(engine, gvt->dev_priv, i) {
+	for_each_engine(engine, gvt->i915, i) {
  		if (scheduler->current_workload[i])
  			return;
  	}
@@ -169,7 +169,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
  	scheduler->need_reschedule = false;
/* wake up workload dispatch thread */
-	for_each_engine(engine, gvt->dev_priv, i)
+	for_each_engine(engine, gvt->i915, i)
  		wake_up(&scheduler->waitq[i]);
  }
@@ -446,7 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
  		&vgpu->gvt->scheduler;
  	int ring_id;
  	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
if (!vgpu_data->active)
  		return;
@@ -465,7 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
  		scheduler->current_vgpu = NULL;
  	}
- intel_runtime_pm_get(dev_priv);
+	intel_runtime_pm_get(i915);
  	spin_lock_bh(&scheduler->mmio_context_lock);
  	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
  		if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
  		}
  	}
  	spin_unlock_bh(&scheduler->mmio_context_lock);
-	intel_runtime_pm_put_unchecked(dev_priv);
+	intel_runtime_pm_put_unchecked(i915);
  	mutex_unlock(&vgpu->gvt->sched_lock);
  }
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e301efb18d45..ec7299d8a738 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -83,9 +83,9 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
  static void sr_oa_regs(struct intel_vgpu_workload *workload,
  		u32 *reg_state, bool save)
  {
-	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
-	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
-	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+	struct drm_i915_private *i915 = workload->vgpu->gvt->i915;
+	u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
+	u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
  	int i = 0;
  	u32 flex_mmio[] = {
  		i915_mmio_reg_offset(EU_PERF_CNTL0),
@@ -177,11 +177,11 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
  	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
  			workload->ctx_desc.lrca);
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+	context_page_num = gvt->i915->engine[ring_id]->context_size;
  	context_page_num = context_page_num >> PAGE_SHIFT;
-	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
+	if (IS_BROADWELL(gvt->i915) && ring_id == RCS0)
  		context_page_num = 19;
i = 2;
@@ -211,8 +211,8 @@ static inline bool is_gvt_request(struct i915_request *req)
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
  {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
+	u32 ring_base = i915->engine[ring_id]->mmio_base;
  	i915_reg_t reg;
reg = RING_INSTDONE(ring_base);
@@ -391,10 +391,10 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
  {
  	struct intel_vgpu *vgpu = workload->vgpu;
  	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	struct i915_request *rq;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
if (workload->req)
  		return 0;
@@ -421,10 +421,10 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
  {
  	struct intel_vgpu *vgpu = workload->vgpu;
  	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
if (workload->shadow)
  		return 0;
@@ -577,7 +577,7 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
  static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
  {
  	struct intel_vgpu *vgpu = workload->vgpu;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	struct intel_vgpu_shadow_bb *bb, *pos;
if (list_empty(&workload->shadow_bb))
@@ -586,7 +586,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
  	bb = list_first_entry(&workload->shadow_bb,
  			struct intel_vgpu_shadow_bb, list);
- mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
  		if (bb->obj) {
@@ -606,7 +606,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
  		kfree(bb);
  	}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
  }
static int prepare_workload(struct intel_vgpu_workload *workload)
@@ -671,7 +671,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
  static int dispatch_workload(struct intel_vgpu_workload *workload)
  {
  	struct intel_vgpu *vgpu = workload->vgpu;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	struct intel_vgpu_submission *s = &vgpu->submission;
  	struct i915_request *rq;
  	int ring_id = workload->ring_id;
@@ -681,7 +681,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
  		ring_id, workload);
mutex_lock(&vgpu->vgpu_lock);
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
ret = set_context_ppgtt_from_shadow(workload,
  					    s->shadow[ring_id]->gem_context);
@@ -723,7 +723,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
  err_req:
  	if (ret)
  		workload->status = ret;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
  	mutex_unlock(&vgpu->vgpu_lock);
  	return ret;
  }
@@ -796,7 +796,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
  	void *src;
  	unsigned long context_gpa, context_page_num;
  	int i;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->i915;
  	u32 ring_base;
  	u32 head, tail;
  	u16 wrap_count;
@@ -817,14 +817,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
  	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
-	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+	ring_base = i915->engine[workload->ring_id]->mmio_base;
  	vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
  	vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
context_page_num = rq->engine->context_size;
  	context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
+	if (IS_BROADWELL(gvt->i915) && rq->engine->id == RCS0)
  		context_page_num = 19;
i = 2;
@@ -875,13 +875,13 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
  				intel_engine_mask_t engine_mask)
  {
  	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	struct intel_engine_cs *engine;
  	struct intel_vgpu_workload *pos, *n;
  	intel_engine_mask_t tmp;
/* free the unsubmited workloads in the queues. */
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+	for_each_engine_masked(engine, i915, engine_mask, tmp) {
  		list_for_each_entry_safe(pos, n,
  			&s->workload_q_head[engine->id], list) {
  			list_del_init(&pos->list);
@@ -986,7 +986,7 @@ static int workload_thread(void *priv)
  	struct intel_vgpu_workload *workload = NULL;
  	struct intel_vgpu *vgpu = NULL;
  	int ret;
-	bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
+	bool need_force_wake = (INTEL_GEN(gvt->i915) >= 9);
  	DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
@@ -1015,7 +1015,7 @@ static int workload_thread(void *priv)
  				workload->ring_id, workload);
if (need_force_wake)
-			intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
+			intel_uncore_forcewake_get(&gvt->i915->uncore,
  					FORCEWAKE_ALL);
ret = dispatch_workload(workload);
@@ -1037,7 +1037,7 @@ static int workload_thread(void *priv)
  		complete_current_workload(gvt, ring_id);
if (need_force_wake)
-			intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
+			intel_uncore_forcewake_put(&gvt->i915->uncore,
  					FORCEWAKE_ALL);
if (ret && (vgpu_is_vm_unhealthy(ret)))
@@ -1068,7 +1068,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
gvt_dbg_core("clean workload scheduler\n"); - for_each_engine(engine, gvt->dev_priv, i) {
+	for_each_engine(engine, gvt->i915, i) {
  		atomic_notifier_chain_unregister(
  					&engine->context_status_notifier,
  					&gvt->shadow_ctx_notifier_block[i]);
@@ -1088,7 +1088,7 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
  	init_waitqueue_head(&scheduler->workload_complete_wq);
-	for_each_engine(engine, gvt->dev_priv, i) {
+	for_each_engine(engine, gvt->i915, i) {
  		init_waitqueue_head(&scheduler->waitq[i]);
param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -1152,7 +1152,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
  	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
-	for_each_engine(engine, vgpu->gvt->dev_priv, id)
+	for_each_engine(engine, vgpu->gvt->i915, id)
  		intel_context_unpin(s->shadow[id]);
kmem_cache_destroy(s->workloads);
@@ -1212,13 +1212,13 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
  	enum intel_engine_id i;
  	int ret;
- ctx = i915_gem_context_create_gvt(&vgpu->gvt->dev_priv->drm);
+	ctx = i915_gem_context_create_gvt(&vgpu->gvt->i915->drm);
  	if (IS_ERR(ctx))
  		return PTR_ERR(ctx);
  	i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+	for_each_engine(engine, vgpu->gvt->i915, i) {
  		struct intel_context *ce;
INIT_LIST_HEAD(&s->workload_q_head[i]);
@@ -1260,7 +1260,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
out_shadow_ctx:
  	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+	for_each_engine(engine, vgpu->gvt->i915, i) {
  		if (IS_ERR(s->shadow[i]))
  			break;
@@ -1439,7 +1439,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
  	struct list_head *q = workload_q_head(vgpu, ring_id);
  	struct intel_vgpu_workload *last_workload = get_last_workload(q);
  	struct intel_vgpu_workload *workload = NULL;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->i915;
  	u64 ring_context_gpa;
  	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
  	u32 guest_head;
@@ -1526,11 +1526,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
  	 * as there is only one pre-allocated buf-obj for shadow.
  	 */
  	if (list_empty(workload_q_head(vgpu, ring_id))) {
-		intel_runtime_pm_get(dev_priv);
-		mutex_lock(&dev_priv->drm.struct_mutex);
+		intel_runtime_pm_get(i915);
+		mutex_lock(&i915->drm.struct_mutex);
  		ret = intel_gvt_scan_and_shadow_workload(workload);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-		intel_runtime_pm_put_unchecked(dev_priv);
+		mutex_unlock(&i915->drm.struct_mutex);
+		intel_runtime_pm_put_unchecked(i915);
  	}
if (ret) {
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 44ce3c2b9ac1..a311737c9633 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
  		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
  						   high_avail / vgpu_types[i].high_mm);
- if (IS_GEN(gvt->dev_priv, 8))
+		if (IS_GEN(gvt->i915, 8))
  			sprintf(gvt->types[i].name, "GVTg_V4_%s",
  						vgpu_types[i].name);
-		else if (IS_GEN(gvt->dev_priv, 9))
+		else if (IS_GEN(gvt->i915, 9))
  			sprintf(gvt->types[i].name, "GVTg_V5_%s",
  						vgpu_types[i].name);
@@ -429,7 +429,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
  		goto out_clean_sched_policy;
/*TODO: add more platforms support */
-	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+	if (IS_SKYLAKE(gvt->i915) || IS_KABYLAKE(gvt->i915))
  		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
  	if (ret)
  		goto out_clean_sched_policy;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a28bcd2d7c09..e9d076167b24 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1342,21 +1342,21 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
/**
   * i915_cmd_parser_get_version() - get the cmd parser version number
- * @dev_priv: i915 device private
+ * @i915: i915 device private
   *
   * The cmd parser maintains a simple increasing integer version number suitable
   * for passing to userspace clients to determine what operations are permitted.
   *
   * Return: the current version number of the cmd parser
   */
-int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
+int i915_cmd_parser_get_version(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
  	bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
-	for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		if (intel_engine_needs_cmd_parser(engine)) {
  			active = true;
  			break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 026fb46020f6..fec01858685d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -55,17 +55,17 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
static int i915_capabilities(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const struct intel_device_info *info = INTEL_INFO(dev_priv);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	const struct intel_device_info *info = INTEL_INFO(i915);
  	struct drm_printer p = drm_seq_file_printer(m);
- seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
+	seq_printf(m, "gen: %d\n", INTEL_GEN(i915));
  	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
-	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
+	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
intel_device_info_dump_flags(info, &p);
-	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
-	intel_driver_caps_print(&dev_priv->caps, &p);
+	intel_device_info_dump_runtime(RUNTIME_INFO(i915), &p);
+	intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
  	i915_params_dump(&i915_modparams, &p);
@@ -137,7 +137,7 @@ stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
  static void
  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
  	struct intel_engine_cs *engine;
  	struct i915_vma *vma;
  	unsigned int frontbuffer_bits;
@@ -155,7 +155,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
  		   obj->base.size / 1024,
  		   obj->read_domains,
  		   obj->write_domain,
-		   i915_cache_level_str(dev_priv, obj->cache_level),
+		   i915_cache_level_str(i915, obj->cache_level),
  		   obj->mm.dirty ? " dirty" : "",
  		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
  	if (obj->base.name)
@@ -296,7 +296,7 @@ static int per_file_stats(int id, void *ptr, void *data)
  } while (0)
static void print_batch_pool_stats(struct seq_file *m,
-				   struct drm_i915_private *dev_priv)
+				   struct drm_i915_private *i915)
  {
  	struct drm_i915_gem_object *obj;
  	struct intel_engine_cs *engine;
@@ -304,7 +304,7 @@ static void print_batch_pool_stats(struct seq_file *m,
  	enum intel_engine_id id;
  	int j;
- for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
  			list_for_each_entry(obj,
  					    &engine->batch_pool.cache_list[j],
@@ -382,8 +382,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	struct drm_i915_gem_object *obj;
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
@@ -394,7 +394,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
  	if (ret)
  		return ret;
- for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
  			int count;
@@ -427,15 +427,15 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
  static void gen8_display_interrupt_info(struct seq_file *m)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	int pipe;
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		enum intel_display_power_domain power_domain;
  		intel_wakeref_t wakeref;
power_domain = POWER_DOMAIN_PIPE(pipe);
-		wakeref = intel_display_power_get_if_enabled(dev_priv,
+		wakeref = intel_display_power_get_if_enabled(i915,
  							     power_domain);
  		if (!wakeref) {
  			seq_printf(m, "Pipe %c power disabled\n",
@@ -452,7 +452,7 @@ static void gen8_display_interrupt_info(struct seq_file *m)
  			   pipe_name(pipe),
  			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
- intel_display_power_put(dev_priv, power_domain, wakeref);
+		intel_display_power_put(i915, power_domain, wakeref);
  	}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -479,15 +479,15 @@ static void gen8_display_interrupt_info(struct seq_file *m)
static int i915_interrupt_info(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
  	intel_wakeref_t wakeref;
  	int i, pipe;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
- if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		intel_wakeref_t pref;
seq_printf(m, "Master Interrupt Control:\t%08x\n",
@@ -501,11 +501,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
  			   I915_READ(VLV_IIR_RW));
  		seq_printf(m, "Display IMR:\t%08x\n",
  			   I915_READ(VLV_IMR));
-		for_each_pipe(dev_priv, pipe) {
+		for_each_pipe(i915, pipe) {
  			enum intel_display_power_domain power_domain;
power_domain = POWER_DOMAIN_PIPE(pipe);
-			pref = intel_display_power_get_if_enabled(dev_priv,
+			pref = intel_display_power_get_if_enabled(i915,
  								  power_domain);
  			if (!pref) {
  				seq_printf(m, "Pipe %c power disabled\n",
@@ -517,17 +517,17 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
  				   pipe_name(pipe),
  				   I915_READ(PIPESTAT(pipe)));
- intel_display_power_put(dev_priv, power_domain, pref);
+			intel_display_power_put(i915, power_domain, pref);
  		}
- pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+		pref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
  		seq_printf(m, "Port hotplug:\t%08x\n",
  			   I915_READ(PORT_HOTPLUG_EN));
  		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
  			   I915_READ(VLV_DPFLIPSTAT));
  		seq_printf(m, "DPINVGTT:\t%08x\n",
  			   I915_READ(DPINVGTT));
-		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
+		intel_display_power_put(i915, POWER_DOMAIN_INIT, pref);
for (i = 0; i < 4; i++) {
  			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -544,7 +544,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
  			   I915_READ(GEN8_PCU_IIR));
  		seq_printf(m, "PCU interrupt enable:\t%08x\n",
  			   I915_READ(GEN8_PCU_IER));
-	} else if (INTEL_GEN(dev_priv) >= 11) {
+	} else if (INTEL_GEN(i915) >= 11) {
  		seq_printf(m, "Master Interrupt Control:  %08x\n",
  			   I915_READ(GEN11_GFX_MSTR_IRQ));
@@ -565,7 +565,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
  			   I915_READ(GEN11_DISPLAY_INT_CTL));
gen8_display_interrupt_info(m);
-	} else if (INTEL_GEN(dev_priv) >= 8) {
+	} else if (INTEL_GEN(i915) >= 8) {
  		seq_printf(m, "Master Interrupt Control:\t%08x\n",
  			   I915_READ(GEN8_MASTER_IRQ));
@@ -579,7 +579,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
  		}
gen8_display_interrupt_info(m);
-	} else if (IS_VALLEYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915)) {
  		seq_printf(m, "Display IER:\t%08x\n",
  			   I915_READ(VLV_IER));
  		seq_printf(m, "Display IIR:\t%08x\n",
@@ -588,12 +588,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
  			   I915_READ(VLV_IIR_RW));
  		seq_printf(m, "Display IMR:\t%08x\n",
  			   I915_READ(VLV_IMR));
-		for_each_pipe(dev_priv, pipe) {
+		for_each_pipe(i915, pipe) {
  			enum intel_display_power_domain power_domain;
  			intel_wakeref_t pref;
power_domain = POWER_DOMAIN_PIPE(pipe);
-			pref = intel_display_power_get_if_enabled(dev_priv,
+			pref = intel_display_power_get_if_enabled(i915,
  								  power_domain);
  			if (!pref) {
  				seq_printf(m, "Pipe %c power disabled\n",
@@ -604,7 +604,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
  			seq_printf(m, "Pipe %c stat:\t%08x\n",
  				   pipe_name(pipe),
  				   I915_READ(PIPESTAT(pipe)));
-			intel_display_power_put(dev_priv, power_domain, pref);
+			intel_display_power_put(i915, power_domain, pref);
  		}
seq_printf(m, "Master IER:\t%08x\n",
@@ -631,14 +631,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
  		seq_printf(m, "DPINVGTT:\t%08x\n",
  			   I915_READ(DPINVGTT));
- } else if (!HAS_PCH_SPLIT(dev_priv)) {
+	} else if (!HAS_PCH_SPLIT(i915)) {
  		seq_printf(m, "Interrupt enable:    %08x\n",
  			   I915_READ(GEN2_IER));
  		seq_printf(m, "Interrupt identity:  %08x\n",
  			   I915_READ(GEN2_IIR));
  		seq_printf(m, "Interrupt mask:      %08x\n",
  			   I915_READ(GEN2_IMR));
-		for_each_pipe(dev_priv, pipe)
+		for_each_pipe(i915, pipe)
  			seq_printf(m, "Pipe %c stat:         %08x\n",
  				   pipe_name(pipe),
  				   I915_READ(PIPESTAT(pipe)));
@@ -663,7 +663,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
  			   I915_READ(GTIMR));
  	}
- if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
  		seq_printf(m, "RCS Intr Mask:\t %08x\n",
  			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
  		seq_printf(m, "BCS Intr Mask:\t %08x\n",
@@ -683,35 +683,35 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
  		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
  			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
- } else if (INTEL_GEN(dev_priv) >= 6) {
-		for_each_engine(engine, dev_priv, id) {
+	} else if (INTEL_GEN(i915) >= 6) {
+		for_each_engine(engine, i915, id) {
  			seq_printf(m,
  				   "Graphics Interrupt mask (%s):	%08x\n",
  				   engine->name, ENGINE_READ(engine, RING_IMR));
  		}
  	}
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return 0;
  }
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	int i, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
  	if (ret)
  		return ret;
- seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct i915_vma *vma = dev_priv->fence_regs[i].vma;
+	seq_printf(m, "Total fences = %d\n", i915->num_fence_regs);
+	for (i = 0; i < i915->num_fence_regs; i++) {
+		struct i915_vma *vma = i915->fence_regs[i].vma;
seq_printf(m, "Fence %d, pin count = %d, object = ",
-			   i, dev_priv->fence_regs[i].pin_count);
+			   i, i915->fence_regs[i].pin_count);
  		if (!vma)
  			seq_puts(m, "unused");
  		else
@@ -825,15 +825,15 @@ static const struct file_operations i915_error_state_fops = {
static int i915_frequency_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct intel_uncore *uncore = &i915->uncore;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	intel_wakeref_t wakeref;
  	int ret = 0;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
- if (IS_GEN(dev_priv, 5)) {
+	if (IS_GEN(i915, 5)) {
  		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
  		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
@@ -843,7 +843,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
  			   MEMSTAT_VID_SHIFT);
  		seq_printf(m, "Current P-state: %d\n",
  			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		u32 rpmodectl, freq_sts;
rpmodectl = I915_READ(GEN6_RP_CONTROL);
@@ -855,32 +855,32 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
  			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
  				  GEN6_RP_MEDIA_SW_MODE));
- vlv_punit_get(dev_priv);
-		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-		vlv_punit_put(dev_priv);
+		vlv_punit_get(i915);
+		freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+		vlv_punit_put(i915);
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
-		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
+		seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq);
seq_printf(m, "actual GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+			   intel_gpu_freq(i915, (freq_sts >> 8) & 0xff));
seq_printf(m, "current GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->cur_freq));
+			   intel_gpu_freq(i915, rps->cur_freq));
seq_printf(m, "max GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(i915, rps->max_freq));
seq_printf(m, "min GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->min_freq));
+			   intel_gpu_freq(i915, rps->min_freq));
seq_printf(m, "idle GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->idle_freq));
+			   intel_gpu_freq(i915, rps->idle_freq));
seq_printf(m,
  			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->efficient_freq));
-	} else if (INTEL_GEN(dev_priv) >= 6) {
+			   intel_gpu_freq(i915, rps->efficient_freq));
+	} else if (INTEL_GEN(i915) >= 6) {
  		u32 rp_state_limits;
  		u32 gt_perf_status;
  		u32 rp_state_cap;
@@ -892,7 +892,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
  		int max_freq;
rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
-		if (IS_GEN9_LP(dev_priv)) {
+		if (IS_GEN9_LP(i915)) {
  			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
  			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
  		} else {
@@ -901,19 +901,19 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
  		}
/* RPSTAT1 is in the GT power well */
-		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+		intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
-		if (INTEL_GEN(dev_priv) >= 9)
+		if (INTEL_GEN(i915) >= 9)
  			reqf >>= 23;
  		else {
  			reqf &= ~GEN6_TURBO_DISABLE;
-			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+			if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  				reqf >>= 24;
  			else
  				reqf >>= 25;
  		}
-		reqf = intel_gpu_freq(dev_priv, reqf);
+		reqf = intel_gpu_freq(i915, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
  		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -926,12 +926,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
  		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
  		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
  		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-		cagf = intel_gpu_freq(dev_priv,
-				      intel_get_cagf(dev_priv, rpstat));
+		cagf = intel_gpu_freq(i915,
+				      intel_get_cagf(i915, rpstat));
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+		intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- if (INTEL_GEN(dev_priv) >= 11) {
+		if (INTEL_GEN(i915) >= 11) {
  			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
  			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
  			/*
@@ -940,7 +940,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
  			 */
  			pm_isr = 0;
  			pm_iir = 0;
-		} else if (INTEL_GEN(dev_priv) >= 8) {
+		} else if (INTEL_GEN(i915) >= 8) {
  			pm_ier = I915_READ(GEN8_GT_IER(2));
  			pm_imr = I915_READ(GEN8_GT_IMR(2));
  			pm_isr = I915_READ(GEN8_GT_ISR(2));
@@ -963,14 +963,14 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
  			   pm_ier, pm_imr, pm_mask);
-		if (INTEL_GEN(dev_priv) <= 10)
+		if (INTEL_GEN(i915) <= 10)
  			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
  				   pm_isr, pm_iir);
  		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
  			   rps->pm_intrmsk_mbz);
  		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
  		seq_printf(m, "Render p-state ratio: %d\n",
-			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+			   (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
  		seq_printf(m, "Render p-state VID: %d\n",
  			   gt_perf_status & 0xff);
  		seq_printf(m, "Render p-state limit: %d\n",
@@ -982,72 +982,72 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
  		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
  		seq_printf(m, "CAGF: %dMHz\n", cagf);
  		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
-			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
+			   rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei));
  		seq_printf(m, "RP CUR UP: %d (%dus)\n",
-			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
+			   rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup));
  		seq_printf(m, "RP PREV UP: %d (%dus)\n",
-			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
+			   rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup));
  		seq_printf(m, "Up threshold: %d%%\n",
  			   rps->power.up_threshold);
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
-			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
+			   rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei));
  		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
-			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
+			   rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown));
  		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
-			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
+			   rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown));
  		seq_printf(m, "Down threshold: %d%%\n",
  			   rps->power.down_threshold);
- max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
+		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
  			    rp_state_cap >> 16) & 0xff;
-		max_freq *= (IS_GEN9_BC(dev_priv) ||
-			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+		max_freq *= (IS_GEN9_BC(i915) ||
+			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
  		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(i915, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
-		max_freq *= (IS_GEN9_BC(dev_priv) ||
-			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+		max_freq *= (IS_GEN9_BC(i915) ||
+			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
  		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(i915, max_freq));
- max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
+		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
  			    rp_state_cap >> 0) & 0xff;
-		max_freq *= (IS_GEN9_BC(dev_priv) ||
-			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+		max_freq *= (IS_GEN9_BC(i915) ||
+			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
  		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(i915, max_freq));
  		seq_printf(m, "Max overclocked frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(i915, rps->max_freq));
seq_printf(m, "Current freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->cur_freq));
+			   intel_gpu_freq(i915, rps->cur_freq));
  		seq_printf(m, "Actual freq: %d MHz\n", cagf);
  		seq_printf(m, "Idle freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->idle_freq));
+			   intel_gpu_freq(i915, rps->idle_freq));
  		seq_printf(m, "Min freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->min_freq));
+			   intel_gpu_freq(i915, rps->min_freq));
  		seq_printf(m, "Boost freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->boost_freq));
+			   intel_gpu_freq(i915, rps->boost_freq));
  		seq_printf(m, "Max freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(i915, rps->max_freq));
  		seq_printf(m,
  			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->efficient_freq));
+			   intel_gpu_freq(i915, rps->efficient_freq));
  	} else {
  		seq_puts(m, "no P-state info available\n");
  	}
- seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
-	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
-	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
+	seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
+	seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
+	seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
  	return ret;
  }
-static void i915_instdone_info(struct drm_i915_private *dev_priv,
+static void i915_instdone_info(struct drm_i915_private *i915,
  			       struct seq_file *m,
  			       struct intel_instdone *instdone)
  {
@@ -1057,37 +1057,37 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv,
  	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
  		   instdone->instdone);
- if (INTEL_GEN(dev_priv) <= 3)
+	if (INTEL_GEN(i915) <= 3)
  		return;
seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
  		   instdone->slice_common);
- if (INTEL_GEN(dev_priv) <= 6)
+	if (INTEL_GEN(i915) <= 6)
  		return;
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+	for_each_instdone_slice_subslice(i915, slice, subslice)
  		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
  			   slice, subslice, instdone->sampler[slice][subslice]);
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+	for_each_instdone_slice_subslice(i915, slice, subslice)
  		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
  			   slice, subslice, instdone->row[slice][subslice]);
  }
static int i915_hangcheck_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	struct intel_engine_cs *engine;
  	u64 acthd[I915_NUM_ENGINES];
  	struct intel_instdone instdone;
  	intel_wakeref_t wakeref;
  	enum intel_engine_id id;
- seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
-	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+	seq_printf(m, "Reset flags: %lx\n", i915->gpu_error.flags);
+	if (test_bit(I915_WEDGED, &i915->gpu_error.flags))
  		seq_puts(m, "\tWedged\n");
-	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
+	if (test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
  		seq_puts(m, "\tDevice (global) reset in progress\n");
if (!i915_modparams.enable_hangcheck) {
@@ -1095,25 +1095,25 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
  		return 0;
  	}
- with_intel_runtime_pm(dev_priv, wakeref) {
-		for_each_engine(engine, dev_priv, id)
+	with_intel_runtime_pm(i915, wakeref) {
+		for_each_engine(engine, i915, id)
  			acthd[id] = intel_engine_get_active_head(engine);
- intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
+		intel_engine_get_instdone(i915->engine[RCS0], &instdone);
  	}
- if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
+	if (timer_pending(&i915->gpu_error.hangcheck_work.timer))
  		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
-			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
+			   jiffies_to_msecs(i915->gpu_error.hangcheck_work.timer.expires -
  					    jiffies));
-	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
+	else if (delayed_work_pending(&i915->gpu_error.hangcheck_work))
  		seq_puts(m, "Hangcheck active, work pending\n");
  	else
  		seq_puts(m, "Hangcheck inactive\n");
- seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
+	seq_printf(m, "GT active? %s\n", yesno(i915->gt.awake));
- for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		seq_printf(m, "%s: %d ms ago\n",
  			   engine->name,
  			   jiffies_to_msecs(jiffies -
@@ -1126,11 +1126,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
  		if (engine->id == RCS0) {
  			seq_puts(m, "\tinstdone read =\n");
- i915_instdone_info(dev_priv, m, &instdone);
+			i915_instdone_info(i915, m, &instdone);
seq_puts(m, "\tinstdone accu =\n"); - i915_instdone_info(dev_priv, m,
+			i915_instdone_info(i915, m,
  					   &engine->hangcheck.instdone);
  		}
  	}
@@ -1140,14 +1140,14 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
static int i915_reset_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct i915_gpu_error *error = &dev_priv->gpu_error;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct i915_gpu_error *error = &i915->gpu_error;
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error)); - for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		seq_printf(m, "%s = %u\n", engine->name,
  			   i915_reset_engine_count(error, engine));
  	}
@@ -1235,16 +1235,16 @@ static void print_rc6_res(struct seq_file *m,
  			  const char *title,
  			  const i915_reg_t reg)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
seq_printf(m, "%s %u (%llu us)\n",
  		   title, I915_READ(reg),
-		   intel_rc6_residency_us(dev_priv, reg));
+		   intel_rc6_residency_us(i915, reg));
  }
static int vlv_drpc_info(struct seq_file *m)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	u32 rcctl1, pw_status;
pw_status = I915_READ(VLV_GTLC_PW_STATUS);
@@ -1266,7 +1266,7 @@ static int vlv_drpc_info(struct seq_file *m)
static int gen6_drpc_info(struct seq_file *m)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	u32 gt_core_status, rcctl1, rc6vids = 0;
  	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
@@ -1274,20 +1274,20 @@ static int gen6_drpc_info(struct seq_file *m)
  	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
  		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
  	}
- if (INTEL_GEN(dev_priv) <= 7)
-		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
+	if (INTEL_GEN(i915) <= 7)
+		sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
  				       &rc6vids, NULL);
seq_printf(m, "RC1e Enabled: %s\n",
  		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
  	seq_printf(m, "RC6 Enabled: %s\n",
  		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		seq_printf(m, "Render Well Gating Enabled: %s\n",
  			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
  		seq_printf(m, "Media Well Gating Enabled: %s\n",
@@ -1321,7 +1321,7 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "Core Power Down: %s\n",
  		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		seq_printf(m, "Render Power Well: %s\n",
  			(gen9_powergate_status &
  			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
@@ -1337,7 +1337,7 @@ static int gen6_drpc_info(struct seq_file *m)
  	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
  	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
- if (INTEL_GEN(dev_priv) <= 7) {
+	if (INTEL_GEN(i915) <= 7) {
  		seq_printf(m, "RC6   voltage: %dmV\n",
  			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
  		seq_printf(m, "RC6+  voltage: %dmV\n",
@@ -1351,14 +1351,14 @@ static int gen6_drpc_info(struct seq_file *m)
static int i915_drpc_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	intel_wakeref_t wakeref;
  	int err = -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	with_intel_runtime_pm(i915, wakeref) {
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  			err = vlv_drpc_info(m);
-		else if (INTEL_GEN(dev_priv) >= 6)
+		else if (INTEL_GEN(i915) >= 6)
  			err = gen6_drpc_info(m);
  		else
  			err = ironlake_drpc_info(m);
@@ -1369,44 +1369,44 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
-		   dev_priv->fb_tracking.busy_bits);
+		   i915->fb_tracking.busy_bits);
seq_printf(m, "FB tracking flip bits: 0x%08x\n",
-		   dev_priv->fb_tracking.flip_bits);
+		   i915->fb_tracking.flip_bits);
return 0;
  }
static int i915_fbc_status(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct intel_fbc *fbc = &i915->fbc;
  	intel_wakeref_t wakeref;
- if (!HAS_FBC(dev_priv))
+	if (!HAS_FBC(i915))
  		return -ENODEV;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
  	mutex_lock(&fbc->lock);
- if (intel_fbc_is_active(dev_priv))
+	if (intel_fbc_is_active(i915))
  		seq_puts(m, "FBC enabled\n");
  	else
  		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
- if (intel_fbc_is_active(dev_priv)) {
+	if (intel_fbc_is_active(i915)) {
  		u32 mask;
- if (INTEL_GEN(dev_priv) >= 8)
+		if (INTEL_GEN(i915) >= 8)
  			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
-		else if (INTEL_GEN(dev_priv) >= 7)
+		else if (INTEL_GEN(i915) >= 7)
  			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
-		else if (INTEL_GEN(dev_priv) >= 5)
+		else if (INTEL_GEN(i915) >= 5)
  			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
-		else if (IS_G4X(dev_priv))
+		else if (IS_G4X(i915))
  			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
  		else
  			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
@@ -1416,41 +1416,41 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
  	}
mutex_unlock(&fbc->lock);
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return 0;
  }
static int i915_fbc_false_color_get(void *data, u64 *val)
  {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+	if (INTEL_GEN(i915) < 7 || !HAS_FBC(i915))
  		return -ENODEV;
- *val = dev_priv->fbc.false_color;
+	*val = i915->fbc.false_color;
return 0;
  }
static int i915_fbc_false_color_set(void *data, u64 val)
  {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
  	u32 reg;
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+	if (INTEL_GEN(i915) < 7 || !HAS_FBC(i915))
  		return -ENODEV;
- mutex_lock(&dev_priv->fbc.lock);
+	mutex_lock(&i915->fbc.lock);
reg = I915_READ(ILK_DPFC_CONTROL);
-	dev_priv->fbc.false_color = val;
+	i915->fbc.false_color = val;
I915_WRITE(ILK_DPFC_CONTROL, val ?
  		   (reg | FBC_CTL_FALSE_COLOR) :
  		   (reg & ~FBC_CTL_FALSE_COLOR));
- mutex_unlock(&dev_priv->fbc.lock);
+	mutex_unlock(&i915->fbc.lock);
  	return 0;
  }
@@ -1460,18 +1460,18 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
static int i915_ips_status(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	intel_wakeref_t wakeref;
- if (!HAS_IPS(dev_priv))
+	if (!HAS_IPS(i915))
  		return -ENODEV;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
seq_printf(m, "Enabled by kernel parameter: %s\n",
  		   yesno(i915_modparams.enable_ips));
- if (INTEL_GEN(dev_priv) >= 8) {
+	if (INTEL_GEN(i915) >= 8) {
  		seq_puts(m, "Currently: unknown\n");
  	} else {
  		if (I915_READ(IPS_CTL) & IPS_ENABLE)
@@ -1480,34 +1480,34 @@ static int i915_ips_status(struct seq_file *m, void *unused)
  			seq_puts(m, "Currently: disabled\n");
  	}
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return 0;
  }
static int i915_sr_status(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	intel_wakeref_t wakeref;
  	bool sr_enabled = false;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+	wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
- if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		/* no global SR status; inspect per-plane WM */;
-	else if (HAS_PCH_SPLIT(dev_priv))
+	else if (HAS_PCH_SPLIT(i915))
  		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
-		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+	else if (IS_I965GM(i915) || IS_G4X(i915) ||
+		 IS_I945G(i915) || IS_I945GM(i915))
  		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
-	else if (IS_I915GM(dev_priv))
+	else if (IS_I915GM(i915))
  		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
-	else if (IS_PINEVIEW(dev_priv))
+	else if (IS_PINEVIEW(i915))
  		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); @@ -1540,18 +1540,18 @@ static int i915_emon_status(struct seq_file *m, void *unused) static int i915_ring_freq_table(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	unsigned int max_gpu_freq, min_gpu_freq;
  	intel_wakeref_t wakeref;
  	int gpu_freq, ia_freq;
- if (!HAS_LLC(dev_priv))
+	if (!HAS_LLC(i915))
  		return -ENODEV;
min_gpu_freq = rps->min_freq;
  	max_gpu_freq = rps->max_freq;
-	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
+	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
  		/* Convert GT frequency to 50 HZ units */
  		min_gpu_freq /= GEN9_FREQ_SCALER;
  		max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -1559,30 +1559,30 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); - wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
  	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
  		ia_freq = gpu_freq;
-		sandybridge_pcode_read(dev_priv,
+		sandybridge_pcode_read(i915,
  				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
  				       &ia_freq, NULL);
  		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
-			   intel_gpu_freq(dev_priv, (gpu_freq *
-						     (IS_GEN9_BC(dev_priv) ||
-						      INTEL_GEN(dev_priv) >= 10 ?
+			   intel_gpu_freq(i915, (gpu_freq *
+						     (IS_GEN9_BC(i915) ||
+						      INTEL_GEN(i915) >= 10 ?
  						      GEN9_FREQ_SCALER : 1))),
  			   ((ia_freq >> 0) & 0xff) * 100,
  			   ((ia_freq >> 8) & 0xff) * 100);
  	}
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return 0;
  }
static int i915_opregion(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
+	struct intel_opregion *opregion = &i915->opregion;
  	int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1610,8 +1610,8 @@ static int i915_vbt(struct seq_file *m, void *unused)
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	struct intel_framebuffer *fbdev_fb = NULL;
  	struct drm_framebuffer *drm_fb;
  	int ret;
@@ -1621,8 +1621,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
  		return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
-	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
-		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
+	if (i915->fbdev && i915->fbdev->helper.fb) {
+		fbdev_fb = to_intel_framebuffer(i915->fbdev->helper.fb);
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
  			   fbdev_fb->base.width,
@@ -1666,8 +1666,8 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
static int i915_context_status(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	struct i915_gem_context *ctx;
  	int ret;
@@ -1675,7 +1675,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
  	if (ret)
  		return ret;
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+	list_for_each_entry(ctx, &i915->contexts.list, link) {
  		struct i915_gem_engines_iter it;
  		struct intel_context *ce;
@@ -1746,18 +1746,18 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct intel_uncore *uncore = &i915->uncore;
  	intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
-		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+		   swizzle_string(i915->mm.bit_6_swizzle_x));
  	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
-		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+		   swizzle_string(i915->mm.bit_6_swizzle_y));
- if (IS_GEN_RANGE(dev_priv, 3, 4)) {
+	if (IS_GEN_RANGE(i915, 3, 4)) {
  		seq_printf(m, "DDC = 0x%08x\n",
  			   intel_uncore_read(uncore, DCC));
  		seq_printf(m, "DDC2 = 0x%08x\n",
@@ -1766,7 +1766,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
  			   intel_uncore_read16(uncore, C0DRB3));
  		seq_printf(m, "C1DRB3 = 0x%04x\n",
  			   intel_uncore_read16(uncore, C1DRB3));
-	} else if (INTEL_GEN(dev_priv) >= 6) {
+	} else if (INTEL_GEN(i915) >= 6) {
  		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
  			   intel_uncore_read(uncore, MAD_DIMM_C0));
  		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -1775,7 +1775,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
  			   intel_uncore_read(uncore, MAD_DIMM_C2));
  		seq_printf(m, "TILECTL = 0x%08x\n",
  			   intel_uncore_read(uncore, TILECTL));
-		if (INTEL_GEN(dev_priv) >= 8)
+		if (INTEL_GEN(i915) >= 8)
  			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
  				   intel_uncore_read(uncore, GAMTARBMODE));
  		else
@@ -1785,10 +1785,10 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
  			   intel_uncore_read(uncore, DISP_ARB_CTL));
  	}
- if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
  		seq_puts(m, "L-shaped memory detected\n");
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return 0;
  }
@@ -1809,54 +1809,54 @@ static const char *rps_power_to_str(unsigned int power)
static int i915_rps_boost_info(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	u32 act_freq = rps->cur_freq;
  	intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-			vlv_punit_get(dev_priv);
-			act_freq = vlv_punit_read(dev_priv,
+	with_intel_runtime_pm_if_in_use(i915, wakeref) {
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+			vlv_punit_get(i915);
+			act_freq = vlv_punit_read(i915,
  						  PUNIT_REG_GPU_FREQ_STS);
-			vlv_punit_put(dev_priv);
+			vlv_punit_put(i915);
  			act_freq = (act_freq >> 8) & 0xff;
  		} else {
-			act_freq = intel_get_cagf(dev_priv,
+			act_freq = intel_get_cagf(i915,
  						  I915_READ(GEN6_RPSTAT1));
  		}
  	}
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
-	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
+	seq_printf(m, "GPU busy? %s\n", yesno(i915->gt.awake));
  	seq_printf(m, "Boosts outstanding? %d\n",
  		   atomic_read(&rps->num_waiters));
  	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
  	seq_printf(m, "Frequency requested %d, actual %d\n",
-		   intel_gpu_freq(dev_priv, rps->cur_freq),
-		   intel_gpu_freq(dev_priv, act_freq));
+		   intel_gpu_freq(i915, rps->cur_freq),
+		   intel_gpu_freq(i915, act_freq));
  	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
-		   intel_gpu_freq(dev_priv, rps->min_freq),
-		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
-		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
-		   intel_gpu_freq(dev_priv, rps->max_freq));
+		   intel_gpu_freq(i915, rps->min_freq),
+		   intel_gpu_freq(i915, rps->min_freq_softlimit),
+		   intel_gpu_freq(i915, rps->max_freq_softlimit),
+		   intel_gpu_freq(i915, rps->max_freq));
  	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
-		   intel_gpu_freq(dev_priv, rps->idle_freq),
-		   intel_gpu_freq(dev_priv, rps->efficient_freq),
-		   intel_gpu_freq(dev_priv, rps->boost_freq));
+		   intel_gpu_freq(i915, rps->idle_freq),
+		   intel_gpu_freq(i915, rps->efficient_freq),
+		   intel_gpu_freq(i915, rps->boost_freq));
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); - if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
+	if (INTEL_GEN(i915) >= 6 && rps->enabled && i915->gt.awake) {
  		u32 rpup, rpupei;
  		u32 rpdown, rpdownei;
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+		intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
  		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
  		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
  		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
  		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
-		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+		intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
  			   rps_power_to_str(rps->power.mode));
@@ -1875,29 +1875,29 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
static int i915_llc(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const bool edram = INTEL_GEN(dev_priv) > 8;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	const bool edram = INTEL_GEN(i915) > 8;
- seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
+	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(i915)));
  	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
-		   dev_priv->edram_size_mb);
+		   i915->edram_size_mb);
return 0;
  }
static int i915_huc_load_status_info(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	intel_wakeref_t wakeref;
  	struct drm_printer p;
- if (!HAS_HUC(dev_priv))
+	if (!HAS_HUC(i915))
  		return -ENODEV;
p = drm_seq_file_printer(m);
-	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
+	intel_uc_fw_dump(&i915->huc.fw, &p);
- with_intel_runtime_pm(dev_priv, wakeref)
+	with_intel_runtime_pm(i915, wakeref)
  		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
return 0;
@@ -1905,17 +1905,17 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
static int i915_guc_load_status_info(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	intel_wakeref_t wakeref;
  	struct drm_printer p;
- if (!HAS_GUC(dev_priv))
+	if (!HAS_GUC(i915))
  		return -ENODEV;
p = drm_seq_file_printer(m);
-	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
+	intel_uc_fw_dump(&i915->guc.fw, &p);
- with_intel_runtime_pm(dev_priv, wakeref) {
+	with_intel_runtime_pm(i915, wakeref) {
  		u32 tmp = I915_READ(GUC_STATUS);
  		u32 i;
@@ -1954,9 +1954,9 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
  }
static void i915_guc_log_info(struct seq_file *m,
-			      struct drm_i915_private *dev_priv)
+			      struct drm_i915_private *i915)
  {
-	struct intel_guc_log *log = &dev_priv->guc.log;
+	struct intel_guc_log *log = &i915->guc.log;
  	enum guc_log_buffer_type type;
if (!intel_guc_log_relay_enabled(log)) {
@@ -1978,7 +1978,7 @@ static void i915_guc_log_info(struct seq_file *m,
  }
static void i915_guc_client_info(struct seq_file *m,
-				 struct drm_i915_private *dev_priv,
+				 struct drm_i915_private *i915,
  				 struct intel_guc_client *client)
  {
  	struct intel_engine_cs *engine;
@@ -1990,7 +1990,7 @@ static void i915_guc_client_info(struct seq_file *m,
  	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
  		client->doorbell_id, client->doorbell_offset);
- for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		u64 submissions = client->submissions[id];
  		tot += submissions;
  		seq_printf(m, "\tSubmissions: %llu %s\n",
@@ -2001,15 +2001,15 @@ static void i915_guc_client_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const struct intel_guc *guc = &dev_priv->guc;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	const struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(dev_priv))
+	if (!USES_GUC(i915))
  		return -ENODEV;
- i915_guc_log_info(m, dev_priv);
+	i915_guc_log_info(m, i915);
- if (!USES_GUC_SUBMISSION(dev_priv))
+	if (!USES_GUC_SUBMISSION(i915))
  		return 0;
GEM_BUG_ON(!guc->execbuf_client);
@@ -2019,11 +2019,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
  	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
-	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
+	i915_guc_client_info(m, i915, guc->execbuf_client);
  	if (guc->preempt_client) {
  		seq_printf(m, "\nGuC preempt client @ %p:\n",
  			   guc->preempt_client);
-		i915_guc_client_info(m, dev_priv, guc->preempt_client);
+		i915_guc_client_info(m, i915, guc->preempt_client);
  	}
/* Add more as required ... */
@@ -2033,14 +2033,14 @@ static int i915_guc_info(struct seq_file *m, void *data)
static int i915_guc_stage_pool(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const struct intel_guc *guc = &dev_priv->guc;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	const struct intel_guc *guc = &i915->guc;
  	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
  	struct intel_guc_client *client = guc->execbuf_client;
  	intel_engine_mask_t tmp;
  	int index;
- if (!USES_GUC_SUBMISSION(dev_priv))
+	if (!USES_GUC_SUBMISSION(i915))
  		return -ENODEV;
for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
@@ -2066,7 +2066,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
  			   desc->wq_addr, desc->wq_size);
  		seq_putc(m, '\n');
- for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
+		for_each_engine_masked(engine, i915, client->engines, tmp) {
  			u32 guc_engine_id = engine->guc_id;
  			struct guc_execlist_context *lrc =
  						&desc->lrc[guc_engine_id];
@@ -2088,19 +2088,19 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
  static int i915_guc_log_dump(struct seq_file *m, void *data)
  {
  	struct drm_info_node *node = m->private;
-	struct drm_i915_private *dev_priv = node_to_i915(node);
+	struct drm_i915_private *i915 = node_to_i915(node);
  	bool dump_load_err = !!node->info_ent->data;
  	struct drm_i915_gem_object *obj = NULL;
  	u32 *log;
  	int i = 0;
- if (!HAS_GUC(dev_priv))
+	if (!HAS_GUC(i915))
  		return -ENODEV;
if (dump_load_err)
-		obj = dev_priv->guc.load_err_log;
-	else if (dev_priv->guc.log.vma)
-		obj = dev_priv->guc.log.vma->obj;
+		obj = i915->guc.load_err_log;
+	else if (i915->guc.log.vma)
+		obj = i915->guc.log.vma->obj;
if (!obj)
  		return 0;
@@ -2126,24 +2126,24 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
static int i915_guc_log_level_get(void *data, u64 *val)
  {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
- if (!USES_GUC(dev_priv))
+	if (!USES_GUC(i915))
  		return -ENODEV;
- *val = intel_guc_log_get_level(&dev_priv->guc.log);
+	*val = intel_guc_log_get_level(&i915->guc.log);
return 0;
  }
static int i915_guc_log_level_set(void *data, u64 val)
  {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
- if (!USES_GUC(dev_priv))
+	if (!USES_GUC(i915))
  		return -ENODEV;
- return intel_guc_log_set_level(&dev_priv->guc.log, val);
+	return intel_guc_log_set_level(&i915->guc.log, val);
  }
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2152,14 +2152,14 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
  {
-	struct drm_i915_private *dev_priv = inode->i_private;
+	struct drm_i915_private *i915 = inode->i_private;
- if (!USES_GUC(dev_priv))
+	if (!USES_GUC(i915))
  		return -ENODEV;
- file->private_data = &dev_priv->guc.log;
+	file->private_data = &i915->guc.log;
- return intel_guc_log_relay_open(&dev_priv->guc.log);
+	return intel_guc_log_relay_open(&i915->guc.log);
  }
static ssize_t
@@ -2177,9 +2177,9 @@ i915_guc_log_relay_write(struct file *filp,
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
  {
-	struct drm_i915_private *dev_priv = inode->i_private;
+	struct drm_i915_private *i915 = inode->i_private;
- intel_guc_log_relay_close(&dev_priv->guc.log);
+	intel_guc_log_relay_close(&i915->guc.log);
return 0;
  }
@@ -2205,12 +2205,12 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
  		"sink internal error",
  	};
  	struct drm_connector *connector = m->private;
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	struct intel_dp *intel_dp =
  		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
  	int ret;
- if (!CAN_PSR(dev_priv)) {
+	if (!CAN_PSR(i915)) {
  		seq_puts(m, "PSR Unsupported\n");
  		return -ENODEV;
  	}
@@ -2236,12 +2236,12 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
  DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
static void
-psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+psr_source_status(struct drm_i915_private *i915, struct seq_file *m)
  {
  	u32 val, status_val;
  	const char *status = "unknown";
- if (dev_priv->psr.psr2_enabled) {
+	if (i915->psr.psr2_enabled) {
  		static const char * const live_status[] = {
  			"IDLE",
  			"CAPTURE",
@@ -2283,14 +2283,14 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
static int i915_edp_psr_status(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct i915_psr *psr = &dev_priv->psr;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct i915_psr *psr = &i915->psr;
  	intel_wakeref_t wakeref;
  	const char *status;
  	bool enabled;
  	u32 val;
- if (!HAS_PSR(dev_priv))
+	if (!HAS_PSR(i915))
  		return -ENODEV;
seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
@@ -2301,7 +2301,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
  	if (!psr->sink_support)
  		return 0;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
  	mutex_lock(&psr->lock);
if (psr->enabled)
@@ -2322,14 +2322,14 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
  	}
  	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
  		   enableddisabled(enabled), val);
-	psr_source_status(dev_priv, m);
+	psr_source_status(i915, m);
  	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
  		   psr->busy_frontbuffer_bits);
/*
  	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
  	 */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
  		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
  		seq_printf(m, "Performance counter: %u\n", val);
  	}
@@ -2365,7 +2365,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
unlock:
  	mutex_unlock(&psr->lock);
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return 0;
  }
@@ -2373,20 +2373,20 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
  static int
  i915_edp_psr_debug_set(void *data, u64 val)
  {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
  	intel_wakeref_t wakeref;
  	int ret;
- if (!CAN_PSR(dev_priv))
+	if (!CAN_PSR(i915))
  		return -ENODEV;
DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val); - wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
- ret = intel_psr_debug_set(dev_priv, val);
+	ret = intel_psr_debug_set(i915, val);
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return ret;
  }
@@ -2394,12 +2394,12 @@ i915_edp_psr_debug_set(void *data, u64 val)
  static int
  i915_edp_psr_debug_get(void *data, u64 *val)
  {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
- if (!CAN_PSR(dev_priv))
+	if (!CAN_PSR(i915))
  		return -ENODEV;
- *val = READ_ONCE(dev_priv->psr.debug);
+	*val = READ_ONCE(i915->psr.debug);
  	return 0;
  }
@@ -2409,19 +2409,19 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
static int i915_energy_uJ(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	unsigned long long power;
  	intel_wakeref_t wakeref;
  	u32 units;
- if (INTEL_GEN(dev_priv) < 6)
+	if (INTEL_GEN(i915) < 6)
  		return -ENODEV;
if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
  		return -ENODEV;
units = (power & 0x1f00) >> 8;
-	with_intel_runtime_pm(dev_priv, wakeref)
+	with_intel_runtime_pm(i915, wakeref)
  		power = I915_READ(MCH_SECP_NRG_STTS);
power = (1000000 * power) >> units; /* convert to uJ */
@@ -2432,21 +2432,21 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct pci_dev *pdev = i915->drm.pdev;
- if (!HAS_RUNTIME_PM(dev_priv))
+	if (!HAS_RUNTIME_PM(i915))
  		seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "Runtime power status: %s\n",
-		   enableddisabled(!dev_priv->power_domains.wakeref));
+		   enableddisabled(!i915->power_domains.wakeref));
- seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
+	seq_printf(m, "GPU idle: %s\n", yesno(!i915->gt.awake));
  	seq_printf(m, "IRQs disabled: %s\n",
-		   yesno(!intel_irqs_enabled(dev_priv)));
+		   yesno(!intel_irqs_enabled(i915)));
  #ifdef CONFIG_PM
  	seq_printf(m, "Usage count: %d\n",
-		   atomic_read(&dev_priv->drm.dev->power.usage_count));
+		   atomic_read(&i915->drm.dev->power.usage_count));
  #else
  	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
  #endif
@@ -2457,7 +2457,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
  	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
  		struct drm_printer p = drm_seq_file_printer(m);
- print_intel_runtime_pm_wakeref(dev_priv, &p);
+		print_intel_runtime_pm_wakeref(i915, &p);
  	}
return 0;
@@ -2465,8 +2465,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
static int i915_power_domain_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	int i;
mutex_lock(&power_domains->lock);
@@ -2493,16 +2493,16 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
static int i915_dmc_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	intel_wakeref_t wakeref;
  	struct intel_csr *csr;
- if (!HAS_CSR(dev_priv))
+	if (!HAS_CSR(i915))
  		return -ENODEV;
- csr = &dev_priv->csr;
+	csr = &i915->csr;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
  	seq_printf(m, "path: %s\n", csr->fw_path);
@@ -2513,13 +2513,13 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
  	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
  		   CSR_VERSION_MINOR(csr->version));
- if (WARN_ON(INTEL_GEN(dev_priv) > 11))
+	if (WARN_ON(INTEL_GEN(i915) > 11))
  		goto out;
seq_printf(m, "DC3 -> DC5 count: %d\n",
-		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+		   I915_READ(IS_BROXTON(i915) ? BXT_CSR_DC3_DC5_COUNT :
  						    SKL_CSR_DC3_DC5_COUNT));
-	if (!IS_GEN9_LP(dev_priv))
+	if (!IS_GEN9_LP(i915))
  		seq_printf(m, "DC5 -> DC6 count: %d\n",
  			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
@@ -2528,7 +2528,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
  	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
  	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return 0;
  }
@@ -2548,8 +2548,8 @@ static void intel_encoder_info(struct seq_file *m,
  			       struct intel_crtc *intel_crtc,
  			       struct intel_encoder *intel_encoder)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	struct drm_crtc *crtc = &intel_crtc->base;
  	struct intel_connector *intel_connector;
  	struct drm_encoder *encoder;
@@ -2575,8 +2575,8 @@ static void intel_encoder_info(struct seq_file *m,
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	struct drm_crtc *crtc = &intel_crtc->base;
  	struct intel_encoder *intel_encoder;
  	struct drm_plane_state *plane_state = crtc->primary->state;
@@ -2731,8 +2731,8 @@ static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	struct intel_plane *intel_plane;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
@@ -2805,14 +2805,14 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
static int i915_display_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	struct intel_crtc *crtc;
  	struct drm_connector *connector;
  	struct drm_connector_list_iter conn_iter;
  	intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
seq_printf(m, "CRTC info\n");
  	seq_printf(m, "---------\n");
@@ -2861,42 +2861,42 @@ static int i915_display_info(struct seq_file *m, void *unused)
  	drm_connector_list_iter_end(&conn_iter);
  	mutex_unlock(&dev->mode_config.mutex);
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return 0;
  }
static int i915_engine_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	struct intel_engine_cs *engine;
  	intel_wakeref_t wakeref;
  	enum intel_engine_id id;
  	struct drm_printer p;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
seq_printf(m, "GT awake? %s [%d]\n",
-		   yesno(dev_priv->gt.awake),
-		   atomic_read(&dev_priv->gt.wakeref.count));
+		   yesno(i915->gt.awake),
+		   atomic_read(&i915->gt.wakeref.count));
  	seq_printf(m, "CS timestamp frequency: %u kHz\n",
-		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
+		   RUNTIME_INFO(i915)->cs_timestamp_frequency_khz);
p = drm_seq_file_printer(m);
-	for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return 0;
  }
static int i915_rcs_topology(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	struct drm_printer p = drm_seq_file_printer(m);
- intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
+	intel_device_info_dump_topology(&RUNTIME_INFO(i915)->sseu, &p);
return 0;
  }
@@ -2913,13 +2913,13 @@ static int i915_shrinker_info(struct seq_file *m, void *unused)
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	int i;
drm_modeset_lock_all(dev);
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+	for (i = 0; i < i915->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &i915->shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
  			   pll->info->id);
@@ -2977,28 +2977,28 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
static int i915_ipc_status_show(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = m->private;
+	struct drm_i915_private *i915 = m->private;
seq_printf(m, "Isochronous Priority Control: %s\n",
-			yesno(dev_priv->ipc_enabled));
+			yesno(i915->ipc_enabled));
  	return 0;
  }
static int i915_ipc_status_open(struct inode *inode, struct file *file)
  {
-	struct drm_i915_private *dev_priv = inode->i_private;
+	struct drm_i915_private *i915 = inode->i_private;
- if (!HAS_IPC(dev_priv))
+	if (!HAS_IPC(i915))
  		return -ENODEV;
- return single_open(file, i915_ipc_status_show, dev_priv);
+	return single_open(file, i915_ipc_status_show, i915);
  }
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
  				     size_t len, loff_t *offp)
  {
  	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
+	struct drm_i915_private *i915 = m->private;
  	intel_wakeref_t wakeref;
  	bool enable;
  	int ret;
@@ -3007,12 +3007,12 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
  	if (ret < 0)
  		return ret;
- with_intel_runtime_pm(dev_priv, wakeref) {
-		if (!dev_priv->ipc_enabled && enable)
+	with_intel_runtime_pm(i915, wakeref) {
+		if (!i915->ipc_enabled && enable)
  			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
-		dev_priv->wm.distrust_bios_wm = true;
-		dev_priv->ipc_enabled = enable;
-		intel_enable_ipc(dev_priv);
+		i915->wm.distrust_bios_wm = true;
+		i915->ipc_enabled = enable;
+		intel_enable_ipc(i915);
  	}
return len;
@@ -3029,19 +3029,19 @@ static const struct file_operations i915_ipc_status_fops = {
static int i915_ddb_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	struct skl_ddb_entry *entry;
  	struct intel_crtc *crtc;
- if (INTEL_GEN(dev_priv) < 9)
+	if (INTEL_GEN(i915) < 9)
  		return -ENODEV;
  	drm_modeset_lock_all(dev);
  	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		struct intel_crtc_state *crtc_state =
  			to_intel_crtc_state(crtc->base.state);
  		enum pipe pipe = crtc->pipe;
@@ -3070,8 +3070,8 @@ static void drrs_status_per_crtc(struct seq_file *m,
  				 struct drm_device *dev,
  				 struct intel_crtc *intel_crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_drrs *drrs = &dev_priv->drrs;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct i915_drrs *drrs = &i915->drrs;
  	int vrefresh = 0;
  	struct drm_connector *connector;
  	struct drm_connector_list_iter conn_iter;
@@ -3085,11 +3085,11 @@ static void drrs_status_per_crtc(struct seq_file *m,
  	}
  	drm_connector_list_iter_end(&conn_iter);
- if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
+	if (i915->vbt.drrs_type == STATIC_DRRS_SUPPORT)
  		seq_puts(m, "\tVBT: DRRS_type: Static");
-	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
+	else if (i915->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
  		seq_puts(m, "\tVBT: DRRS_type: Seamless");
-	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
+	else if (i915->vbt.drrs_type == DRRS_NOT_SUPPORTED)
  		seq_puts(m, "\tVBT: DRRS_type: None");
  	else
  		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
@@ -3106,7 +3106,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
  		/* disable_drrs() will make drrs->dp NULL */
  		if (!drrs->dp) {
  			seq_puts(m, "Idleness DRRS: Disabled\n");
-			if (dev_priv->psr.enabled)
+			if (i915->psr.enabled)
  				seq_puts(m,
  				"\tAs PSR is enabled, DRRS is not enabled\n");
  			mutex_unlock(&drrs->mutex);
@@ -3143,8 +3143,8 @@ static void drrs_status_per_crtc(struct seq_file *m,
static int i915_drrs_status(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	struct intel_crtc *intel_crtc;
  	int active_crtc_cnt = 0;
@@ -3167,8 +3167,8 @@ static int i915_drrs_status(struct seq_file *m, void *unused) static int i915_dp_mst_info(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct drm_device *dev = &i915->drm;
  	struct intel_encoder *intel_encoder;
  	struct intel_digital_port *intel_dig_port;
  	struct drm_connector *connector;
@@ -3257,8 +3257,8 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = m->private;
+	struct drm_device *dev = &i915->drm;
  	struct drm_connector *connector;
  	struct drm_connector_list_iter conn_iter;
  	struct intel_dp *intel_dp;
@@ -3307,8 +3307,8 @@ static const struct file_operations i915_displayport_test_active_fops = {
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = m->private;
+	struct drm_device *dev = &i915->drm;
  	struct drm_connector *connector;
  	struct drm_connector_list_iter conn_iter;
  	struct intel_dp *intel_dp;
@@ -3351,8 +3351,8 @@ DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = m->private;
+	struct drm_device *dev = &i915->drm;
  	struct drm_connector *connector;
  	struct drm_connector_list_iter conn_iter;
  	struct intel_dp *intel_dp;
@@ -3383,19 +3383,19 @@ DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
  {
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = m->private;
+	struct drm_device *dev = &i915->drm;
  	int level;
  	int num_levels;
- if (IS_CHERRYVIEW(dev_priv))
+	if (IS_CHERRYVIEW(i915))
  		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915))
  		num_levels = 1;
-	else if (IS_G4X(dev_priv))
+	else if (IS_G4X(i915))
  		num_levels = 3;
  	else
-		num_levels = ilk_wm_max_level(dev_priv) + 1;
+		num_levels = ilk_wm_max_level(i915) + 1;
  	drm_modeset_lock_all(dev);
@@ -3406,10 +3406,10 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
  		 * - WM1+ latency values in 0.5us units
  		 * - latencies are in us on gen9/vlv/chv
  		 */
-		if (INTEL_GEN(dev_priv) >= 9 ||
-		    IS_VALLEYVIEW(dev_priv) ||
-		    IS_CHERRYVIEW(dev_priv) ||
-		    IS_G4X(dev_priv))
+		if (INTEL_GEN(i915) >= 9 ||
+		    IS_VALLEYVIEW(i915) ||
+		    IS_CHERRYVIEW(i915) ||
+		    IS_G4X(i915))
  			latency *= 10;
  		else if (level > 0)
  			latency *= 5;
@@ -3423,13 +3423,13 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
static int pri_wm_latency_show(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = m->private;
+	struct drm_i915_private *i915 = m->private;
  	const u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
+	if (INTEL_GEN(i915) >= 9)
+		latencies = i915->wm.skl_latency;
  	else
-		latencies = dev_priv->wm.pri_latency;
+		latencies = i915->wm.pri_latency;
  	wm_latency_show(m, latencies);
@@ -3438,13 +3438,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = m->private;
+	struct drm_i915_private *i915 = m->private;
  	const u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
+	if (INTEL_GEN(i915) >= 9)
+		latencies = i915->wm.skl_latency;
  	else
-		latencies = dev_priv->wm.spr_latency;
+		latencies = i915->wm.spr_latency;
  	wm_latency_show(m, latencies);
@@ -3453,13 +3453,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = m->private;
+	struct drm_i915_private *i915 = m->private;
  	const u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
+	if (INTEL_GEN(i915) >= 9)
+		latencies = i915->wm.skl_latency;
  	else
-		latencies = dev_priv->wm.cur_latency;
+		latencies = i915->wm.cur_latency;
  	wm_latency_show(m, latencies);
@@ -3468,54 +3468,54 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
static int pri_wm_latency_open(struct inode *inode, struct file *file)
  {
-	struct drm_i915_private *dev_priv = inode->i_private;
+	struct drm_i915_private *i915 = inode->i_private;
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+	if (INTEL_GEN(i915) < 5 && !IS_G4X(i915))
  		return -ENODEV;
- return single_open(file, pri_wm_latency_show, dev_priv);
+	return single_open(file, pri_wm_latency_show, i915);
  }
static int spr_wm_latency_open(struct inode *inode, struct file *file)
  {
-	struct drm_i915_private *dev_priv = inode->i_private;
+	struct drm_i915_private *i915 = inode->i_private;
- if (HAS_GMCH(dev_priv))
+	if (HAS_GMCH(i915))
  		return -ENODEV;
- return single_open(file, spr_wm_latency_show, dev_priv);
+	return single_open(file, spr_wm_latency_show, i915);
  }
static int cur_wm_latency_open(struct inode *inode, struct file *file)
  {
-	struct drm_i915_private *dev_priv = inode->i_private;
+	struct drm_i915_private *i915 = inode->i_private;
- if (HAS_GMCH(dev_priv))
+	if (HAS_GMCH(i915))
  		return -ENODEV;
- return single_open(file, cur_wm_latency_show, dev_priv);
+	return single_open(file, cur_wm_latency_show, i915);
  }
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
  				size_t len, loff_t *offp, u16 wm[8])
  {
  	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = m->private;
+	struct drm_device *dev = &i915->drm;
  	u16 new[8] = { 0 };
  	int num_levels;
  	int level;
  	int ret;
  	char tmp[32];
- if (IS_CHERRYVIEW(dev_priv))
+	if (IS_CHERRYVIEW(i915))
  		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915))
  		num_levels = 1;
-	else if (IS_G4X(dev_priv))
+	else if (IS_G4X(i915))
  		num_levels = 3;
  	else
-		num_levels = ilk_wm_max_level(dev_priv) + 1;
+		num_levels = ilk_wm_max_level(i915) + 1;
if (len >= sizeof(tmp))
  		return -EINVAL;
@@ -3546,13 +3546,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
  				    size_t len, loff_t *offp)
  {
  	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
+	struct drm_i915_private *i915 = m->private;
  	u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
+	if (INTEL_GEN(i915) >= 9)
+		latencies = i915->wm.skl_latency;
  	else
-		latencies = dev_priv->wm.pri_latency;
+		latencies = i915->wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
  }
@@ -3561,13 +3561,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
  				    size_t len, loff_t *offp)
  {
  	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
+	struct drm_i915_private *i915 = m->private;
  	u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
+	if (INTEL_GEN(i915) >= 9)
+		latencies = i915->wm.skl_latency;
  	else
-		latencies = dev_priv->wm.spr_latency;
+		latencies = i915->wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
  }
@@ -3576,13 +3576,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
  				    size_t len, loff_t *offp)
  {
  	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
+	struct drm_i915_private *i915 = m->private;
  	u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
+	if (INTEL_GEN(i915) >= 9)
+		latencies = i915->wm.skl_latency;
  	else
-		latencies = dev_priv->wm.cur_latency;
+		latencies = i915->wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
  }
@@ -3752,14 +3752,14 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
  static int
  i915_cache_sharing_get(void *data, u64 *val)
  {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
  	intel_wakeref_t wakeref;
  	u32 snpcr = 0;
- if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
+	if (!(IS_GEN_RANGE(i915, 6, 7)))
  		return -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref)
+	with_intel_runtime_pm(i915, wakeref)
  		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -3770,17 +3770,17 @@ i915_cache_sharing_get(void *data, u64 *val)
  static int
  i915_cache_sharing_set(void *data, u64 val)
  {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
  	intel_wakeref_t wakeref;
- if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
+	if (!(IS_GEN_RANGE(i915, 6, 7)))
  		return -ENODEV;
if (val > 3)
  		return -EINVAL;
DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
-	with_intel_runtime_pm(dev_priv, wakeref) {
+	with_intel_runtime_pm(i915, wakeref) {
  		u32 snpcr;
/* Update the cache sharing policy here as well */
@@ -3797,7 +3797,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
  			i915_cache_sharing_get, i915_cache_sharing_set,
  			"%llu\n");
-static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
+static void cherryview_sseu_device_status(struct drm_i915_private *i915,
  					  struct sseu_dev_info *sseu)
  {
  #define SS_MAX 2
@@ -3830,11 +3830,11 @@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
  #undef SS_MAX
  }
-static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
+static void gen10_sseu_device_status(struct drm_i915_private *i915,
  				     struct sseu_dev_info *sseu)
  {
  #define SS_MAX 6
-	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
+	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
  	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
  	int s, ss;
@@ -3886,11 +3886,11 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
  #undef SS_MAX
  }
-static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
+static void gen9_sseu_device_status(struct drm_i915_private *i915,
  				    struct sseu_dev_info *sseu)
  {
  #define SS_MAX 3
-	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
+	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
  	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
  	int s, ss;
@@ -3916,14 +3916,14 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
  		sseu->slice_mask |= BIT(s);
-		if (IS_GEN9_BC(dev_priv))
+		if (IS_GEN9_BC(i915))
  			sseu->subslice_mask[s] =
-				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
+				RUNTIME_INFO(i915)->sseu.subslice_mask[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
  			unsigned int eu_cnt;
- if (IS_GEN9_LP(dev_priv)) {
+			if (IS_GEN9_LP(i915)) {
  				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
  					/* skip disabled subslice */
  					continue;
@@ -3942,7 +3942,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
  #undef SS_MAX
  }
-static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
+static void broadwell_sseu_device_status(struct drm_i915_private *i915,
  					 struct sseu_dev_info *sseu)
  {
  	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
@@ -3952,10 +3952,10 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
if (sseu->slice_mask) {
  		sseu->eu_per_subslice =
-			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
+			RUNTIME_INFO(i915)->sseu.eu_per_subslice;
  		for (s = 0; s < fls(sseu->slice_mask); s++) {
  			sseu->subslice_mask[s] =
-				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
+				RUNTIME_INFO(i915)->sseu.subslice_mask[s];
  		}
  		sseu->eu_total = sseu->eu_per_subslice *
  				 intel_sseu_subslice_total(sseu);
@@ -3963,7 +3963,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
  		/* subtract fused off EU(s) from enabled slice(s) */
  		for (s = 0; s < fls(sseu->slice_mask); s++) {
  			u8 subslice_7eu =
-				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
+				RUNTIME_INFO(i915)->sseu.subslice_7eu[s];
sseu->eu_total -= hweight8(subslice_7eu);
  		}
@@ -3973,7 +3973,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
  static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
  				 const struct sseu_dev_info *sseu)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	const char *type = is_available_info ? "Available" : "Enabled";
  	int s;
@@ -3995,8 +3995,8 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
  	if (!is_available_info)
  		return;
- seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
-	if (HAS_POOLED_EU(dev_priv))
+	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(i915)));
+	if (HAS_POOLED_EU(i915))
  		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
seq_printf(m, " Has Slice Power Gating: %s\n",
@@ -4009,32 +4009,32 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
static int i915_sseu_status(struct seq_file *m, void *unused)
  {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
  	struct sseu_dev_info sseu;
  	intel_wakeref_t wakeref;
- if (INTEL_GEN(dev_priv) < 8)
+	if (INTEL_GEN(i915) < 8)
  		return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
-	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
+	i915_print_sseu_info(m, true, &RUNTIME_INFO(i915)->sseu);
seq_puts(m, "SSEU Device Status\n");
  	memset(&sseu, 0, sizeof(sseu));
-	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
-	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
+	sseu.max_slices = RUNTIME_INFO(i915)->sseu.max_slices;
+	sseu.max_subslices = RUNTIME_INFO(i915)->sseu.max_subslices;
  	sseu.max_eus_per_subslice =
-		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
-
-	with_intel_runtime_pm(dev_priv, wakeref) {
-		if (IS_CHERRYVIEW(dev_priv))
-			cherryview_sseu_device_status(dev_priv, &sseu);
-		else if (IS_BROADWELL(dev_priv))
-			broadwell_sseu_device_status(dev_priv, &sseu);
-		else if (IS_GEN(dev_priv, 9))
-			gen9_sseu_device_status(dev_priv, &sseu);
-		else if (INTEL_GEN(dev_priv) >= 10)
-			gen10_sseu_device_status(dev_priv, &sseu);
+		RUNTIME_INFO(i915)->sseu.max_eus_per_subslice;
+
+	with_intel_runtime_pm(i915, wakeref) {
+		if (IS_CHERRYVIEW(i915))
+			cherryview_sseu_device_status(i915, &sseu);
+		else if (IS_BROADWELL(i915))
+			broadwell_sseu_device_status(i915, &sseu);
+		else if (IS_GEN(i915, 9))
+			gen9_sseu_device_status(i915, &sseu);
+		else if (INTEL_GEN(i915) >= 10)
+			gen10_sseu_device_status(i915, &sseu);
  	}
i915_print_sseu_info(m, false, &sseu);
@@ -4077,15 +4077,15 @@ static const struct file_operations i915_forcewake_fops = {
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = m->private;
-	struct i915_hotplug *hotplug = &dev_priv->hotplug;
+	struct drm_i915_private *i915 = m->private;
+	struct i915_hotplug *hotplug = &i915->hotplug;
/* Synchronize with everything first in case there's been an HPD
  	 * storm, but we haven't finished handling it in the kernel yet
  	 */
-	synchronize_irq(dev_priv->drm.irq);
-	flush_work(&dev_priv->hotplug.dig_port_work);
-	flush_work(&dev_priv->hotplug.hotplug_work);
+	synchronize_irq(i915->drm.irq);
+	flush_work(&i915->hotplug.dig_port_work);
+	flush_work(&i915->hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
  	seq_printf(m, "Detected: %s\n",
@@ -4099,8 +4099,8 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
  					loff_t *offp)
  {
  	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	struct i915_hotplug *hotplug = &dev_priv->hotplug;
+	struct drm_i915_private *i915 = m->private;
+	struct i915_hotplug *hotplug = &i915->hotplug;
  	unsigned int new_threshold;
  	int i;
  	char *newline;
@@ -4130,15 +4130,15 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
  	else
  		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
  	hotplug->hpd_storm_threshold = new_threshold;
  	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
  	for_each_hpd_pin(i)
  		hotplug->stats[i].count = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
-	flush_delayed_work(&dev_priv->hotplug.reenable_work);
+	flush_delayed_work(&i915->hotplug.reenable_work);
return len;
  }
@@ -4159,10 +4159,10 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
  {
-	struct drm_i915_private *dev_priv = m->private;
+	struct drm_i915_private *i915 = m->private;
seq_printf(m, "Enabled: %s\n",
-		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+		   yesno(i915->hotplug.hpd_short_storm_enabled));
return 0;
  }
@@ -4179,8 +4179,8 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
  					      size_t len, loff_t *offp)
  {
  	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	struct i915_hotplug *hotplug = &dev_priv->hotplug;
+	struct drm_i915_private *i915 = m->private;
+	struct i915_hotplug *hotplug = &i915->hotplug;
  	char *newline;
  	char tmp[16];
  	int i;
@@ -4201,22 +4201,22 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
/* Reset to the "default" state for this system */
  	if (strcmp(tmp, "reset") == 0)
-		new_state = !HAS_DP_MST(dev_priv);
+		new_state = !HAS_DP_MST(i915);
  	else if (kstrtobool(tmp, &new_state) != 0)
  		return -EINVAL;
DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
  		      new_state ? "En" : "Dis");
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
  	hotplug->hpd_short_storm_enabled = new_state;
  	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
  	for_each_hpd_pin(i)
  		hotplug->stats[i].count = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
-	flush_delayed_work(&dev_priv->hotplug.reenable_work);
+	flush_delayed_work(&i915->hotplug.reenable_work);
return len;
  }
@@ -4232,11 +4232,11 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = {
static int i915_drrs_ctl_set(void *data, u64 val)
  {
-	struct drm_i915_private *dev_priv = data;
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = data;
+	struct drm_device *dev = &i915->drm;
  	struct intel_crtc *crtc;
- if (INTEL_GEN(dev_priv) < 7)
+	if (INTEL_GEN(i915) < 7)
  		return -ENODEV;
for_each_intel_crtc(dev, crtc) {
@@ -4305,9 +4305,9 @@ i915_fifo_underrun_reset_write(struct file *filp,
  			       const char __user *ubuf,
  			       size_t cnt, loff_t *ppos)
  {
-	struct drm_i915_private *dev_priv = filp->private_data;
+	struct drm_i915_private *i915 = filp->private_data;
  	struct intel_crtc *intel_crtc;
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	int ret;
  	bool reset;
@@ -4347,7 +4347,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
  			return ret;
  	}
- ret = intel_fbc_reset_underrun(dev_priv);
+	ret = intel_fbc_reset_underrun(i915);
  	if (ret)
  		return ret;
@@ -4437,9 +4437,9 @@ static const struct i915_debugfs_files {
  	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
  };
-int i915_debugfs_register(struct drm_i915_private *dev_priv)
+int i915_debugfs_register(struct drm_i915_private *i915)
  {
-	struct drm_minor *minor = dev_priv->drm.primary;
+	struct drm_minor *minor = i915->drm.primary;
  	struct dentry *ent;
  	int i;
@@ -4692,7 +4692,7 @@ static const struct file_operations i915_dsc_fec_support_fops = {
  int i915_debugfs_connector_add(struct drm_connector *connector)
  {
  	struct dentry *root = connector->debugfs_entry;
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
/* The connector must have been registered beforehands. */
  	if (!root)
@@ -4717,7 +4717,7 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
  				    connector, &i915_hdcp_sink_capability_fops);
  	}
- if (INTEL_GEN(dev_priv) >= 10 &&
+	if (INTEL_GEN(i915) >= 10 &&
  	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
  	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
  		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h
index c0cd22eb916d..3e99eaf8de02 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.h
+++ b/drivers/gpu/drm/i915/i915_debugfs.h
@@ -10,10 +10,10 @@ struct drm_i915_private;
  struct drm_connector;
#ifdef CONFIG_DEBUG_FS
-int i915_debugfs_register(struct drm_i915_private *dev_priv);
+int i915_debugfs_register(struct drm_i915_private *i915);
  int i915_debugfs_connector_add(struct drm_connector *connector);
  #else
-static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
+static inline int i915_debugfs_register(struct drm_i915_private *i915) { return 0; }
  static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; }
  #endif
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 81ff2c78fd55..a09d028ec8d8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -108,11 +108,11 @@ bool i915_error_injected(void)
  		    "providing the dmesg log by booting with drm.debug=0xf"
void
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+__i915_printk(struct drm_i915_private *i915, const char *level,
  	      const char *fmt, ...)
  {
  	static bool shown_bug_once;
-	struct device *kdev = dev_priv->drm.dev;
+	struct device *kdev = i915->drm.dev;
  	bool is_error = level[1] <= KERN_ERR[1];
  	bool is_debug = level[1] == KERN_DEBUG[1];
  	struct va_format vaf;
@@ -148,74 +148,74 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
  static enum intel_pch
-intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
+intel_pch_type(const struct drm_i915_private *i915, unsigned short id)
  {
  	switch (id) {
  	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-		WARN_ON(!IS_GEN(dev_priv, 5));
+		WARN_ON(!IS_GEN(i915, 5));
  		return PCH_IBX;
  	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+		WARN_ON(!IS_GEN(i915, 6) && !IS_IVYBRIDGE(i915));
  		return PCH_CPT;
  	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+		WARN_ON(!IS_GEN(i915, 6) && !IS_IVYBRIDGE(i915));
  		/* PantherPoint is CPT compatible */
  		return PCH_CPT;
  	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
-		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+		WARN_ON(!IS_HASWELL(i915) && !IS_BROADWELL(i915));
+		WARN_ON(IS_HSW_ULT(i915) || IS_BDW_ULT(i915));
  		return PCH_LPT;
  	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
-		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+		WARN_ON(!IS_HASWELL(i915) && !IS_BROADWELL(i915));
+		WARN_ON(!IS_HSW_ULT(i915) && !IS_BDW_ULT(i915));
  		return PCH_LPT;
  	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
-		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
-		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+		WARN_ON(!IS_HASWELL(i915) && !IS_BROADWELL(i915));
+		WARN_ON(IS_HSW_ULT(i915) || IS_BDW_ULT(i915));
  		/* WildcatPoint is LPT compatible */
  		return PCH_LPT;
  	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
-		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
-		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+		WARN_ON(!IS_HASWELL(i915) && !IS_BROADWELL(i915));
+		WARN_ON(!IS_HSW_ULT(i915) && !IS_BDW_ULT(i915));
  		/* WildcatPoint is LPT compatible */
  		return PCH_LPT;
  	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
-		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+		WARN_ON(!IS_SKYLAKE(i915) && !IS_KABYLAKE(i915));
  		return PCH_SPT;
  	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
-		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+		WARN_ON(!IS_SKYLAKE(i915) && !IS_KABYLAKE(i915));
  		return PCH_SPT;
  	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
-		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
-			!IS_COFFEELAKE(dev_priv));
+		WARN_ON(!IS_SKYLAKE(i915) && !IS_KABYLAKE(i915) &&
+			!IS_COFFEELAKE(i915));
  		/* KBP is SPT compatible */
  		return PCH_SPT;
  	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
-		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+		WARN_ON(!IS_CANNONLAKE(i915) && !IS_COFFEELAKE(i915));
  		return PCH_CNP;
  	case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
-		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+		WARN_ON(!IS_CANNONLAKE(i915) && !IS_COFFEELAKE(i915));
  		return PCH_CNP;
  	case INTEL_PCH_CMP_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
-		WARN_ON(!IS_COFFEELAKE(dev_priv));
+		WARN_ON(!IS_COFFEELAKE(i915));
  		/* CometPoint is CNP Compatible */
  		return PCH_CNP;
  	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
  		DRM_DEBUG_KMS("Found Ice Lake PCH\n");
-		WARN_ON(!IS_ICELAKE(dev_priv));
+		WARN_ON(!IS_ICELAKE(i915));
  		return PCH_ICP;
  	default:
  		return PCH_NONE;
@@ -233,7 +233,7 @@ static bool intel_is_virt_pch(unsigned short id,
  }
static unsigned short
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+intel_virt_detect_pch(const struct drm_i915_private *i915)
  {
  	unsigned short id = 0;
@@ -244,19 +244,19 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
  	 * make an educated guess as to which PCH is really there.
  	 */
- if (IS_ICELAKE(dev_priv))
+	if (IS_ICELAKE(i915))
  		id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
-	else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+	else if (IS_CANNONLAKE(i915) || IS_COFFEELAKE(i915))
  		id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
-	else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
+	else if (IS_KABYLAKE(i915) || IS_SKYLAKE(i915))
  		id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
-	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+	else if (IS_HSW_ULT(i915) || IS_BDW_ULT(i915))
  		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
-	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+	else if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915))
  		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
-	else if (IS_GEN(dev_priv, 5))
+	else if (IS_GEN(i915, 5))
  		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
if (id)
@@ -267,7 +267,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
  	return id;
  }
-static void intel_detect_pch(struct drm_i915_private *dev_priv)
+static void intel_detect_pch(struct drm_i915_private *i915)
  {
  	struct pci_dev *pch = NULL;
@@ -291,22 +291,22 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
  		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-		pch_type = intel_pch_type(dev_priv, id);
+		pch_type = intel_pch_type(i915, id);
  		if (pch_type != PCH_NONE) {
-			dev_priv->pch_type = pch_type;
-			dev_priv->pch_id = id;
+			i915->pch_type = pch_type;
+			i915->pch_id = id;
  			break;
  		} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
  					 pch->subsystem_device)) {
-			id = intel_virt_detect_pch(dev_priv);
-			pch_type = intel_pch_type(dev_priv, id);
+			id = intel_virt_detect_pch(i915);
+			pch_type = intel_pch_type(i915, id);
/* Sanity check virtual PCH id */
  			if (WARN_ON(id && pch_type == PCH_NONE))
  				id = 0;
- dev_priv->pch_type = pch_type;
-			dev_priv->pch_id = id;
+			i915->pch_type = pch_type;
+			i915->pch_id = id;
  			break;
  		}
  	}
@@ -315,10 +315,10 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
  	 * Use PCH_NOP (PCH but no South Display) for PCH platforms without
  	 * display.
  	 */
-	if (pch && !HAS_DISPLAY(dev_priv)) {
+	if (pch && !HAS_DISPLAY(i915)) {
  		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
-		dev_priv->pch_type = PCH_NOP;
-		dev_priv->pch_id = 0;
+		i915->pch_type = PCH_NOP;
+		i915->pch_id = 0;
  	}
if (!pch)
@@ -330,9 +330,9 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
  static int i915_getparam_ioctl(struct drm_device *dev, void *data,
  			       struct drm_file *file_priv)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct pci_dev *pdev = dev_priv->drm.pdev;
-	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct pci_dev *pdev = i915->drm.pdev;
+	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
  	drm_i915_getparam_t *param = data;
  	int value;
@@ -350,40 +350,40 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
  		value = pdev->revision;
  		break;
  	case I915_PARAM_NUM_FENCES_AVAIL:
-		value = dev_priv->num_fence_regs;
+		value = i915->num_fence_regs;
  		break;
  	case I915_PARAM_HAS_OVERLAY:
-		value = dev_priv->overlay ? 1 : 0;
+		value = i915->overlay ? 1 : 0;
  		break;
  	case I915_PARAM_HAS_BSD:
-		value = !!dev_priv->engine[VCS0];
+		value = !!i915->engine[VCS0];
  		break;
  	case I915_PARAM_HAS_BLT:
-		value = !!dev_priv->engine[BCS0];
+		value = !!i915->engine[BCS0];
  		break;
  	case I915_PARAM_HAS_VEBOX:
-		value = !!dev_priv->engine[VECS0];
+		value = !!i915->engine[VECS0];
  		break;
  	case I915_PARAM_HAS_BSD2:
-		value = !!dev_priv->engine[VCS1];
+		value = !!i915->engine[VCS1];
  		break;
  	case I915_PARAM_HAS_LLC:
-		value = HAS_LLC(dev_priv);
+		value = HAS_LLC(i915);
  		break;
  	case I915_PARAM_HAS_WT:
-		value = HAS_WT(dev_priv);
+		value = HAS_WT(i915);
  		break;
  	case I915_PARAM_HAS_ALIASING_PPGTT:
-		value = INTEL_PPGTT(dev_priv);
+		value = INTEL_PPGTT(i915);
  		break;
  	case I915_PARAM_HAS_SEMAPHORES:
-		value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
+		value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
  		break;
  	case I915_PARAM_HAS_SECURE_BATCHES:
  		value = capable(CAP_SYS_ADMIN);
  		break;
  	case I915_PARAM_CMD_PARSER_VERSION:
-		value = i915_cmd_parser_get_version(dev_priv);
+		value = i915_cmd_parser_get_version(i915);
  		break;
  	case I915_PARAM_SUBSLICE_TOTAL:
  		value = intel_sseu_subslice_total(sseu);
@@ -397,21 +397,21 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
  		break;
  	case I915_PARAM_HAS_GPU_RESET:
  		value = i915_modparams.enable_hangcheck &&
-			intel_has_gpu_reset(dev_priv);
-		if (value && intel_has_reset_engine(dev_priv))
+			intel_has_gpu_reset(i915);
+		if (value && intel_has_reset_engine(i915))
  			value = 2;
  		break;
  	case I915_PARAM_HAS_RESOURCE_STREAMER:
  		value = 0;
  		break;
  	case I915_PARAM_HAS_POOLED_EU:
-		value = HAS_POOLED_EU(dev_priv);
+		value = HAS_POOLED_EU(i915);
  		break;
  	case I915_PARAM_MIN_EU_IN_POOL:
  		value = sseu->min_eu_in_pool;
  		break;
  	case I915_PARAM_HUC_STATUS:
-		value = intel_huc_check_status(&dev_priv->huc);
+		value = intel_huc_check_status(&i915->huc);
  		if (value < 0)
  			return value;
  		break;
@@ -423,7 +423,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
  		value = i915_gem_mmap_gtt_version();
  		break;
  	case I915_PARAM_HAS_SCHEDULER:
-		value = dev_priv->caps.scheduler;
+		value = i915->caps.scheduler;
  		break;
case I915_PARAM_MMAP_VERSION:
@@ -456,7 +456,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
  		value = 1;
  		break;
  	case I915_PARAM_HAS_CONTEXT_ISOLATION:
-		value = intel_engines_has_context_isolation(dev_priv);
+		value = intel_engines_has_context_isolation(i915);
  		break;
  	case I915_PARAM_SLICE_MASK:
  		value = sseu->slice_mask;
@@ -469,10 +469,10 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
  			return -ENODEV;
  		break;
  	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
-		value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
+		value = 1000 * RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
  		break;
  	case I915_PARAM_MMAP_GTT_COHERENT:
-		value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
+		value = INTEL_INFO(i915)->has_coherent_ggtt;
  		break;
  	default:
  		DRM_DEBUG("Unknown parameter %d\n", param->param);
@@ -485,13 +485,13 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
  	return 0;
  }
-static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
+static int i915_get_bridge_dev(struct drm_i915_private *i915)
  {
-	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
+	int domain = pci_domain_nr(i915->drm.pdev->bus);
- dev_priv->bridge_dev =
+	i915->bridge_dev =
  		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
-	if (!dev_priv->bridge_dev) {
+	if (!i915->bridge_dev) {
  		DRM_ERROR("bridge device not found\n");
  		return -1;
  	}
@@ -500,16 +500,16 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
/* Allocate space for the MCH regs if needed, return nonzero on error */
  static int
-intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
+intel_alloc_mchbar_resource(struct drm_i915_private *i915)
  {
-	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+	int reg = INTEL_GEN(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
  	u32 temp_lo, temp_hi = 0;
  	u64 mchbar_addr;
  	int ret;
- if (INTEL_GEN(dev_priv) >= 4)
-		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
-	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+	if (INTEL_GEN(i915) >= 4)
+		pci_read_config_dword(i915->bridge_dev, reg + 4, &temp_hi);
+	pci_read_config_dword(i915->bridge_dev, reg, &temp_lo);
  	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
@@ -520,47 +520,47 @@ intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
  #endif
/* Get some space for it */
-	dev_priv->mch_res.name = "i915 MCHBAR";
-	dev_priv->mch_res.flags = IORESOURCE_MEM;
-	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
-				     &dev_priv->mch_res,
+	i915->mch_res.name = "i915 MCHBAR";
+	i915->mch_res.flags = IORESOURCE_MEM;
+	ret = pci_bus_alloc_resource(i915->bridge_dev->bus,
+				     &i915->mch_res,
  				     MCHBAR_SIZE, MCHBAR_SIZE,
  				     PCIBIOS_MIN_MEM,
  				     0, pcibios_align_resource,
-				     dev_priv->bridge_dev);
+				     i915->bridge_dev);
  	if (ret) {
  		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
-		dev_priv->mch_res.start = 0;
+		i915->mch_res.start = 0;
  		return ret;
  	}
- if (INTEL_GEN(dev_priv) >= 4)
-		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
-				       upper_32_bits(dev_priv->mch_res.start));
+	if (INTEL_GEN(i915) >= 4)
+		pci_write_config_dword(i915->bridge_dev, reg + 4,
+				       upper_32_bits(i915->mch_res.start));
- pci_write_config_dword(dev_priv->bridge_dev, reg,
-			       lower_32_bits(dev_priv->mch_res.start));
+	pci_write_config_dword(i915->bridge_dev, reg,
+			       lower_32_bits(i915->mch_res.start));
  	return 0;
  }
/* Setup MCHBAR if possible, return true if we should disable it again */
  static void
-intel_setup_mchbar(struct drm_i915_private *dev_priv)
+intel_setup_mchbar(struct drm_i915_private *i915)
  {
-	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+	int mchbar_reg = INTEL_GEN(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
  	u32 temp;
  	bool enabled;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		return;
- dev_priv->mchbar_need_disable = false;
+	i915->mchbar_need_disable = false;
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
-		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
+	if (IS_I915G(i915) || IS_I915GM(i915)) {
+		pci_read_config_dword(i915->bridge_dev, DEVEN, &temp);
  		enabled = !!(temp & DEVEN_MCHBAR_EN);
  	} else {
-		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		pci_read_config_dword(i915->bridge_dev, mchbar_reg, &temp);
  		enabled = temp & 1;
  	}
@@ -568,56 +568,56 @@ intel_setup_mchbar(struct drm_i915_private *dev_priv)
  	if (enabled)
  		return;
- if (intel_alloc_mchbar_resource(dev_priv))
+	if (intel_alloc_mchbar_resource(i915))
  		return;
- dev_priv->mchbar_need_disable = true;
+	i915->mchbar_need_disable = true;
/* Space is allocated or reserved, so enable it. */
-	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
-		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+	if (IS_I915G(i915) || IS_I915GM(i915)) {
+		pci_write_config_dword(i915->bridge_dev, DEVEN,
  				       temp | DEVEN_MCHBAR_EN);
  	} else {
-		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+		pci_read_config_dword(i915->bridge_dev, mchbar_reg, &temp);
+		pci_write_config_dword(i915->bridge_dev, mchbar_reg, temp | 1);
  	}
  }
static void
-intel_teardown_mchbar(struct drm_i915_private *dev_priv)
+intel_teardown_mchbar(struct drm_i915_private *i915)
  {
-	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+	int mchbar_reg = INTEL_GEN(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- if (dev_priv->mchbar_need_disable) {
-		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
+	if (i915->mchbar_need_disable) {
+		if (IS_I915G(i915) || IS_I915GM(i915)) {
  			u32 deven_val;
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
+			pci_read_config_dword(i915->bridge_dev, DEVEN,
  					      &deven_val);
  			deven_val &= ~DEVEN_MCHBAR_EN;
-			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+			pci_write_config_dword(i915->bridge_dev, DEVEN,
  					       deven_val);
  		} else {
  			u32 mchbar_val;
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
+			pci_read_config_dword(i915->bridge_dev, mchbar_reg,
  					      &mchbar_val);
  			mchbar_val &= ~1;
-			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
+			pci_write_config_dword(i915->bridge_dev, mchbar_reg,
  					       mchbar_val);
  		}
  	}
- if (dev_priv->mch_res.start)
-		release_resource(&dev_priv->mch_res);
+	if (i915->mch_res.start)
+		release_resource(&i915->mch_res);
  }
/* true = enable decode, false = disable decoder */
  static unsigned int i915_vga_set_decode(void *cookie, bool state)
  {
-	struct drm_i915_private *dev_priv = cookie;
+	struct drm_i915_private *i915 = cookie;
- intel_modeset_vga_set_state(dev_priv, state);
+	intel_modeset_vga_set_state(i915, state);
  	if (state)
  		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
  		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -668,21 +668,21 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
static int i915_load_modeset_init(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct pci_dev *pdev = i915->drm.pdev;
  	int ret;
if (i915_inject_load_failure())
  		return -ENODEV;
- if (HAS_DISPLAY(dev_priv)) {
-		ret = drm_vblank_init(&dev_priv->drm,
-				      INTEL_INFO(dev_priv)->num_pipes);
+	if (HAS_DISPLAY(i915)) {
+		ret = drm_vblank_init(&i915->drm,
+				      INTEL_INFO(i915)->num_pipes);
  		if (ret)
  			goto out;
  	}
- intel_bios_init(dev_priv);
+	intel_bios_init(i915);
/* If we have > 1 VGA cards, then we need to arbitrate access
  	 * to the common VGA resources.
@@ -691,7 +691,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
  	 * then we do not take part in VGA arbitration and the
  	 * vga_client_register() fails with -ENODEV.
  	 */
-	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
+	ret = vga_client_register(pdev, i915, NULL, i915_vga_set_decode);
  	if (ret && ret != -ENODEV)
  		goto out;
@@ -702,17 +702,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
  		goto cleanup_vga_client;
/* must happen before intel_power_domains_init_hw() on VLV/CHV */
-	intel_update_rawclk(dev_priv);
+	intel_update_rawclk(i915);
- intel_power_domains_init_hw(dev_priv, false);
+	intel_power_domains_init_hw(i915, false);
- intel_csr_ucode_init(dev_priv);
+	intel_csr_ucode_init(i915);
- ret = intel_irq_install(dev_priv);
+	ret = intel_irq_install(i915);
  	if (ret)
  		goto cleanup_csr;
- intel_gmbus_setup(dev_priv);
+	intel_gmbus_setup(i915);
/* Important: The output setup functions called by modeset_init need
  	 * working irqs for e.g. gmbus and dp aux transfers. */
@@ -720,13 +720,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
  	if (ret)
  		goto cleanup_irq;
- ret = i915_gem_init(dev_priv);
+	ret = i915_gem_init(i915);
  	if (ret)
  		goto cleanup_modeset;
- intel_overlay_setup(dev_priv);
+	intel_overlay_setup(i915);
- if (!HAS_DISPLAY(dev_priv))
+	if (!HAS_DISPLAY(i915))
  		return 0;
ret = intel_fbdev_init(dev);
@@ -734,24 +734,24 @@ static int i915_load_modeset_init(struct drm_device *dev)
  		goto cleanup_gem;
/* Only enable hotplug handling once the fbdev is fully set up. */
-	intel_hpd_init(dev_priv);
+	intel_hpd_init(i915);
- intel_init_ipc(dev_priv);
+	intel_init_ipc(i915);
  	return 0;
  cleanup_gem:
-	i915_gem_suspend(dev_priv);
-	i915_gem_fini_hw(dev_priv);
-	i915_gem_fini(dev_priv);
+	i915_gem_suspend(i915);
+	i915_gem_fini_hw(i915);
+	i915_gem_fini(i915);
  cleanup_modeset:
  	intel_modeset_cleanup(dev);
  cleanup_irq:
  	drm_irq_uninstall(dev);
-	intel_gmbus_teardown(dev_priv);
+	intel_gmbus_teardown(i915);
  cleanup_csr:
-	intel_csr_ucode_fini(dev_priv);
-	intel_power_domains_fini_hw(dev_priv);
+	intel_csr_ucode_fini(i915);
+	intel_power_domains_fini_hw(i915);
  	vga_switcheroo_unregister_client(pdev);
  cleanup_vga_client:
  	vga_client_register(pdev, NULL, NULL, NULL);
@@ -759,11 +759,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
  	return ret;
  }
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *i915)
  {
  	struct apertures_struct *ap;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct pci_dev *pdev = i915->drm.pdev;
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	bool primary;
  	int ret;
@@ -784,22 +784,22 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
  	return ret;
  }
-static void intel_init_dpio(struct drm_i915_private *dev_priv)
+static void intel_init_dpio(struct drm_i915_private *i915)
  {
  	/*
  	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
  	 * CHV x1 PHY (DP/HDMI D)
  	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
  	 */
-	if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
  		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
-	} else if (IS_VALLEYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915)) {
  		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
  	}
  }
-static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+static int i915_workqueues_init(struct drm_i915_private *i915)
  {
  	/*
  	 * The i915 workqueue is primarily used for batched retirement of
@@ -815,18 +815,18 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
  	 * so there is no point in running more than one instance of the
  	 * workqueue at any time.  Use an ordered one.
  	 */
-	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
-	if (dev_priv->wq == NULL)
+	i915->wq = alloc_ordered_workqueue("i915", 0);
+	if (i915->wq == NULL)
  		goto out_err;
- dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-	if (dev_priv->hotplug.dp_wq == NULL)
+	i915->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+	if (i915->hotplug.dp_wq == NULL)
  		goto out_free_wq;
  	return 0;
  out_free_wq:
-	destroy_workqueue(dev_priv->wq);
+	destroy_workqueue(i915->wq);
  out_err:
  	DRM_ERROR("Failed to allocate workqueues.\n");
@@ -842,10 +842,10 @@ static void i915_engines_cleanup(struct drm_i915_private *i915)
  		kfree(engine);
  }
-static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+static void i915_workqueues_cleanup(struct drm_i915_private *i915)
  {
-	destroy_workqueue(dev_priv->hotplug.dp_wq);
-	destroy_workqueue(dev_priv->wq);
+	destroy_workqueue(i915->hotplug.dp_wq);
+	destroy_workqueue(i915->wq);
  }
/*
@@ -858,14 +858,14 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
   * (workarounds have a habit of persisting!). Anything older than that
   * should be removed along with the complications they introduce.
   */
-static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
+static void intel_detect_preproduction_hw(struct drm_i915_private *i915)
  {
  	bool pre = false;
- pre |= IS_HSW_EARLY_SDV(dev_priv);
-	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
-	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
-	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
+	pre |= IS_HSW_EARLY_SDV(i915);
+	pre |= IS_SKL_REVID(i915, 0, SKL_REVID_F0);
+	pre |= IS_BXT_REVID(i915, 0, BXT_REVID_B_LAST);
+	pre |= IS_KBL_REVID(i915, 0, KBL_REVID_A0);
if (pre) {
  		DRM_ERROR("This is a pre-production stepping. "
@@ -876,7 +876,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
/**
   * i915_driver_init_early - setup state not requiring device access
- * @dev_priv: device private
+ * @i915: device private
   *
   * Initialize everything that is a "SW-only" state, that is state not
   * requiring accessing the device or exposing the driver via kernel internal
@@ -884,152 +884,152 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
   * system memory allocation, setting up device specific attributes and
   * function hooks not requiring accessing the device.
   */
-static int i915_driver_init_early(struct drm_i915_private *dev_priv)
+static int i915_driver_init_early(struct drm_i915_private *i915)
  {
  	int ret = 0;
if (i915_inject_load_failure())
  		return -ENODEV;
- intel_device_info_subplatform_init(dev_priv);
+	intel_device_info_subplatform_init(i915);
- intel_uncore_init_early(&dev_priv->uncore);
+	intel_uncore_init_early(&i915->uncore);
- spin_lock_init(&dev_priv->irq_lock);
-	spin_lock_init(&dev_priv->gpu_error.lock);
-	mutex_init(&dev_priv->backlight_lock);
+	spin_lock_init(&i915->irq_lock);
+	spin_lock_init(&i915->gpu_error.lock);
+	mutex_init(&i915->backlight_lock);
- mutex_init(&dev_priv->sb_lock);
-	pm_qos_add_request(&dev_priv->sb_qos,
+	mutex_init(&i915->sb_lock);
+	pm_qos_add_request(&i915->sb_qos,
  			   PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
- mutex_init(&dev_priv->av_mutex);
-	mutex_init(&dev_priv->wm.wm_mutex);
-	mutex_init(&dev_priv->pps_mutex);
-	mutex_init(&dev_priv->hdcp_comp_mutex);
+	mutex_init(&i915->av_mutex);
+	mutex_init(&i915->wm.wm_mutex);
+	mutex_init(&i915->pps_mutex);
+	mutex_init(&i915->hdcp_comp_mutex);
- i915_memcpy_init_early(dev_priv);
-	intel_runtime_pm_init_early(dev_priv);
+	i915_memcpy_init_early(i915);
+	intel_runtime_pm_init_early(i915);
- ret = i915_workqueues_init(dev_priv);
+	ret = i915_workqueues_init(i915);
  	if (ret < 0)
  		goto err_engines;
- ret = i915_gem_init_early(dev_priv);
+	ret = i915_gem_init_early(i915);
  	if (ret < 0)
  		goto err_workqueues;
/* This must be called before any calls to HAS_PCH_* */
-	intel_detect_pch(dev_priv);
+	intel_detect_pch(i915);
- intel_wopcm_init_early(&dev_priv->wopcm);
-	intel_uc_init_early(dev_priv);
-	intel_pm_setup(dev_priv);
-	intel_init_dpio(dev_priv);
-	ret = intel_power_domains_init(dev_priv);
+	intel_wopcm_init_early(&i915->wopcm);
+	intel_uc_init_early(i915);
+	intel_pm_setup(i915);
+	intel_init_dpio(i915);
+	ret = intel_power_domains_init(i915);
  	if (ret < 0)
  		goto err_uc;
-	intel_irq_init(dev_priv);
-	intel_hangcheck_init(dev_priv);
-	intel_init_display_hooks(dev_priv);
-	intel_init_clock_gating_hooks(dev_priv);
-	intel_init_audio_hooks(dev_priv);
-	intel_display_crc_init(dev_priv);
+	intel_irq_init(i915);
+	intel_hangcheck_init(i915);
+	intel_init_display_hooks(i915);
+	intel_init_clock_gating_hooks(i915);
+	intel_init_audio_hooks(i915);
+	intel_display_crc_init(i915);
- intel_detect_preproduction_hw(dev_priv);
+	intel_detect_preproduction_hw(i915);
  	return 0;
  err_uc:
-	intel_uc_cleanup_early(dev_priv);
-	i915_gem_cleanup_early(dev_priv);
+	intel_uc_cleanup_early(i915);
+	i915_gem_cleanup_early(i915);
  err_workqueues:
-	i915_workqueues_cleanup(dev_priv);
+	i915_workqueues_cleanup(i915);
  err_engines:
-	i915_engines_cleanup(dev_priv);
+	i915_engines_cleanup(i915);
  	return ret;
  }
/**
   * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
- * @dev_priv: device private
+ * @i915: device private
   */
-static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+static void i915_driver_cleanup_early(struct drm_i915_private *i915)
  {
-	intel_irq_fini(dev_priv);
-	intel_power_domains_cleanup(dev_priv);
-	intel_uc_cleanup_early(dev_priv);
-	i915_gem_cleanup_early(dev_priv);
-	i915_workqueues_cleanup(dev_priv);
-	i915_engines_cleanup(dev_priv);
+	intel_irq_fini(i915);
+	intel_power_domains_cleanup(i915);
+	intel_uc_cleanup_early(i915);
+	i915_gem_cleanup_early(i915);
+	i915_workqueues_cleanup(i915);
+	i915_engines_cleanup(i915);
- pm_qos_remove_request(&dev_priv->sb_qos);
-	mutex_destroy(&dev_priv->sb_lock);
+	pm_qos_remove_request(&i915->sb_qos);
+	mutex_destroy(&i915->sb_lock);
  }
/**
   * i915_driver_init_mmio - setup device MMIO
- * @dev_priv: device private
+ * @i915: device private
   *
   * Setup minimal device state necessary for MMIO accesses later in the
   * initialization sequence. The setup here should avoid any other device-wide
   * side effects or exposing the driver via kernel internal or user space
   * interfaces.
   */
-static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
+static int i915_driver_init_mmio(struct drm_i915_private *i915)
  {
  	int ret;
if (i915_inject_load_failure())
  		return -ENODEV;
- if (i915_get_bridge_dev(dev_priv))
+	if (i915_get_bridge_dev(i915))
  		return -EIO;
- ret = intel_uncore_init_mmio(&dev_priv->uncore);
+	ret = intel_uncore_init_mmio(&i915->uncore);
  	if (ret < 0)
  		goto err_bridge;
/* Try to make sure MCHBAR is enabled before poking at it */
-	intel_setup_mchbar(dev_priv);
+	intel_setup_mchbar(i915);
- intel_device_info_init_mmio(dev_priv);
+	intel_device_info_init_mmio(i915);
- intel_uncore_prune_mmio_domains(&dev_priv->uncore);
+	intel_uncore_prune_mmio_domains(&i915->uncore);
- intel_uc_init_mmio(dev_priv);
+	intel_uc_init_mmio(i915);
- ret = intel_engines_init_mmio(dev_priv);
+	ret = intel_engines_init_mmio(i915);
  	if (ret)
  		goto err_uncore;
- i915_gem_init_mmio(dev_priv);
+	i915_gem_init_mmio(i915);
  	return 0;
  err_uncore:
-	intel_teardown_mchbar(dev_priv);
-	intel_uncore_fini_mmio(&dev_priv->uncore);
+	intel_teardown_mchbar(i915);
+	intel_uncore_fini_mmio(&i915->uncore);
  err_bridge:
-	pci_dev_put(dev_priv->bridge_dev);
+	pci_dev_put(i915->bridge_dev);
return ret;
  }
/**
   * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
- * @dev_priv: device private
+ * @i915: device private
   */
-static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+static void i915_driver_cleanup_mmio(struct drm_i915_private *i915)
  {
-	intel_teardown_mchbar(dev_priv);
-	intel_uncore_fini_mmio(&dev_priv->uncore);
-	pci_dev_put(dev_priv->bridge_dev);
+	intel_teardown_mchbar(i915);
+	intel_uncore_fini_mmio(&i915->uncore);
+	pci_dev_put(i915->bridge_dev);
  }
-static void intel_sanitize_options(struct drm_i915_private *dev_priv)
+static void intel_sanitize_options(struct drm_i915_private *i915)
  {
-	intel_gvt_sanitize_options(dev_priv);
+	intel_gvt_sanitize_options(i915);
  }
#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
@@ -1131,11 +1131,11 @@ skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
  }
static void
-skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
+skl_dram_get_dimm_info(struct drm_i915_private *i915,
  		       struct dram_dimm_info *dimm,
  		       int channel, char dimm_name, u16 val)
  {
-	if (INTEL_GEN(dev_priv) >= 10) {
+	if (INTEL_GEN(i915) >= 10) {
  		dimm->size = cnl_get_dimm_size(val);
  		dimm->width = cnl_get_dimm_width(val);
  		dimm->ranks = cnl_get_dimm_ranks(val);
@@ -1151,13 +1151,13 @@ skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
  }
static int
-skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
+skl_dram_get_channel_info(struct drm_i915_private *i915,
  			  struct dram_channel_info *ch,
  			  int channel, u32 val)
  {
-	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
+	skl_dram_get_dimm_info(i915, &ch->dimm_l,
  			       channel, 'L', val & 0xffff);
-	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
+	skl_dram_get_dimm_info(i915, &ch->dimm_s,
  			       channel, 'S', val >> 16);
if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
@@ -1192,20 +1192,20 @@ intel_is_dram_symmetric(const struct dram_channel_info *ch0,
  }
static int
-skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
+skl_dram_get_channels_info(struct drm_i915_private *i915)
  {
-	struct dram_info *dram_info = &dev_priv->dram_info;
+	struct dram_info *dram_info = &i915->dram_info;
  	struct dram_channel_info ch0 = {}, ch1 = {};
  	u32 val;
  	int ret;
val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
-	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
+	ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
  	if (ret == 0)
  		dram_info->num_channels++;
val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
-	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
+	ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
  	if (ret == 0)
  		dram_info->num_channels++;
@@ -1239,7 +1239,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
  }
static enum intel_dram_type
-skl_get_dram_type(struct drm_i915_private *dev_priv)
+skl_get_dram_type(struct drm_i915_private *i915)
  {
  	u32 val;
@@ -1261,16 +1261,16 @@ skl_get_dram_type(struct drm_i915_private *dev_priv)
  }
static int
-skl_get_dram_info(struct drm_i915_private *dev_priv)
+skl_get_dram_info(struct drm_i915_private *i915)
  {
-	struct dram_info *dram_info = &dev_priv->dram_info;
+	struct dram_info *dram_info = &i915->dram_info;
  	u32 mem_freq_khz, val;
  	int ret;
- dram_info->type = skl_get_dram_type(dev_priv);
+	dram_info->type = skl_get_dram_type(i915);
  	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
- ret = skl_dram_get_channels_info(dev_priv);
+	ret = skl_dram_get_channels_info(i915);
  	if (ret)
  		return ret;
@@ -1370,9 +1370,9 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
  }
static int
-bxt_get_dram_info(struct drm_i915_private *dev_priv)
+bxt_get_dram_info(struct drm_i915_private *i915)
  {
-	struct dram_info *dram_info = &dev_priv->dram_info;
+	struct dram_info *dram_info = &i915->dram_info;
  	u32 dram_channels;
  	u32 mem_freq_khz, val;
  	u8 num_active_channels;
@@ -1443,9 +1443,9 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
  }
static void
-intel_get_dram_info(struct drm_i915_private *dev_priv)
+intel_get_dram_info(struct drm_i915_private *i915)
  {
-	struct dram_info *dram_info = &dev_priv->dram_info;
+	struct dram_info *dram_info = &i915->dram_info;
  	int ret;
/*
@@ -1453,15 +1453,15 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
  	 * This is only used for the level 0 watermark latency
  	 * w/a which does not apply to bxt/glk.
  	 */
-	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
+	dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
- if (INTEL_GEN(dev_priv) < 9)
+	if (INTEL_GEN(i915) < 9)
  		return;
- if (IS_GEN9_LP(dev_priv))
-		ret = bxt_get_dram_info(dev_priv);
+	if (IS_GEN9_LP(i915))
+		ret = bxt_get_dram_info(i915);
  	else
-		ret = skl_get_dram_info(dev_priv);
+		ret = skl_get_dram_info(i915);
  	if (ret)
  		return;
@@ -1473,7 +1473,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
  		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
  }
-static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
+static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
  {
  	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
  	const unsigned int sets[4] = { 1, 1, 2, 2 };
@@ -1483,16 +1483,16 @@ static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
  		sets[EDRAM_SETS_IDX(cap)];
  }
-static void edram_detect(struct drm_i915_private *dev_priv)
+static void edram_detect(struct drm_i915_private *i915)
  {
  	u32 edram_cap = 0;
- if (!(IS_HASWELL(dev_priv) ||
-	      IS_BROADWELL(dev_priv) ||
-	      INTEL_GEN(dev_priv) >= 9))
+	if (!(IS_HASWELL(i915) ||
+	      IS_BROADWELL(i915) ||
+	      INTEL_GEN(i915) >= 9))
  		return;
- edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
+	edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
/* NB: We can't write IDICR yet because we don't have gt funcs set up */ @@ -1503,63 +1503,63 @@ static void edram_detect(struct drm_i915_private *dev_priv)
  	 * The needed capability bits for size calculation are not there with
  	 * pre gen9 so return 128MB always.
  	 */
-	if (INTEL_GEN(dev_priv) < 9)
-		dev_priv->edram_size_mb = 128;
+	if (INTEL_GEN(i915) < 9)
+		i915->edram_size_mb = 128;
  	else
-		dev_priv->edram_size_mb =
-			gen9_edram_size_mb(dev_priv, edram_cap);
+		i915->edram_size_mb =
+			gen9_edram_size_mb(i915, edram_cap);
- DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
+	DRM_INFO("Found %uMB of eDRAM\n", i915->edram_size_mb);
  }
/**
   * i915_driver_init_hw - setup state requiring device access
- * @dev_priv: device private
+ * @i915: device private
   *
   * Setup state that requires accessing the device, but doesn't require
   * exposing the driver via kernel internal or userspace interfaces.
   */
-static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+static int i915_driver_init_hw(struct drm_i915_private *i915)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	int ret;
if (i915_inject_load_failure())
  		return -ENODEV;
- intel_device_info_runtime_init(dev_priv);
+	intel_device_info_runtime_init(i915);
- if (HAS_PPGTT(dev_priv)) {
-		if (intel_vgpu_active(dev_priv) &&
-		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
-			i915_report_error(dev_priv,
+	if (HAS_PPGTT(i915)) {
+		if (intel_vgpu_active(i915) &&
+		    !intel_vgpu_has_full_ppgtt(i915)) {
+			i915_report_error(i915,
  					  "incompatible vGPU found, support for isolated ppGTT required\n");
  			return -ENXIO;
  		}
  	}
- if (HAS_EXECLISTS(dev_priv)) {
+	if (HAS_EXECLISTS(i915)) {
  		/*
  		 * Older GVT emulation depends upon intercepting CSB mmio,
  		 * which we no longer use, preferring to use the HWSP cache
  		 * instead.
  		 */
-		if (intel_vgpu_active(dev_priv) &&
-		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
-			i915_report_error(dev_priv,
+		if (intel_vgpu_active(i915) &&
+		    !intel_vgpu_has_hwsp_emulation(i915)) {
+			i915_report_error(i915,
  					  "old vGPU host found, support for HWSP emulation required\n");
  			return -ENXIO;
  		}
  	}
- intel_sanitize_options(dev_priv);
+	intel_sanitize_options(i915);
/* needs to be done before ggtt probe */
-	edram_detect(dev_priv);
+	edram_detect(i915);
- i915_perf_init(dev_priv);
+	i915_perf_init(i915);
- ret = i915_ggtt_probe_hw(dev_priv);
+	ret = i915_ggtt_probe_hw(i915);
  	if (ret)
  		goto err_perf;
@@ -1567,7 +1567,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
  	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
  	 * otherwise the vga fbdev driver falls over.
  	 */
-	ret = i915_kick_out_firmware_fb(dev_priv);
+	ret = i915_kick_out_firmware_fb(i915);
  	if (ret) {
  		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
  		goto err_ggtt;
@@ -1579,11 +1579,11 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
  		goto err_ggtt;
  	}
- ret = i915_ggtt_init_hw(dev_priv);
+	ret = i915_ggtt_init_hw(i915);
  	if (ret)
  		goto err_ggtt;
- ret = i915_ggtt_enable_hw(dev_priv);
+	ret = i915_ggtt_enable_hw(i915);
  	if (ret) {
  		DRM_ERROR("failed to enable GGTT\n");
  		goto err_ggtt;
@@ -1592,7 +1592,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
  	pci_set_master(pdev);
/* overlay on gen2 is broken and can't address above 1G */
-	if (IS_GEN(dev_priv, 2)) {
+	if (IS_GEN(i915, 2)) {
  		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
  		if (ret) {
  			DRM_ERROR("failed to set DMA mask\n");
@@ -1609,7 +1609,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
  	 * behaviour if any general state is accessed within a page above 4GB,
  	 * which also needs to be handled carefully.
  	 */
-	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
+	if (IS_I965G(i915) || IS_I965GM(i915)) {
  		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1619,13 +1619,13 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
  		}
  	}
- pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+	pm_qos_add_request(&i915->pm_qos, PM_QOS_CPU_DMA_LATENCY,
  			   PM_QOS_DEFAULT_VALUE);
- intel_uncore_sanitize(dev_priv);
+	intel_uncore_sanitize(i915);
- intel_gt_init_workarounds(dev_priv);
-	i915_gem_load_init_fences(dev_priv);
+	intel_gt_init_workarounds(i915);
+	i915_gem_load_init_fences(i915);
/* On the 945G/GM, the chipset reports the MSI capability on the
  	 * integrated graphics even though the support isn't actually there
@@ -1646,94 +1646,94 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
  	 * device. The kernel then disables that interrupt source and so
  	 * prevents the other device from working properly.
  	 */
-	if (INTEL_GEN(dev_priv) >= 5) {
+	if (INTEL_GEN(i915) >= 5) {
  		if (pci_enable_msi(pdev) < 0)
  			DRM_DEBUG_DRIVER("can't enable MSI");
  	}
- ret = intel_gvt_init(dev_priv);
+	ret = intel_gvt_init(i915);
  	if (ret)
  		goto err_msi;
- intel_opregion_setup(dev_priv);
+	intel_opregion_setup(i915);
  	/*
  	 * Fill the dram structure to get the system raw bandwidth and
  	 * dram info. This will be used for memory latency calculation.
  	 */
-	intel_get_dram_info(dev_priv);
+	intel_get_dram_info(i915);
- intel_bw_init_hw(dev_priv);
+	intel_bw_init_hw(i915);
  	return 0;
  err_msi:
  	if (pdev->msi_enabled)
  		pci_disable_msi(pdev);
-	pm_qos_remove_request(&dev_priv->pm_qos);
+	pm_qos_remove_request(&i915->pm_qos);
  err_ggtt:
-	i915_ggtt_cleanup_hw(dev_priv);
+	i915_ggtt_cleanup_hw(i915);
  err_perf:
-	i915_perf_fini(dev_priv);
+	i915_perf_fini(i915);
  	return ret;
  }
/**
   * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
- * @dev_priv: device private
+ * @i915: device private
   */
-static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+static void i915_driver_cleanup_hw(struct drm_i915_private *i915)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
- i915_perf_fini(dev_priv);
+	i915_perf_fini(i915);
if (pdev->msi_enabled)
  		pci_disable_msi(pdev);
- pm_qos_remove_request(&dev_priv->pm_qos);
+	pm_qos_remove_request(&i915->pm_qos);
  }
/**
   * i915_driver_register - register the driver with the rest of the system
- * @dev_priv: device private
+ * @i915: device private
   *
   * Perform any steps necessary to make the driver available via kernel
   * internal or userspace interfaces.
   */
-static void i915_driver_register(struct drm_i915_private *dev_priv)
+static void i915_driver_register(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
- i915_gem_shrinker_register(dev_priv);
-	i915_pmu_register(dev_priv);
+	i915_gem_shrinker_register(i915);
+	i915_pmu_register(i915);
/*
  	 * Notify a valid surface after modesetting,
  	 * when running inside a VM.
  	 */
-	if (intel_vgpu_active(dev_priv))
+	if (intel_vgpu_active(i915))
  		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
/* Reveal our presence to userspace */
  	if (drm_dev_register(dev, 0) == 0) {
-		i915_debugfs_register(dev_priv);
-		i915_setup_sysfs(dev_priv);
+		i915_debugfs_register(i915);
+		i915_setup_sysfs(i915);
/* Depends on sysfs having been initialized */
-		i915_perf_register(dev_priv);
+		i915_perf_register(i915);
  	} else
  		DRM_ERROR("Failed to register driver for userspace access!\n");
- if (HAS_DISPLAY(dev_priv)) {
+	if (HAS_DISPLAY(i915)) {
  		/* Must be done after probing outputs */
-		intel_opregion_register(dev_priv);
+		intel_opregion_register(i915);
  		acpi_video_register();
  	}
- if (IS_GEN(dev_priv, 5))
-		intel_gpu_ips_init(dev_priv);
+	if (IS_GEN(i915, 5))
+		intel_gpu_ips_init(i915);
- intel_audio_init(dev_priv);
+	intel_audio_init(i915);
/*
  	 * Some ports require correctly set-up hpd registers for detection to
@@ -1748,60 +1748,60 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
  	 * We need to coordinate the hotplugs with the asynchronous fbdev
  	 * configuration, for which we use the fbdev->async_cookie.
  	 */
-	if (HAS_DISPLAY(dev_priv))
+	if (HAS_DISPLAY(i915))
  		drm_kms_helper_poll_init(dev);
- intel_power_domains_enable(dev_priv);
-	intel_runtime_pm_enable(dev_priv);
+	intel_power_domains_enable(i915);
+	intel_runtime_pm_enable(i915);
  }
/**
   * i915_driver_unregister - cleanup the registration done in i915_driver_regiser()
- * @dev_priv: device private
+ * @i915: device private
   */
-static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+static void i915_driver_unregister(struct drm_i915_private *i915)
  {
-	intel_runtime_pm_disable(dev_priv);
-	intel_power_domains_disable(dev_priv);
+	intel_runtime_pm_disable(i915);
+	intel_power_domains_disable(i915);
- intel_fbdev_unregister(dev_priv);
-	intel_audio_deinit(dev_priv);
+	intel_fbdev_unregister(i915);
+	intel_audio_deinit(i915);
/*
  	 * After flushing the fbdev (incl. a late async config which will
  	 * have delayed queuing of a hotplug event), then flush the hotplug
  	 * events.
  	 */
-	drm_kms_helper_poll_fini(&dev_priv->drm);
+	drm_kms_helper_poll_fini(&i915->drm);
intel_gpu_ips_teardown();
  	acpi_video_unregister();
-	intel_opregion_unregister(dev_priv);
+	intel_opregion_unregister(i915);
- i915_perf_unregister(dev_priv);
-	i915_pmu_unregister(dev_priv);
+	i915_perf_unregister(i915);
+	i915_pmu_unregister(i915);
- i915_teardown_sysfs(dev_priv);
-	drm_dev_unplug(&dev_priv->drm);
+	i915_teardown_sysfs(i915);
+	drm_dev_unplug(&i915->drm);
- i915_gem_shrinker_unregister(dev_priv);
+	i915_gem_shrinker_unregister(i915);
  }
-static void i915_welcome_messages(struct drm_i915_private *dev_priv)
+static void i915_welcome_messages(struct drm_i915_private *i915)
  {
  	if (drm_debug & DRM_UT_DRIVER) {
  		struct drm_printer p = drm_debug_printer("i915 device info:");
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
-			   INTEL_DEVID(dev_priv),
-			   INTEL_REVID(dev_priv),
-			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
-			   intel_subplatform(RUNTIME_INFO(dev_priv),
-					     INTEL_INFO(dev_priv)->platform),
-			   INTEL_GEN(dev_priv));
-
-		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
-		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
+			   INTEL_DEVID(i915),
+			   INTEL_REVID(i915),
+			   intel_platform_name(INTEL_INFO(i915)->platform),
+			   intel_subplatform(RUNTIME_INFO(i915),
+					     INTEL_INFO(i915)->platform),
+			   INTEL_GEN(i915));
+
+		intel_device_info_dump_flags(INTEL_INFO(i915), &p);
+		intel_device_info_dump_runtime(RUNTIME_INFO(i915), &p);
  	}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -1871,126 +1871,126 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
  	const struct intel_device_info *match_info =
  		(struct intel_device_info *)ent->driver_data;
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
  	int ret;
- dev_priv = i915_driver_create(pdev, ent);
-	if (IS_ERR(dev_priv))
-		return PTR_ERR(dev_priv);
+	i915 = i915_driver_create(pdev, ent);
+	if (IS_ERR(i915))
+		return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
  	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
-		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
+		i915->drm.driver_features &= ~DRIVER_ATOMIC;
ret = pci_enable_device(pdev);
  	if (ret)
  		goto out_fini;
- ret = i915_driver_init_early(dev_priv);
+	ret = i915_driver_init_early(i915);
  	if (ret < 0)
  		goto out_pci_disable;
- disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
- ret = i915_driver_init_mmio(dev_priv);
+	ret = i915_driver_init_mmio(i915);
  	if (ret < 0)
  		goto out_runtime_pm_put;
- ret = i915_driver_init_hw(dev_priv);
+	ret = i915_driver_init_hw(i915);
  	if (ret < 0)
  		goto out_cleanup_mmio;
- ret = i915_load_modeset_init(&dev_priv->drm);
+	ret = i915_load_modeset_init(&i915->drm);
  	if (ret < 0)
  		goto out_cleanup_hw;
- i915_driver_register(dev_priv);
+	i915_driver_register(i915);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
- i915_welcome_messages(dev_priv);
+	i915_welcome_messages(i915);
  	return 0;
  out_cleanup_hw:
-	i915_driver_cleanup_hw(dev_priv);
-	i915_ggtt_cleanup_hw(dev_priv);
+	i915_driver_cleanup_hw(i915);
+	i915_ggtt_cleanup_hw(i915);
  out_cleanup_mmio:
-	i915_driver_cleanup_mmio(dev_priv);
+	i915_driver_cleanup_mmio(i915);
  out_runtime_pm_put:
-	enable_rpm_wakeref_asserts(dev_priv);
-	i915_driver_cleanup_early(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
+	i915_driver_cleanup_early(i915);
  out_pci_disable:
  	pci_disable_device(pdev);
  out_fini:
-	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
-	i915_driver_destroy(dev_priv);
+	i915_load_error(i915, "Device initialization failed (%d)\n", ret);
+	i915_driver_destroy(i915);
  	return ret;
  }
void i915_driver_unload(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct pci_dev *pdev = i915->drm.pdev;
- disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
- i915_driver_unregister(dev_priv);
+	i915_driver_unregister(i915);
/*
  	 * After unregistering the device to prevent any new users, cancel
  	 * all in-flight requests so that we can quickly unbind the active
  	 * resources.
  	 */
-	i915_gem_set_wedged(dev_priv);
+	i915_gem_set_wedged(i915);
/* Flush any external code that still may be under the RCU lock */
  	synchronize_rcu();
- i915_gem_suspend(dev_priv);
+	i915_gem_suspend(i915);
  	drm_atomic_helper_shutdown(dev);
-	intel_gvt_cleanup(dev_priv);
+	intel_gvt_cleanup(i915);
  	intel_modeset_cleanup(dev);
-	intel_bios_cleanup(dev_priv);
+	intel_bios_cleanup(i915);
vga_switcheroo_unregister_client(pdev);
  	vga_client_register(pdev, NULL, NULL, NULL);
- intel_csr_ucode_fini(dev_priv);
+	intel_csr_ucode_fini(i915);
/* Free error state after interrupts are fully disabled. */
-	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-	i915_reset_error_state(dev_priv);
+	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+	i915_reset_error_state(i915);
- i915_gem_fini_hw(dev_priv);
+	i915_gem_fini_hw(i915);
- intel_power_domains_fini_hw(dev_priv);
+	intel_power_domains_fini_hw(i915);
- i915_driver_cleanup_hw(dev_priv);
+	i915_driver_cleanup_hw(i915);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
  }
static void i915_driver_release(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
- disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
- i915_gem_fini(dev_priv);
+	i915_gem_fini(i915);
- i915_ggtt_cleanup_hw(dev_priv);
-	i915_driver_cleanup_mmio(dev_priv);
+	i915_ggtt_cleanup_hw(i915);
+	i915_driver_cleanup_mmio(i915);
- enable_rpm_wakeref_asserts(dev_priv);
-	intel_runtime_pm_cleanup(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
+	intel_runtime_pm_cleanup(i915);
- i915_driver_cleanup_early(dev_priv);
-	i915_driver_destroy(dev_priv);
+	i915_driver_cleanup_early(i915);
+	i915_driver_destroy(i915);
  }
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -2035,9 +2035,9 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
  	kfree(file_priv);
  }
-static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+static void intel_suspend_encoders(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct intel_encoder *encoder;
drm_modeset_lock_all(dev);
@@ -2047,11 +2047,11 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
  	drm_modeset_unlock_all(dev);
  }
-static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+static int vlv_resume_prepare(struct drm_i915_private *i915,
  			      bool rpm_resume);
-static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
+static int vlv_suspend_complete(struct drm_i915_private *i915);
-static bool suspend_to_idle(struct drm_i915_private *dev_priv)
+static bool suspend_to_idle(struct drm_i915_private *i915)
  {
  #if IS_ENABLED(CONFIG_ACPI_SLEEP)
  	if (acpi_target_system_state() < ACPI_STATE_S3)
@@ -2077,15 +2077,15 @@ static int i915_drm_prepare(struct drm_device *dev)
static int i915_drm_suspend(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct pci_dev *pdev = i915->drm.pdev;
  	pci_power_t opregion_target_state;
- disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
/* We do a lot of poking in a lot of registers, make sure they work
  	 * properly. */
-	intel_power_domains_disable(dev_priv);
+	intel_power_domains_disable(i915);
  	drm_kms_helper_poll_disable(dev);
@@ -2093,40 +2093,40 @@ static int i915_drm_suspend(struct drm_device *dev)
  	intel_display_suspend(dev);
-	intel_dp_mst_suspend(dev_priv);
+	intel_dp_mst_suspend(i915);
- intel_runtime_pm_disable_interrupts(dev_priv);
-	intel_hpd_cancel_work(dev_priv);
+	intel_runtime_pm_disable_interrupts(i915);
+	intel_hpd_cancel_work(i915);
- intel_suspend_encoders(dev_priv);
+	intel_suspend_encoders(i915);
- intel_suspend_hw(dev_priv);
+	intel_suspend_hw(i915);
- i915_gem_suspend_gtt_mappings(dev_priv);
+	i915_gem_suspend_gtt_mappings(i915);
- i915_save_state(dev_priv);
+	i915_save_state(i915);
- opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
-	intel_opregion_suspend(dev_priv, opregion_target_state);
+	opregion_target_state = suspend_to_idle(i915) ? PCI_D1 : PCI_D3cold;
+	intel_opregion_suspend(i915, opregion_target_state);
  	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
-	dev_priv->suspend_count++;
+	i915->suspend_count++;
- intel_csr_ucode_suspend(dev_priv);
+	intel_csr_ucode_suspend(i915);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
return 0;
  }
static enum i915_drm_suspend_mode
-get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
+get_suspend_mode(struct drm_i915_private *i915, bool hibernate)
  {
  	if (hibernate)
  		return I915_DRM_SUSPEND_HIBERNATE;
- if (suspend_to_idle(dev_priv))
+	if (suspend_to_idle(i915))
  		return I915_DRM_SUSPEND_IDLE;
return I915_DRM_SUSPEND_MEM;
@@ -2134,30 +2134,30 @@ get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct pci_dev *pdev = i915->drm.pdev;
  	int ret;
- disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
- i915_gem_suspend_late(dev_priv);
+	i915_gem_suspend_late(i915);
- intel_uncore_suspend(&dev_priv->uncore);
+	intel_uncore_suspend(&i915->uncore);
- intel_power_domains_suspend(dev_priv,
-				    get_suspend_mode(dev_priv, hibernation));
+	intel_power_domains_suspend(i915,
+				    get_suspend_mode(i915, hibernation));
ret = 0;
-	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
-		bxt_enable_dc9(dev_priv);
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		hsw_enable_pc8(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		ret = vlv_suspend_complete(dev_priv);
+	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
+		bxt_enable_dc9(i915);
+	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+		hsw_enable_pc8(i915);
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		ret = vlv_suspend_complete(i915);
if (ret) {
  		DRM_ERROR("Suspend complete failed: %d\n", ret);
-		intel_power_domains_resume(dev_priv);
+		intel_power_domains_resume(i915);
goto out;
  	}
@@ -2175,13 +2175,13 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
  	 * Fujitsu FSC S7110
  	 * Acer Aspire 1830T
  	 */
-	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
+	if (!(hibernation && INTEL_GEN(i915) < 6))
  		pci_set_power_state(pdev, PCI_D3hot);
out:
-	enable_rpm_wakeref_asserts(dev_priv);
-	if (!dev_priv->uncore.user_forcewake.count)
-		intel_runtime_pm_cleanup(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
+	if (!i915->uncore.user_forcewake.count)
+		intel_runtime_pm_cleanup(i915);
return ret;
  }
@@ -2212,24 +2212,24 @@ static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
static int i915_drm_resume(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int ret;
- disable_rpm_wakeref_asserts(dev_priv);
-	intel_sanitize_gt_powersave(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
+	intel_sanitize_gt_powersave(i915);
- i915_gem_sanitize(dev_priv);
+	i915_gem_sanitize(i915);
- ret = i915_ggtt_enable_hw(dev_priv);
+	ret = i915_ggtt_enable_hw(i915);
  	if (ret)
  		DRM_ERROR("failed to re-enable GGTT\n");
- intel_csr_ucode_resume(dev_priv);
+	intel_csr_ucode_resume(i915);
- i915_restore_state(dev_priv);
-	intel_pps_unlock_regs_wa(dev_priv);
+	i915_restore_state(i915);
+	intel_pps_unlock_regs_wa(i915);
- intel_init_pch_refclk(dev_priv);
+	intel_init_pch_refclk(i915);
/*
  	 * Interrupts have to be enabled before any batches are run. If not the
@@ -2241,21 +2241,21 @@ static int i915_drm_resume(struct drm_device *dev)
  	 * Modeset enabling in intel_modeset_init_hw() also needs working
  	 * interrupts.
  	 */
-	intel_runtime_pm_enable_interrupts(dev_priv);
+	intel_runtime_pm_enable_interrupts(i915);
  	drm_mode_config_reset(dev);
-	i915_gem_resume(dev_priv);
+	i915_gem_resume(i915);
intel_modeset_init_hw(dev);
-	intel_init_clock_gating(dev_priv);
+	intel_init_clock_gating(i915);
- spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	if (i915->display.hpd_irq_setup)
+		i915->display.hpd_irq_setup(i915);
+	spin_unlock_irq(&i915->irq_lock);
- intel_dp_mst_resume(dev_priv);
+	intel_dp_mst_resume(i915);
  	intel_display_resume(dev);
@@ -2267,23 +2267,23 @@ static int i915_drm_resume(struct drm_device *dev)
  	 * bother with the tiny race here where we might lose hotplug
  	 * notifications.
  	 * */
-	intel_hpd_init(dev_priv);
+	intel_hpd_init(i915);
- intel_opregion_resume(dev_priv);
+	intel_opregion_resume(i915);
  	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
-	intel_power_domains_enable(dev_priv);
+	intel_power_domains_enable(i915);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
return 0;
  }
static int i915_drm_resume_early(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct pci_dev *pdev = i915->drm.pdev;
  	int ret;
/*
@@ -2330,32 +2330,32 @@ static int i915_drm_resume_early(struct drm_device *dev)
  	pci_set_master(pdev);
-	disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		ret = vlv_resume_prepare(dev_priv, false);
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		ret = vlv_resume_prepare(i915, false);
  	if (ret)
  		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
  			  ret);
- intel_uncore_resume_early(&dev_priv->uncore);
+	intel_uncore_resume_early(&i915->uncore);
- i915_check_and_clear_faults(dev_priv);
+	i915_check_and_clear_faults(i915);
- if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
-		gen9_sanitize_dc_state(dev_priv);
-		bxt_disable_dc9(dev_priv);
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		hsw_disable_pc8(dev_priv);
+	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
+		gen9_sanitize_dc_state(i915);
+		bxt_disable_dc9(i915);
+	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+		hsw_disable_pc8(i915);
  	}
- intel_uncore_sanitize(dev_priv);
+	intel_uncore_sanitize(i915);
- intel_power_domains_resume(dev_priv);
+	intel_power_domains_resume(i915);
- intel_gt_sanitize(dev_priv, true);
+	intel_gt_sanitize(i915, true);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
return ret;
  }
@@ -2540,9 +2540,9 @@ static int i915_pm_restore(struct device *kdev)
   * a black-box for the driver. Further investigation is needed to reduce the
   * saved/restored registers even further, by following the same 3 criteria.
   */
-static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *i915)
  {
-	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+	struct vlv_s0ix_state *s = &i915->vlv_s0ix_state;
  	int i;
/* GAM 0x4000-0x4770 */
@@ -2621,9 +2621,9 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
  	 */
  }
-static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
  {
-	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+	struct vlv_s0ix_state *s = &i915->vlv_s0ix_state;
  	u32 val;
  	int i;
@@ -2732,7 +2732,7 @@ static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
  	return ret;
  }
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
+int vlv_force_gfx_clock(struct drm_i915_private *i915, bool force_on)
  {
  	u32 val;
  	int err;
@@ -2746,7 +2746,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
  	if (!force_on)
  		return 0;
- err = intel_wait_for_register(&dev_priv->uncore,
+	err = intel_wait_for_register(&i915->uncore,
  				      VLV_GTLC_SURVIVABILITY_REG,
  				      VLV_GFX_CLK_STATUS_BIT,
  				      VLV_GFX_CLK_STATUS_BIT,
@@ -2758,7 +2758,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
  	return err;
  }
-static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
+static int vlv_allow_gt_wake(struct drm_i915_private *i915, bool allow)
  {
  	u32 mask;
  	u32 val;
@@ -2774,14 +2774,14 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
  	mask = VLV_GTLC_ALLOWWAKEACK;
  	val = allow ? mask : 0;
- err = vlv_wait_for_pw_status(dev_priv, mask, val);
+	err = vlv_wait_for_pw_status(i915, mask, val);
  	if (err)
  		DRM_ERROR("timeout disabling GT waking\n");
return err;
  }
-static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+static void vlv_wait_for_gt_wells(struct drm_i915_private *i915,
  				  bool wait_for_on)
  {
  	u32 mask;
@@ -2797,12 +2797,12 @@ static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
  	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
  	 * reset and we are trying to force the machine to sleep.
  	 */
-	if (vlv_wait_for_pw_status(dev_priv, mask, val))
+	if (vlv_wait_for_pw_status(i915, mask, val))
  		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
  				 onoff(wait_for_on));
  }
-static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
+static void vlv_check_no_gt_access(struct drm_i915_private *i915)
  {
  	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
  		return;
@@ -2811,7 +2811,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
  	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
  }
-static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
+static int vlv_suspend_complete(struct drm_i915_private *i915)
  {
  	u32 mask;
  	int err;
@@ -2820,25 +2820,25 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
  	 * Bspec defines the following GT well on flags as debug only, so
  	 * don't treat them as hard failures.
  	 */
-	vlv_wait_for_gt_wells(dev_priv, false);
+	vlv_wait_for_gt_wells(i915, false);
mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
  	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
- vlv_check_no_gt_access(dev_priv);
+	vlv_check_no_gt_access(i915);
- err = vlv_force_gfx_clock(dev_priv, true);
+	err = vlv_force_gfx_clock(i915, true);
  	if (err)
  		goto err1;
- err = vlv_allow_gt_wake(dev_priv, false);
+	err = vlv_allow_gt_wake(i915, false);
  	if (err)
  		goto err2;
- if (!IS_CHERRYVIEW(dev_priv))
-		vlv_save_gunit_s0ix_state(dev_priv);
+	if (!IS_CHERRYVIEW(i915))
+		vlv_save_gunit_s0ix_state(i915);
- err = vlv_force_gfx_clock(dev_priv, false);
+	err = vlv_force_gfx_clock(i915, false);
  	if (err)
  		goto err2;
@@ -2846,14 +2846,14 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
  err2:
  	/* For safety always re-enable waking and disable gfx clock forcing */
-	vlv_allow_gt_wake(dev_priv, true);
+	vlv_allow_gt_wake(i915, true);
  err1:
-	vlv_force_gfx_clock(dev_priv, false);
+	vlv_force_gfx_clock(i915, false);
return err;
  }
-static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+static int vlv_resume_prepare(struct drm_i915_private *i915,
  				bool rpm_resume)
  {
  	int err;
@@ -2864,23 +2864,23 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
  	 * can do at this point. Return the first error code (which will also
  	 * leave RPM permanently disabled).
  	 */
-	ret = vlv_force_gfx_clock(dev_priv, true);
+	ret = vlv_force_gfx_clock(i915, true);
- if (!IS_CHERRYVIEW(dev_priv))
-		vlv_restore_gunit_s0ix_state(dev_priv);
+	if (!IS_CHERRYVIEW(i915))
+		vlv_restore_gunit_s0ix_state(i915);
- err = vlv_allow_gt_wake(dev_priv, true);
+	err = vlv_allow_gt_wake(i915, true);
  	if (!ret)
  		ret = err;
- err = vlv_force_gfx_clock(dev_priv, false);
+	err = vlv_force_gfx_clock(i915, false);
  	if (!ret)
  		ret = err;
- vlv_check_no_gt_access(dev_priv);
+	vlv_check_no_gt_access(i915);
if (rpm_resume)
-		intel_init_clock_gating(dev_priv);
+		intel_init_clock_gating(i915);
return ret;
  }
@@ -2889,80 +2889,80 @@ static int intel_runtime_suspend(struct device *kdev)
  {
  	struct pci_dev *pdev = to_pci_dev(kdev);
  	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int ret;
- if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
+	if (WARN_ON_ONCE(!(i915->gt_pm.rc6.enabled && HAS_RC6(i915))))
  		return -ENODEV;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(i915)))
  		return -ENODEV;
DRM_DEBUG_KMS("Suspending device\n"); - disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
/*
  	 * We are safe here against re-faults, since the fault handler takes
  	 * an RPM reference.
  	 */
-	i915_gem_runtime_suspend(dev_priv);
+	i915_gem_runtime_suspend(i915);
- intel_uc_runtime_suspend(dev_priv);
+	intel_uc_runtime_suspend(i915);
- intel_runtime_pm_disable_interrupts(dev_priv);
+	intel_runtime_pm_disable_interrupts(i915);
- intel_uncore_suspend(&dev_priv->uncore);
+	intel_uncore_suspend(&i915->uncore);
ret = 0;
-	if (INTEL_GEN(dev_priv) >= 11) {
-		icl_display_core_uninit(dev_priv);
-		bxt_enable_dc9(dev_priv);
-	} else if (IS_GEN9_LP(dev_priv)) {
-		bxt_display_core_uninit(dev_priv);
-		bxt_enable_dc9(dev_priv);
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		hsw_enable_pc8(dev_priv);
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		ret = vlv_suspend_complete(dev_priv);
+	if (INTEL_GEN(i915) >= 11) {
+		icl_display_core_uninit(i915);
+		bxt_enable_dc9(i915);
+	} else if (IS_GEN9_LP(i915)) {
+		bxt_display_core_uninit(i915);
+		bxt_enable_dc9(i915);
+	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+		hsw_enable_pc8(i915);
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		ret = vlv_suspend_complete(i915);
  	}
if (ret) {
  		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
-		intel_uncore_runtime_resume(&dev_priv->uncore);
+		intel_uncore_runtime_resume(&i915->uncore);
- intel_runtime_pm_enable_interrupts(dev_priv);
+		intel_runtime_pm_enable_interrupts(i915);
- intel_uc_resume(dev_priv);
+		intel_uc_resume(i915);
- i915_gem_init_swizzling(dev_priv);
-		i915_gem_restore_fences(dev_priv);
+		i915_gem_init_swizzling(i915);
+		i915_gem_restore_fences(i915);
- enable_rpm_wakeref_asserts(dev_priv);
+		enable_rpm_wakeref_asserts(i915);
return ret;
  	}
- enable_rpm_wakeref_asserts(dev_priv);
-	intel_runtime_pm_cleanup(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
+	intel_runtime_pm_cleanup(i915);
- if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
+	if (intel_uncore_arm_unclaimed_mmio_detection(&i915->uncore))
  		DRM_ERROR("Unclaimed access detected prior to suspending\n");
- dev_priv->runtime_pm.suspended = true;
+	i915->runtime_pm.suspended = true;
/*
  	 * FIXME: We really should find a document that references the arguments
  	 * used below!
  	 */
-	if (IS_BROADWELL(dev_priv)) {
+	if (IS_BROADWELL(i915)) {
  		/*
  		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
  		 * being detected, and the call we do at intel_runtime_resume()
  		 * won't be able to restore them. Since PCI_D3hot matches the
  		 * actual specification and appears to be working, use it.
  		 */
-		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
+		intel_opregion_notify_adapter(i915, PCI_D3hot);
  	} else {
  		/*
  		 * current versions of firmware which depend on this opregion
@@ -2971,13 +2971,13 @@ static int intel_runtime_suspend(struct device *kdev)
  		 * to distinguish it from notifications that might be sent via
  		 * the suspend path.
  		 */
-		intel_opregion_notify_adapter(dev_priv, PCI_D1);
+		intel_opregion_notify_adapter(i915, PCI_D1);
  	}
- assert_forcewakes_inactive(&dev_priv->uncore);
+	assert_forcewakes_inactive(&i915->uncore);
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
-		intel_hpd_poll_init(dev_priv);
+	if (!IS_VALLEYVIEW(i915) && !IS_CHERRYVIEW(i915))
+		intel_hpd_poll_init(i915);
DRM_DEBUG_KMS("Device suspended\n");
  	return 0;
@@ -2987,69 +2987,69 @@ static int intel_runtime_resume(struct device *kdev)
  {
  	struct pci_dev *pdev = to_pci_dev(kdev);
  	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int ret = 0;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(i915)))
  		return -ENODEV;
DRM_DEBUG_KMS("Resuming device\n"); - WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
-	disable_rpm_wakeref_asserts(dev_priv);
+	WARN_ON_ONCE(atomic_read(&i915->runtime_pm.wakeref_count));
+	disable_rpm_wakeref_asserts(i915);
- intel_opregion_notify_adapter(dev_priv, PCI_D0);
-	dev_priv->runtime_pm.suspended = false;
-	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
+	intel_opregion_notify_adapter(i915, PCI_D0);
+	i915->runtime_pm.suspended = false;
+	if (intel_uncore_unclaimed_mmio(&i915->uncore))
  		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
- if (INTEL_GEN(dev_priv) >= 11) {
-		bxt_disable_dc9(dev_priv);
-		icl_display_core_init(dev_priv, true);
-		if (dev_priv->csr.dmc_payload) {
-			if (dev_priv->csr.allowed_dc_mask &
+	if (INTEL_GEN(i915) >= 11) {
+		bxt_disable_dc9(i915);
+		icl_display_core_init(i915, true);
+		if (i915->csr.dmc_payload) {
+			if (i915->csr.allowed_dc_mask &
  			    DC_STATE_EN_UPTO_DC6)
-				skl_enable_dc6(dev_priv);
-			else if (dev_priv->csr.allowed_dc_mask &
+				skl_enable_dc6(i915);
+			else if (i915->csr.allowed_dc_mask &
  				 DC_STATE_EN_UPTO_DC5)
-				gen9_enable_dc5(dev_priv);
+				gen9_enable_dc5(i915);
  		}
-	} else if (IS_GEN9_LP(dev_priv)) {
-		bxt_disable_dc9(dev_priv);
-		bxt_display_core_init(dev_priv, true);
-		if (dev_priv->csr.dmc_payload &&
-		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
-			gen9_enable_dc5(dev_priv);
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		hsw_disable_pc8(dev_priv);
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		ret = vlv_resume_prepare(dev_priv, true);
+	} else if (IS_GEN9_LP(i915)) {
+		bxt_disable_dc9(i915);
+		bxt_display_core_init(i915, true);
+		if (i915->csr.dmc_payload &&
+		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+			gen9_enable_dc5(i915);
+	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+		hsw_disable_pc8(i915);
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		ret = vlv_resume_prepare(i915, true);
  	}
- intel_uncore_runtime_resume(&dev_priv->uncore);
+	intel_uncore_runtime_resume(&i915->uncore);
- intel_runtime_pm_enable_interrupts(dev_priv);
+	intel_runtime_pm_enable_interrupts(i915);
- intel_uc_resume(dev_priv);
+	intel_uc_resume(i915);
/*
  	 * No point of rolling back things in case of an error, as the best
  	 * we can do is to hope that things will still work (and disable RPM).
  	 */
-	i915_gem_init_swizzling(dev_priv);
-	i915_gem_restore_fences(dev_priv);
+	i915_gem_init_swizzling(i915);
+	i915_gem_restore_fences(i915);
/*
  	 * On VLV/CHV display interrupts are part of the display
  	 * power well, so hpd is reinitialized from there. For
  	 * everyone else do it here.
  	 */
-	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
-		intel_hpd_init(dev_priv);
+	if (!IS_VALLEYVIEW(i915) && !IS_CHERRYVIEW(i915))
+		intel_hpd_init(i915);
- intel_enable_ipc(dev_priv);
+	intel_enable_ipc(i915);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
if (ret)
  		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eb98d285d290..a1adb79ca2c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -208,7 +208,7 @@ struct i915_mm_struct;
  struct i915_mmu_object;
struct drm_i915_file_private {
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
  	struct drm_file *file;
struct {
@@ -278,12 +278,12 @@ struct dpll;
  struct intel_cdclk_state;
struct drm_i915_display_funcs {
-	void (*get_cdclk)(struct drm_i915_private *dev_priv,
+	void (*get_cdclk)(struct drm_i915_private *i915,
  			  struct intel_cdclk_state *cdclk_state);
-	void (*set_cdclk)(struct drm_i915_private *dev_priv,
+	void (*set_cdclk)(struct drm_i915_private *i915,
  			  const struct intel_cdclk_state *cdclk_state,
  			  enum pipe pipe);
-	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
+	int (*get_fifo_size)(struct drm_i915_private *i915,
  			     enum i9xx_plane_id i9xx_plane);
  	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
  	int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
@@ -317,8 +317,8 @@ struct drm_i915_display_funcs {
  				    const struct drm_connector_state *old_conn_state);
  	void (*fdi_link_train)(struct intel_crtc *crtc,
  			       const struct intel_crtc_state *crtc_state);
-	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
-	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
+	void (*init_clock_gating)(struct drm_i915_private *i915);
+	void (*hpd_irq_setup)(struct drm_i915_private *i915);
  	/* clock updates for mode set */
  	/* cursor updates */
  	/* render clock increase/decrease */
@@ -552,7 +552,7 @@ struct intel_gmbus {
  	u32 reg0;
  	i915_reg_t gpio_reg;
  	struct i2c_algo_bit_data bit_algo;
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
  };
struct i915_suspend_saved_registers {
@@ -641,7 +641,7 @@ struct intel_rps {
/*
  	 * work, interrupts_enabled and pm_iir are protected by
-	 * dev_priv->irq_lock
+	 * i915->irq_lock
  	 */
  	struct work_struct work;
  	bool interrupts_enabled;
@@ -1240,9 +1240,9 @@ struct i915_perf_stream_ops {
   */
  struct i915_perf_stream {
  	/**
-	 * @dev_priv: i915 drm device
+	 * @i915: i915 drm device
  	 */
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
/**
  	 * @link: Links the stream into ``&drm_i915_private->streams``
@@ -1302,20 +1302,20 @@ struct i915_oa_ops {
  	 * @is_valid_b_counter_reg: Validates register's address for
  	 * programming boolean counters for a particular platform.
  	 */
-	bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
+	bool (*is_valid_b_counter_reg)(struct drm_i915_private *i915,
  				       u32 addr);
/**
  	 * @is_valid_mux_reg: Validates register's address for programming mux
  	 * for a particular platform.
  	 */
-	bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
+	bool (*is_valid_mux_reg)(struct drm_i915_private *i915, u32 addr);
/**
  	 * @is_valid_flex_reg: Validates register's address for programming
  	 * flex EU filtering for a particular platform.
  	 */
-	bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
+	bool (*is_valid_flex_reg)(struct drm_i915_private *i915, u32 addr);
/**
  	 * @enable_metric_set: Selects and applies any MUX configuration to set
@@ -1329,7 +1329,7 @@ struct i915_oa_ops {
  	 * @disable_metric_set: Remove system constraints associated with using
  	 * the OA unit.
  	 */
-	void (*disable_metric_set)(struct drm_i915_private *dev_priv);
+	void (*disable_metric_set)(struct drm_i915_private *i915);
/**
  	 * @oa_enable: Enable periodic sampling
@@ -1357,7 +1357,7 @@ struct i915_oa_ops {
  	 * handling the OA unit tail pointer race that affects multiple
  	 * generations.
  	 */
-	u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
+	u32 (*oa_hw_tail_read)(struct drm_i915_private *i915);
  };
struct intel_cdclk_state {
@@ -1750,13 +1750,13 @@ struct drm_i915_private {
/*
  		 * Lock associated with adding/modifying/removing OA configs
-		 * in dev_priv->perf.metrics_idr.
+		 * in i915->perf.metrics_idr.
  		 */
  		struct mutex metrics_lock;
/*
  		 * List of dynamic configurations, you need to hold
-		 * dev_priv->perf.metrics_lock to access it.
+		 * i915->perf.metrics_lock to access it.
  		 */
  		struct idr metrics_idr;
@@ -1772,7 +1772,7 @@ struct drm_i915_private {
  			 * The stream currently using the OA unit. If accessed
  			 * outside a syscall associated to its file
  			 * descriptor, you need to hold
-			 * dev_priv->drm.struct_mutex.
+			 * i915->drm.struct_mutex.
  			 */
  			struct i915_perf_stream *exclusive_stream;
@@ -2066,15 +2066,15 @@ enum hdmi_force_audio {
  	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
  		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
-#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
-#define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)
+#define INTEL_INFO(i915)	(&(i915)->__info)
+#define RUNTIME_INFO(i915)	(&(i915)->__runtime)
+#define DRIVER_CAPS(i915)	(&(i915)->caps)
-#define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen)
-#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)
+#define INTEL_GEN(i915)	(INTEL_INFO(i915)->gen)
+#define INTEL_DEVID(i915)	(RUNTIME_INFO(i915)->device_id)
#define REVID_FOREVER 0xff
-#define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)
+#define INTEL_REVID(i915)	((i915)->drm.pdev->revision)
#define INTEL_GEN_MASK(s, e) ( \
  	BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
@@ -2082,12 +2082,12 @@ enum hdmi_force_audio {
  	GENMASK((e) - 1, (s) - 1))
/* Returns true if Gen is in inclusive range [Start, End] */
-#define IS_GEN_RANGE(dev_priv, s, e) \
-	(!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
+#define IS_GEN_RANGE(i915, s, e) \
+	(!!(INTEL_INFO(i915)->gen_mask & INTEL_GEN_MASK((s), (e))))
-#define IS_GEN(dev_priv, n) \
+#define IS_GEN(i915, n) \
  	(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
-	 INTEL_INFO(dev_priv)->gen == (n))
+	 INTEL_INFO(i915)->gen == (n))
/*
   * Return true if revision is in range [since,until] inclusive.
@@ -2159,88 +2159,88 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
  	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
  }
-#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
-
-#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
-#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
-#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
-#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
-#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
-#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
-#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
-#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
-#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
-#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
-#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
-#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
-#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
-#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
-#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
-#define IS_IRONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
-#define IS_IRONLAKE_M(dev_priv) \
-	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
-#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
-#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 1)
-#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
-#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
-#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
-#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
-#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
-#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
-#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
-#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
-#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
-#define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
-#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_ELKHARTLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
-#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
-				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_BDW_ULX(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 3)
-#define IS_HSW_ULT(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 3)
-#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 1)
+#define IS_MOBILE(i915)	(INTEL_INFO(i915)->is_mobile)
+
+#define IS_I830(i915)	IS_PLATFORM(i915, INTEL_I830)
+#define IS_I845G(i915)	IS_PLATFORM(i915, INTEL_I845G)
+#define IS_I85X(i915)	IS_PLATFORM(i915, INTEL_I85X)
+#define IS_I865G(i915)	IS_PLATFORM(i915, INTEL_I865G)
+#define IS_I915G(i915)	IS_PLATFORM(i915, INTEL_I915G)
+#define IS_I915GM(i915)	IS_PLATFORM(i915, INTEL_I915GM)
+#define IS_I945G(i915)	IS_PLATFORM(i915, INTEL_I945G)
+#define IS_I945GM(i915)	IS_PLATFORM(i915, INTEL_I945GM)
+#define IS_I965G(i915)	IS_PLATFORM(i915, INTEL_I965G)
+#define IS_I965GM(i915)	IS_PLATFORM(i915, INTEL_I965GM)
+#define IS_G45(i915)	IS_PLATFORM(i915, INTEL_G45)
+#define IS_GM45(i915)	IS_PLATFORM(i915, INTEL_GM45)
+#define IS_G4X(i915)	(IS_G45(i915) || IS_GM45(i915))
+#define IS_PINEVIEW(i915)	IS_PLATFORM(i915, INTEL_PINEVIEW)
+#define IS_G33(i915)	IS_PLATFORM(i915, INTEL_G33)
+#define IS_IRONLAKE(i915)	IS_PLATFORM(i915, INTEL_IRONLAKE)
+#define IS_IRONLAKE_M(i915) \
+	(IS_PLATFORM(i915, INTEL_IRONLAKE) && IS_MOBILE(i915))
+#define IS_IVYBRIDGE(i915)	IS_PLATFORM(i915, INTEL_IVYBRIDGE)
+#define IS_IVB_GT1(i915)	(IS_IVYBRIDGE(i915) && \
+				 INTEL_INFO(i915)->gt == 1)
+#define IS_VALLEYVIEW(i915)	IS_PLATFORM(i915, INTEL_VALLEYVIEW)
+#define IS_CHERRYVIEW(i915)	IS_PLATFORM(i915, INTEL_CHERRYVIEW)
+#define IS_HASWELL(i915)	IS_PLATFORM(i915, INTEL_HASWELL)
+#define IS_BROADWELL(i915)	IS_PLATFORM(i915, INTEL_BROADWELL)
+#define IS_SKYLAKE(i915)	IS_PLATFORM(i915, INTEL_SKYLAKE)
+#define IS_BROXTON(i915)	IS_PLATFORM(i915, INTEL_BROXTON)
+#define IS_KABYLAKE(i915)	IS_PLATFORM(i915, INTEL_KABYLAKE)
+#define IS_GEMINILAKE(i915)	IS_PLATFORM(i915, INTEL_GEMINILAKE)
+#define IS_COFFEELAKE(i915)	IS_PLATFORM(i915, INTEL_COFFEELAKE)
+#define IS_CANNONLAKE(i915)	IS_PLATFORM(i915, INTEL_CANNONLAKE)
+#define IS_ICELAKE(i915)	IS_PLATFORM(i915, INTEL_ICELAKE)
+#define IS_ELKHARTLAKE(i915)	IS_PLATFORM(i915, INTEL_ELKHARTLAKE)
+#define IS_HSW_EARLY_SDV(i915) (IS_HASWELL(i915) && \
+				    (INTEL_DEVID(i915) & 0xFF00) == 0x0C00)
+#define IS_BDW_ULT(i915) \
+	IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
+#define IS_BDW_ULX(i915) \
+	IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
+#define IS_BDW_GT3(i915)	(IS_BROADWELL(i915) && \
+				 INTEL_INFO(i915)->gt == 3)
+#define IS_HSW_ULT(i915) \
+	IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
+#define IS_HSW_GT3(i915)	(IS_HASWELL(i915) && \
+				 INTEL_INFO(i915)->gt == 3)
+#define IS_HSW_GT1(i915)	(IS_HASWELL(i915) && \
+				 INTEL_INFO(i915)->gt == 1)
  /* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_ULT(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_SKL_ULX(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_KBL_ULT(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_KBL_ULX(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 2)
-#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 3)
-#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 4)
-#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 2)
-#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CFL_ULT(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_CFL_ULX(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 2)
-#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
-				 INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CNL_WITH_PORT_F(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
-#define IS_ICL_WITH_PORT_F(dev_priv) \
-	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
+#define IS_HSW_ULX(i915) \
+	IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
+#define IS_SKL_ULT(i915) \
+	IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_SKL_ULX(i915) \
+	IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_KBL_ULT(i915) \
+	IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_KBL_ULX(i915) \
+	IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_SKL_GT2(i915)	(IS_SKYLAKE(i915) && \
+				 INTEL_INFO(i915)->gt == 2)
+#define IS_SKL_GT3(i915)	(IS_SKYLAKE(i915) && \
+				 INTEL_INFO(i915)->gt == 3)
+#define IS_SKL_GT4(i915)	(IS_SKYLAKE(i915) && \
+				 INTEL_INFO(i915)->gt == 4)
+#define IS_KBL_GT2(i915)	(IS_KABYLAKE(i915) && \
+				 INTEL_INFO(i915)->gt == 2)
+#define IS_KBL_GT3(i915)	(IS_KABYLAKE(i915) && \
+				 INTEL_INFO(i915)->gt == 3)
+#define IS_CFL_ULT(i915) \
+	IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_CFL_ULX(i915) \
+	IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_CFL_GT2(i915)	(IS_COFFEELAKE(i915) && \
+				 INTEL_INFO(i915)->gt == 2)
+#define IS_CFL_GT3(i915)	(IS_COFFEELAKE(i915) && \
+				 INTEL_INFO(i915)->gt == 3)
+#define IS_CNL_WITH_PORT_F(i915) \
+	IS_SUBPLATFORM(i915, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
+#define IS_ICL_WITH_PORT_F(i915) \
+	IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
#define SKL_REVID_A0 0x0
  #define SKL_REVID_B0		0x1
@@ -2259,8 +2259,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
  #define BXT_REVID_B_LAST	0x8
  #define BXT_REVID_C0		0x9
-#define IS_BXT_REVID(dev_priv, since, until) \
-	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
+#define IS_BXT_REVID(i915, since, until) \
+	(IS_BROXTON(i915) && IS_REVID(i915, since, until))
#define KBL_REVID_A0 0x0
  #define KBL_REVID_B0		0x1
@@ -2268,14 +2268,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
  #define KBL_REVID_D0		0x3
  #define KBL_REVID_E0		0x4
-#define IS_KBL_REVID(dev_priv, since, until) \
-	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
+#define IS_KBL_REVID(i915, since, until) \
+	(IS_KABYLAKE(i915) && IS_REVID(i915, since, until))
#define GLK_REVID_A0 0x0
  #define GLK_REVID_A1		0x1
-#define IS_GLK_REVID(dev_priv, since, until) \
-	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
+#define IS_GLK_REVID(i915, since, until) \
+	(IS_GEMINILAKE(i915) && IS_REVID(i915, since, until))
#define CNL_REVID_A0 0x0
  #define CNL_REVID_B0		0x1
@@ -2293,122 +2293,122 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
  #define IS_ICL_REVID(p, since, until) \
  	(IS_ICELAKE(p) && IS_REVID(p, since, until))
-#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
+#define IS_LP(i915)	(INTEL_INFO(i915)->is_lp)
+#define IS_GEN9_LP(i915)	(IS_GEN(i915, 9) && IS_LP(i915))
+#define IS_GEN9_BC(i915)	(IS_GEN(i915, 9) && !IS_LP(i915))
-#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
+#define HAS_ENGINE(i915, id) (INTEL_INFO(i915)->engine_mask & BIT(id))
-#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
+#define ENGINE_INSTANCES_MASK(i915, first, count) ({		\
  	unsigned int first__ = (first);					\
  	unsigned int count__ = (count);					\
-	(INTEL_INFO(dev_priv)->engine_mask &				\
+	(INTEL_INFO(i915)->engine_mask &				\
  	 GENMASK(first__ + count__ - 1, first__)) >> first__;		\
  })
-#define VDBOX_MASK(dev_priv) \
-	ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
-#define VEBOX_MASK(dev_priv) \
-	ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
-
-#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
-#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
-#define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
-#define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
-				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
-
-#define HWS_NEEDS_PHYSICAL(dev_priv)	(INTEL_INFO(dev_priv)->hws_needs_physical)
-
-#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
-		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
-#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
-		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
-#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
-		(INTEL_INFO(dev_priv)->has_logical_ring_preemption)
-
-#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
-
-#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
-#define HAS_PPGTT(dev_priv) \
-	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
-#define HAS_FULL_PPGTT(dev_priv) \
-	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
-
-#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
+#define VDBOX_MASK(i915) \
+	ENGINE_INSTANCES_MASK(i915, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(i915) \
+	ENGINE_INSTANCES_MASK(i915, VECS0, I915_MAX_VECS)
+
+#define HAS_LLC(i915)	(INTEL_INFO(i915)->has_llc)
+#define HAS_SNOOP(i915)	(INTEL_INFO(i915)->has_snoop)
+#define HAS_EDRAM(i915)	((i915)->edram_size_mb)
+#define HAS_WT(i915)	((IS_HASWELL(i915) || \
+				 IS_BROADWELL(i915)) && HAS_EDRAM(i915))
+
+#define HWS_NEEDS_PHYSICAL(i915)	(INTEL_INFO(i915)->hws_needs_physical)
+
+#define HAS_LOGICAL_RING_CONTEXTS(i915) \
+		(INTEL_INFO(i915)->has_logical_ring_contexts)
+#define HAS_LOGICAL_RING_ELSQ(i915) \
+		(INTEL_INFO(i915)->has_logical_ring_elsq)
+#define HAS_LOGICAL_RING_PREEMPTION(i915) \
+		(INTEL_INFO(i915)->has_logical_ring_preemption)
+
+#define HAS_EXECLISTS(i915) HAS_LOGICAL_RING_CONTEXTS(i915)
+
+#define INTEL_PPGTT(i915) (INTEL_INFO(i915)->ppgtt_type)
+#define HAS_PPGTT(i915) \
+	(INTEL_PPGTT(i915) != INTEL_PPGTT_NONE)
+#define HAS_FULL_PPGTT(i915) \
+	(INTEL_PPGTT(i915) >= INTEL_PPGTT_FULL)
+
+#define HAS_PAGE_SIZES(i915, sizes) ({ \
  	GEM_BUG_ON((sizes) == 0); \
-	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
+	((sizes) & ~INTEL_INFO(i915)->page_sizes) == 0; \
  })
-#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
-		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)
+#define HAS_OVERLAY(i915)		 (INTEL_INFO(i915)->display.has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(i915) \
+		(INTEL_INFO(i915)->display.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
+#define HAS_BROKEN_CS_TLB(i915)	(IS_I830(i915) || IS_I845G(i915))
/* WaRsDisableCoarsePowerGating:skl,cnl */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
-	(IS_CANNONLAKE(dev_priv) || \
-	 IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+#define NEEDS_WaRsDisableCoarsePowerGating(i915) \
+	(IS_CANNONLAKE(i915) || \
+	 IS_SKL_GT3(i915) || IS_SKL_GT4(i915))
-#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
-#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
-					IS_GEMINILAKE(dev_priv) || \
-					IS_KABYLAKE(dev_priv))
+#define HAS_GMBUS_IRQ(i915) (INTEL_GEN(i915) >= 4)
+#define HAS_GMBUS_BURST_READ(i915) (INTEL_GEN(i915) >= 10 || \
+					IS_GEMINILAKE(i915) || \
+					IS_KABYLAKE(i915))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
   * rows, which changed the alignment requirements and fence programming.
   */
-#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
-					 !(IS_I915G(dev_priv) || \
-					 IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)
+#define HAS_128_BYTE_Y_TILING(i915) (!IS_GEN(i915, 2) && \
+					 !(IS_I915G(i915) || \
+					 IS_I915GM(i915)))
+#define SUPPORTS_TV(i915)		(INTEL_INFO(i915)->display.supports_tv)
+#define I915_HAS_HOTPLUG(i915)	(INTEL_INFO(i915)->display.has_hotplug)
-#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_fbc)
-#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)
+#define HAS_FW_BLC(i915) 	(INTEL_GEN(i915) > 2)
+#define HAS_FBC(i915)	(INTEL_INFO(i915)->display.has_fbc)
+#define HAS_CUR_FBC(i915)	(!HAS_GMCH(i915) && INTEL_GEN(i915) >= 7)
-#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
+#define HAS_IPS(i915)	(IS_HSW_ULT(i915) || IS_BROADWELL(i915))
-#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
+#define HAS_DP_MST(i915)	(INTEL_INFO(i915)->display.has_dp_mst)
-#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
-#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
-#define HAS_TRANSCODER_EDP(dev_priv)	 (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
+#define HAS_DDI(i915)		 (INTEL_INFO(i915)->display.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(i915) (INTEL_INFO(i915)->has_fpga_dbg)
+#define HAS_PSR(i915)		 (INTEL_INFO(i915)->display.has_psr)
+#define HAS_TRANSCODER_EDP(i915)	 (INTEL_INFO(i915)->trans_offsets[TRANSCODER_EDP] != 0)
-#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
-#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
-#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */
+#define HAS_RC6(i915)		 (INTEL_INFO(i915)->has_rc6)
+#define HAS_RC6p(i915)		 (INTEL_INFO(i915)->has_rc6p)
+#define HAS_RC6pp(i915)		 (false) /* HW was never validated */
-#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
+#define HAS_RPS(i915)	(INTEL_INFO(i915)->has_rps)
-#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
+#define HAS_CSR(i915)	(INTEL_INFO(i915)->display.has_csr)
-#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
-#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
+#define HAS_RUNTIME_PM(i915) (INTEL_INFO(i915)->has_runtime_pm)
+#define HAS_64BIT_RELOC(i915) (INTEL_INFO(i915)->has_64bit_reloc)
-#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_IPC(i915)		 (INTEL_INFO(i915)->display.has_ipc)
/*
   * For now, anything with a GuC requires uCode loading, and then supports
   * command submission once loaded. But these are logically independent
   * properties, so we have separate macros to test them.
   */
-#define HAS_GUC(dev_priv)	(INTEL_INFO(dev_priv)->has_guc)
-#define HAS_GUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
-#define HAS_GUC_SCHED(dev_priv)	(HAS_GUC(dev_priv))
+#define HAS_GUC(i915)	(INTEL_INFO(i915)->has_guc)
+#define HAS_GUC_UCODE(i915)	(HAS_GUC(i915))
+#define HAS_GUC_SCHED(i915)	(HAS_GUC(i915))
/* For now, anything with a GuC has also HuC */
-#define HAS_HUC(dev_priv)	(HAS_GUC(dev_priv))
-#define HAS_HUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
+#define HAS_HUC(i915)	(HAS_GUC(i915))
+#define HAS_HUC_UCODE(i915)	(HAS_GUC(i915))
/* Having a GuC is not the same as using a GuC */
-#define USES_GUC(dev_priv)		intel_uc_is_using_guc(dev_priv)
-#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission(dev_priv)
-#define USES_HUC(dev_priv)		intel_uc_is_using_huc(dev_priv)
+#define USES_GUC(i915)		intel_uc_is_using_guc(i915)
+#define USES_GUC_SUBMISSION(i915)	intel_uc_is_using_guc_submission(i915)
+#define USES_HUC(i915)		intel_uc_is_using_huc(i915)
-#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
+#define HAS_POOLED_EU(i915)	(INTEL_INFO(i915)->has_pooled_eu)
#define INTEL_PCH_DEVICE_ID_MASK 0xff80
  #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -2429,36 +2429,36 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
  #define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
  #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
-#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
-#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
-#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
-#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
-#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev_priv) \
-	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
-	 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev_priv) \
-	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
-	 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
-#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
-#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
-#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-
-#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
-
-#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
+#define INTEL_PCH_TYPE(i915) ((i915)->pch_type)
+#define INTEL_PCH_ID(i915) ((i915)->pch_id)
+#define HAS_PCH_ICP(i915) (INTEL_PCH_TYPE(i915) == PCH_ICP)
+#define HAS_PCH_CNP(i915) (INTEL_PCH_TYPE(i915) == PCH_CNP)
+#define HAS_PCH_SPT(i915) (INTEL_PCH_TYPE(i915) == PCH_SPT)
+#define HAS_PCH_LPT(i915) (INTEL_PCH_TYPE(i915) == PCH_LPT)
+#define HAS_PCH_LPT_LP(i915) \
+	(INTEL_PCH_ID(i915) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+	 INTEL_PCH_ID(i915) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(i915) \
+	(INTEL_PCH_ID(i915) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+	 INTEL_PCH_ID(i915) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(i915) (INTEL_PCH_TYPE(i915) == PCH_CPT)
+#define HAS_PCH_IBX(i915) (INTEL_PCH_TYPE(i915) == PCH_IBX)
+#define HAS_PCH_NOP(i915) (INTEL_PCH_TYPE(i915) == PCH_NOP)
+#define HAS_PCH_SPLIT(i915) (INTEL_PCH_TYPE(i915) != PCH_NONE)
+
+#define HAS_GMCH(i915) (INTEL_INFO(i915)->display.has_gmch)
+
+#define HAS_LSPCON(i915) (INTEL_GEN(i915) >= 9)
/* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
-#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
-				 2 : HAS_L3_DPF(dev_priv))
+#define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf)
+#define NUM_L3_SLICES(i915) (IS_HSW_GT3(i915) ? \
+				 2 : HAS_L3_DPF(i915))
#define GT_FREQUENCY_MULTIPLIER 50
  #define GEN9_FREQ_SCALER 3
-#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+#define HAS_DISPLAY(i915) (INTEL_INFO(i915)->num_pipes > 0)
#include "i915_trace.h" @@ -2471,24 +2471,24 @@ static inline bool intel_vtd_active(void)
  	return false;
  }
-static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
+static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
  {
-	return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
+	return INTEL_GEN(i915) >= 6 && intel_vtd_active();
  }
static inline bool
-intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
  {
-	return IS_BROXTON(dev_priv) && intel_vtd_active();
+	return IS_BROXTON(i915) && intel_vtd_active();
  }
/* i915_drv.c */
  void __printf(3, 4)
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+__i915_printk(struct drm_i915_private *i915, const char *level,
  	      const char *fmt, ...);
-#define i915_report_error(dev_priv, fmt, ...) \
-	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
+#define i915_report_error(i915, fmt, ...)				   \
+	__i915_printk(i915, KERN_ERR, fmt, ##__VA_ARGS__)
#ifdef CONFIG_COMPAT
  extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -2503,12 +2503,12 @@ extern int i915_driver_load(struct pci_dev *pdev,
  extern void i915_driver_unload(struct drm_device *dev);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
-extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+extern void intel_hangcheck_init(struct drm_i915_private *i915);
+int vlv_force_gfx_clock(struct drm_i915_private *i915, bool on);
-u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *i915);
-static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
+static inline void i915_queue_hangcheck(struct drm_i915_private *i915)
  {
  	unsigned long delay;
@@ -2522,28 +2522,28 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
  	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
  	queue_delayed_work(system_long_wq,
-			   &dev_priv->gpu_error.hangcheck_work, delay);
+			   &i915->gpu_error.hangcheck_work, delay);
  }
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+static inline bool intel_gvt_active(struct drm_i915_private *i915)
  {
-	return dev_priv->gvt;
+	return i915->gvt;
  }
-static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
+static inline bool intel_vgpu_active(struct drm_i915_private *i915)
  {
-	return dev_priv->vgpu.active;
+	return i915->vgpu.active;
  }
/* i915_gem.c */
-int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
+int i915_gem_init_userptr(struct drm_i915_private *i915);
+void i915_gem_cleanup_userptr(struct drm_i915_private *i915);
  void i915_gem_sanitize(struct drm_i915_private *i915);
-int i915_gem_init_early(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
-int i915_gem_freeze(struct drm_i915_private *dev_priv);
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+int i915_gem_init_early(struct drm_i915_private *i915);
+void i915_gem_cleanup_early(struct drm_i915_private *i915);
+void i915_gem_load_init_fences(struct drm_i915_private *i915);
+int i915_gem_freeze(struct drm_i915_private *i915);
+int i915_gem_freeze_late(struct drm_i915_private *i915);
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
  {
@@ -2591,7 +2591,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);

-void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_runtime_suspend(struct drm_i915_private *i915);
static inline int __must_check
  i915_mutex_lock_interruptible(struct drm_device *dev)
@@ -2633,20 +2633,20 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
  	return READ_ONCE(error->reset_engine_count[engine->id]);
  }
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
+void i915_gem_set_wedged(struct drm_i915_private *i915);
+bool i915_gem_unset_wedged(struct drm_i915_private *i915);
void i915_gem_init_mmio(struct drm_i915_private *i915);
-int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
-void i915_gem_fini_hw(struct drm_i915_private *dev_priv);
-void i915_gem_fini(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_init(struct drm_i915_private *i915);
+int __must_check i915_gem_init_hw(struct drm_i915_private *i915);
+void i915_gem_init_swizzling(struct drm_i915_private *i915);
+void i915_gem_fini_hw(struct drm_i915_private *i915);
+void i915_gem_fini(struct drm_i915_private *i915);
+int i915_gem_wait_for_idle(struct drm_i915_private *i915,
  			   unsigned int flags, long timeout);
-void i915_gem_suspend(struct drm_i915_private *dev_priv);
-void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
-void i915_gem_resume(struct drm_i915_private *dev_priv);
+void i915_gem_suspend(struct drm_i915_private *i915);
+void i915_gem_suspend_late(struct drm_i915_private *i915);
+void i915_gem_resume(struct drm_i915_private *i915);
  vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
@@ -2663,12 +2663,12 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
/* i915_gem_fence_reg.c */
  struct drm_i915_fence_reg *
-i915_reserve_fence(struct drm_i915_private *dev_priv);
+i915_reserve_fence(struct drm_i915_private *i915);
  void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
-void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
+void i915_gem_restore_fences(struct drm_i915_private *i915);
-void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
+void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *i915);
  void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
  				       struct sg_table *pages);
  void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
@@ -2715,40 +2715,40 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
  					 unsigned int flags);
  int i915_gem_evict_vm(struct i915_address_space *vm);
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
+void i915_gem_flush_ggtt_writes(struct drm_i915_private *i915);
/* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *i915)
  {
  	wmb();
-	if (INTEL_GEN(dev_priv) < 6)
+	if (INTEL_GEN(i915) < 6)
  		intel_gtt_chipset_flush();
  }
/* i915_gem_stolen.c */
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
  				struct drm_mm_node *node, u64 size,
  				unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
  					 struct drm_mm_node *node, u64 size,
  					 unsigned alignment, u64 start,
  					 u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
  				 struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
+int i915_gem_init_stolen(struct drm_i915_private *i915);
+void i915_gem_cleanup_stolen(struct drm_i915_private *i915);
  struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
  			      resource_size_t size);
  struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
  					       resource_size_t stolen_offset,
  					       resource_size_t gtt_offset,
  					       resource_size_t size);
/* i915_gem_internal.c */
  struct drm_i915_gem_object *
-i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
+i915_gem_object_create_internal(struct drm_i915_private *i915,
  				phys_addr_t size);
/* i915_gem_shrinker.c */
@@ -2771,21 +2771,21 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
  /* i915_gem_tiling.c */
  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+	return i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
  		i915_gem_object_is_tiled(obj);
  }
-u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
+u32 i915_gem_fence_size(struct drm_i915_private *i915, u32 size,
  			unsigned int tiling, unsigned int stride);
-u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
+u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
  			     unsigned int tiling, unsigned int stride);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
-int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
+int i915_cmd_parser_get_version(struct drm_i915_private *i915);
  void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
  void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
  int intel_engine_cmd_parser(struct intel_engine_cs *engine,
@@ -2796,52 +2796,52 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
  			    bool is_master);
/* i915_perf.c */
-extern void i915_perf_init(struct drm_i915_private *dev_priv);
-extern void i915_perf_fini(struct drm_i915_private *dev_priv);
-extern void i915_perf_register(struct drm_i915_private *dev_priv);
-extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
+extern void i915_perf_init(struct drm_i915_private *i915);
+extern void i915_perf_fini(struct drm_i915_private *i915);
+extern void i915_perf_register(struct drm_i915_private *i915);
+extern void i915_perf_unregister(struct drm_i915_private *i915);
/* i915_suspend.c */
-extern int i915_save_state(struct drm_i915_private *dev_priv);
-extern int i915_restore_state(struct drm_i915_private *dev_priv);
+extern int i915_save_state(struct drm_i915_private *i915);
+extern int i915_restore_state(struct drm_i915_private *i915);
/* i915_sysfs.c */
-void i915_setup_sysfs(struct drm_i915_private *dev_priv);
-void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
+void i915_setup_sysfs(struct drm_i915_private *i915);
+void i915_teardown_sysfs(struct drm_i915_private *i915);
/* intel_device_info.c */
  static inline struct intel_device_info *
-mkwrite_device_info(struct drm_i915_private *dev_priv)
+mkwrite_device_info(struct drm_i915_private *i915)
  {
-	return (struct intel_device_info *)INTEL_INFO(dev_priv);
+	return (struct intel_device_info *)INTEL_INFO(i915);
  }
/* modesetting */
  extern void intel_modeset_init_hw(struct drm_device *dev);
  extern int intel_modeset_init(struct drm_device *dev);
  extern void intel_modeset_cleanup(struct drm_device *dev);
-extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
+extern int intel_modeset_vga_set_state(struct drm_i915_private *i915,
  				       bool state);
  extern void intel_display_resume(struct drm_device *dev);
-extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
-extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
-extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
+extern void i915_redisable_vga(struct drm_i915_private *i915);
+extern void i915_redisable_vga_power_on(struct drm_i915_private *i915);
+extern void intel_init_pch_refclk(struct drm_i915_private *i915);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
  			struct drm_file *file);
extern struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_i915_private *dev_priv);
+intel_display_capture_error_state(struct drm_i915_private *i915);
  extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
  					    struct intel_display_error_state *error);
#define __I915_REG_OP(op__, dev_priv__, ...) \
  	intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
-#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
-#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
+#define I915_READ(reg__)	 __I915_REG_OP(read, i915, (reg__))
+#define I915_WRITE(reg__, val__) __I915_REG_OP(write, i915, (reg__), (val__))
-#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
+#define POSTING_READ(reg__)	__I915_REG_OP(posting_read, i915, (reg__))
/* These are untraced mmio-accessors that are only valid to be used inside
   * critical sections, such as inside IRQ handlers, where forcewake is explicitly
@@ -2851,13 +2851,13 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
   *
   * As an example, these accessors can possibly be used between:
   *
- * spin_lock_irq(&dev_priv->uncore.lock);
+ * spin_lock_irq(&i915->uncore.lock);
   * intel_uncore_forcewake_get__locked();
   *
   * and
   *
   * intel_uncore_forcewake_put__locked();
- * spin_unlock_irq(&dev_priv->uncore.lock);
+ * spin_unlock_irq(&i915->uncore.lock);
   *
   *
   * Note: some registers may not need forcewake held, so
@@ -2866,18 +2866,18 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
   *
   * Certain architectures will die if the same cacheline is concurrently accessed
   * by different clients (e.g. on Ivybridge). Access to registers should
- * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * therefore generally be serialised, by either the i915->uncore.lock or
   * a more localised lock guarding all access to that bank of registers.
   */
-#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
+#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, i915, (reg__))
+#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, i915, (reg__), (val__))
/* "Broadcast RGB" property */
  #define INTEL_BROADCAST_RGB_AUTO 0
  #define INTEL_BROADCAST_RGB_FULL 1
  #define INTEL_BROADCAST_RGB_LIMITED 2
-void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
+void i915_memcpy_init_early(struct drm_i915_private *i915);
  bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4017ecf561f6..1774c7604076 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -149,7 +149,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
static int
  i915_gem_create(struct drm_file *file,
-		struct drm_i915_private *dev_priv,
+		struct drm_i915_private *i915,
  		u64 *size_p,
  		u32 *handle_p)
  {
@@ -163,7 +163,7 @@ i915_gem_create(struct drm_file *file,
  		return -EINVAL;
/* Allocate the new object */
-	obj = i915_gem_object_create_shmem(dev_priv, size);
+	obj = i915_gem_object_create_shmem(i915, size);
  	if (IS_ERR(obj))
  		return PTR_ERR(obj);
@@ -223,16 +223,16 @@ int
  i915_gem_create_ioctl(struct drm_device *dev, void *data,
  		      struct drm_file *file)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_i915_gem_create *args = data;
- i915_gem_flush_free_objects(dev_priv);
+	i915_gem_flush_free_objects(i915);
- return i915_gem_create(file, dev_priv,
+	return i915_gem_create(file, i915,
  			       &args->size, &args->handle);
  }
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
+void i915_gem_flush_ggtt_writes(struct drm_i915_private *i915)
  {
  	intel_wakeref_t wakeref;
@@ -257,13 +257,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
  	wmb();

-	if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
+	if (INTEL_INFO(i915)->has_coherent_ggtt)
  		return;
- i915_gem_chipset_flush(dev_priv);
+	i915_gem_chipset_flush(i915);
- with_intel_runtime_pm(dev_priv, wakeref) {
-		struct intel_uncore *uncore = &dev_priv->uncore;
+	with_intel_runtime_pm(i915, wakeref) {
+		struct intel_uncore *uncore = &i915->uncore;
spin_lock_irq(&uncore->lock);
  		intel_uncore_posting_read_fw(uncore,
@@ -884,7 +884,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
  	return 0;
  }
-void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *i915)
  {
  	struct drm_i915_gem_object *obj, *on;
  	int i;
@@ -897,15 +897,15 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
  	 */
list_for_each_entry_safe(obj, on,
-				 &dev_priv->mm.userfault_list, userfault_link)
+				 &i915->mm.userfault_list, userfault_link)
  		__i915_gem_object_release_mmap(obj);
/* The fence will be lost when the device powers down. If any were
  	 * in use by hardware (i.e. they are pinned), we should not be powering
  	 * down! All other fences will be reacquired by the user upon waking.
  	 */
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+	for (i = 0; i < i915->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *reg = &i915->fence_regs[i];
/* Ideally we want to assert that the fence register is not
  		 * live at this point (i.e. that no piece of code will be
@@ -1020,8 +1020,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
  			 u64 alignment,
  			 u64 flags)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_address_space *vm = &dev_priv->ggtt.vm;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_address_space *vm = &i915->ggtt.vm;
  	struct i915_vma *vma;
  	int ret;
@@ -1036,7 +1036,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
  		 * the object in and out of the Global GTT and
  		 * waste a lot of cycles under the mutex.
  		 */
-		if (obj->base.size > dev_priv->ggtt.mappable_end)
+		if (obj->base.size > i915->ggtt.mappable_end)
  			return ERR_PTR(-E2BIG);
/* If NONBLOCK is set the caller is optimistically
@@ -1055,7 +1055,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
  		 * we could try to minimise harm to others.
  		 */
  		if (flags & PIN_NONBLOCK &&
-		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
+		    obj->base.size > i915->ggtt.mappable_end / 2)
  			return ERR_PTR(-ENOSPC);
  	}
@@ -1069,7 +1069,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
  				return ERR_PTR(-ENOSPC);
if (flags & PIN_MAPPABLE &&
-			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
+			    vma->fence_size > i915->ggtt.mappable_end / 2)
  				return ERR_PTR(-ENOSPC);
  		}
@@ -1202,30 +1202,30 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
  	mutex_unlock(&i915->drm.struct_mutex);
  }
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
+void i915_gem_init_swizzling(struct drm_i915_private *i915)
  {
-	if (INTEL_GEN(dev_priv) < 5 ||
-	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+	if (INTEL_GEN(i915) < 5 ||
+	    i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
  		return;
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
  				 DISP_TILE_SURFACE_SWIZZLING);
- if (IS_GEN(dev_priv, 5))
+	if (IS_GEN(i915, 5))
  		return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
-	if (IS_GEN(dev_priv, 6))
+	if (IS_GEN(i915, 6))
  		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-	else if (IS_GEN(dev_priv, 7))
+	else if (IS_GEN(i915, 7))
  		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
-	else if (IS_GEN(dev_priv, 8))
+	else if (IS_GEN(i915, 8))
  		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
  	else
  		BUG();
  }
-static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
+static void init_unused_ring(struct drm_i915_private *i915, u32 base)
  {
  	I915_WRITE(RING_CTL(base), 0);
  	I915_WRITE(RING_HEAD(base), 0);
@@ -1233,45 +1233,45 @@ static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
  	I915_WRITE(RING_START(base), 0);
  }
-static void init_unused_rings(struct drm_i915_private *dev_priv)
+static void init_unused_rings(struct drm_i915_private *i915)
  {
-	if (IS_I830(dev_priv)) {
-		init_unused_ring(dev_priv, PRB1_BASE);
-		init_unused_ring(dev_priv, SRB0_BASE);
-		init_unused_ring(dev_priv, SRB1_BASE);
-		init_unused_ring(dev_priv, SRB2_BASE);
-		init_unused_ring(dev_priv, SRB3_BASE);
-	} else if (IS_GEN(dev_priv, 2)) {
-		init_unused_ring(dev_priv, SRB0_BASE);
-		init_unused_ring(dev_priv, SRB1_BASE);
-	} else if (IS_GEN(dev_priv, 3)) {
-		init_unused_ring(dev_priv, PRB1_BASE);
-		init_unused_ring(dev_priv, PRB2_BASE);
+	if (IS_I830(i915)) {
+		init_unused_ring(i915, PRB1_BASE);
+		init_unused_ring(i915, SRB0_BASE);
+		init_unused_ring(i915, SRB1_BASE);
+		init_unused_ring(i915, SRB2_BASE);
+		init_unused_ring(i915, SRB3_BASE);
+	} else if (IS_GEN(i915, 2)) {
+		init_unused_ring(i915, SRB0_BASE);
+		init_unused_ring(i915, SRB1_BASE);
+	} else if (IS_GEN(i915, 3)) {
+		init_unused_ring(i915, PRB1_BASE);
+		init_unused_ring(i915, PRB2_BASE);
  	}
  }
-int i915_gem_init_hw(struct drm_i915_private *dev_priv)
+int i915_gem_init_hw(struct drm_i915_private *i915)
  {
  	int ret;
- dev_priv->gt.last_init_time = ktime_get();
+	i915->gt.last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
+	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
  		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
- if (IS_HASWELL(dev_priv))
-		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
+	if (IS_HASWELL(i915))
+		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(i915) ?
  			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
-	intel_gt_apply_workarounds(dev_priv);
+	intel_gt_apply_workarounds(i915);
  	/* ...and determine whether they are sticking. */
-	intel_gt_verify_workarounds(dev_priv, "init");
+	intel_gt_verify_workarounds(i915, "init");
- i915_gem_init_swizzling(dev_priv);
+	i915_gem_init_swizzling(i915);
/*
  	 * At least 830 can leave some of the unused rings
@@ -1279,48 +1279,48 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
  	 * will prevent c3 entry. Makes sure all unused rings
  	 * are totally idle.
  	 */
-	init_unused_rings(dev_priv);
+	init_unused_rings(i915);
- BUG_ON(!dev_priv->kernel_context);
-	ret = i915_terminally_wedged(dev_priv);
+	BUG_ON(!i915->kernel_context);
+	ret = i915_terminally_wedged(i915);
  	if (ret)
  		goto out;
- ret = i915_ppgtt_init_hw(dev_priv);
+	ret = i915_ppgtt_init_hw(i915);
  	if (ret) {
  		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
  		goto out;
  	}
- ret = intel_wopcm_init_hw(&dev_priv->wopcm);
+	ret = intel_wopcm_init_hw(&i915->wopcm);
  	if (ret) {
  		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
  		goto out;
  	}
/* We can't enable contexts until all firmware is loaded */
-	ret = intel_uc_init_hw(dev_priv);
+	ret = intel_uc_init_hw(i915);
  	if (ret) {
  		DRM_ERROR("Enabling uc failed (%d)\n", ret);
  		goto out;
  	}
- intel_mocs_init_l3cc_table(dev_priv);
+	intel_mocs_init_l3cc_table(i915);
/* Only when the HW is re-initialised, can we replay the requests */
-	ret = intel_engines_resume(dev_priv);
+	ret = intel_engines_resume(i915);
  	if (ret)
  		goto cleanup_uc;
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- intel_engines_set_scheduler_caps(dev_priv);
+	intel_engines_set_scheduler_caps(i915);
  	return 0;
cleanup_uc:
-	intel_uc_fini_hw(dev_priv);
+	intel_uc_fini_hw(i915);
  out:
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
return ret;
  }
@@ -1505,28 +1505,28 @@ static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
  	return err;
  }
-int i915_gem_init(struct drm_i915_private *dev_priv)
+int i915_gem_init(struct drm_i915_private *i915)
  {
  	int ret;
/* We need to fallback to 4K pages if host doesn't support huge gtt. */
-	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
-		mkwrite_device_info(dev_priv)->page_sizes =
+	if (intel_vgpu_active(i915) && !intel_vgpu_has_huge_gtt(i915))
+		mkwrite_device_info(i915)->page_sizes =
  			I915_GTT_PAGE_SIZE_4K;
- dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
+	i915->mm.unordered_timeline = dma_fence_context_alloc(1);
- i915_timelines_init(dev_priv);
+	i915_timelines_init(i915);
- ret = i915_gem_init_userptr(dev_priv);
+	ret = i915_gem_init_userptr(i915);
  	if (ret)
  		return ret;
- ret = intel_uc_init_misc(dev_priv);
+	ret = intel_uc_init_misc(i915);
  	if (ret)
  		return ret;
- ret = intel_wopcm_init(&dev_priv->wopcm);
+	ret = intel_wopcm_init(&i915->wopcm);
  	if (ret)
  		goto err_uc_misc;
@@ -1536,47 +1536,47 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
  	 * we hold the forcewake during initialisation these problems
  	 * just magically go away.
  	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- ret = i915_gem_init_ggtt(dev_priv);
+	ret = i915_gem_init_ggtt(i915);
  	if (ret) {
  		GEM_BUG_ON(ret == -EIO);
  		goto err_unlock;
  	}
- ret = i915_gem_init_scratch(dev_priv,
-				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
+	ret = i915_gem_init_scratch(i915,
+				    IS_GEN(i915, 2) ? SZ_256K : PAGE_SIZE);
  	if (ret) {
  		GEM_BUG_ON(ret == -EIO);
  		goto err_ggtt;
  	}
- ret = intel_engines_setup(dev_priv);
+	ret = intel_engines_setup(i915);
  	if (ret) {
  		GEM_BUG_ON(ret == -EIO);
  		goto err_unlock;
  	}
- ret = i915_gem_contexts_init(dev_priv);
+	ret = i915_gem_contexts_init(i915);
  	if (ret) {
  		GEM_BUG_ON(ret == -EIO);
  		goto err_scratch;
  	}
- ret = intel_engines_init(dev_priv);
+	ret = intel_engines_init(i915);
  	if (ret) {
  		GEM_BUG_ON(ret == -EIO);
  		goto err_context;
  	}
- intel_init_gt_powersave(dev_priv);
+	intel_init_gt_powersave(i915);
- ret = intel_uc_init(dev_priv);
+	ret = intel_uc_init(i915);
  	if (ret)
  		goto err_pm;
- ret = i915_gem_init_hw(dev_priv);
+	ret = i915_gem_init_hw(i915);
  	if (ret)
  		goto err_uc_init;
@@ -1589,13 +1589,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
  	 *
  	 * FIXME: break up the workarounds and apply them at the right time!
  	 */
-	intel_init_clock_gating(dev_priv);
+	intel_init_clock_gating(i915);
- ret = intel_engines_verify_workarounds(dev_priv);
+	ret = intel_engines_verify_workarounds(i915);
  	if (ret)
  		goto err_init_hw;
- ret = __intel_engines_record_defaults(dev_priv);
+	ret = __intel_engines_record_defaults(i915);
  	if (ret)
  		goto err_init_hw;
@@ -1609,8 +1609,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
  		goto err_init_hw;
  	}
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	mutex_unlock(&i915->drm.struct_mutex);
return 0;
@@ -1621,107 +1621,107 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
  	 * driver doesn't explode during runtime.
  	 */
  err_init_hw:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
- i915_gem_set_wedged(dev_priv);
-	i915_gem_suspend(dev_priv);
-	i915_gem_suspend_late(dev_priv);
+	i915_gem_set_wedged(i915);
+	i915_gem_suspend(i915);
+	i915_gem_suspend_late(i915);
- i915_gem_drain_workqueue(dev_priv);
+	i915_gem_drain_workqueue(i915);
- mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uc_fini_hw(dev_priv);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_uc_fini_hw(i915);
  err_uc_init:
-	intel_uc_fini(dev_priv);
+	intel_uc_fini(i915);
  err_pm:
  	if (ret != -EIO) {
-		intel_cleanup_gt_powersave(dev_priv);
-		intel_engines_cleanup(dev_priv);
+		intel_cleanup_gt_powersave(i915);
+		intel_engines_cleanup(i915);
  	}
  err_context:
  	if (ret != -EIO)
-		i915_gem_contexts_fini(dev_priv);
+		i915_gem_contexts_fini(i915);
  err_scratch:
-	i915_gem_fini_scratch(dev_priv);
+	i915_gem_fini_scratch(i915);
  err_ggtt:
  err_unlock:
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	mutex_unlock(&i915->drm.struct_mutex);
err_uc_misc:
-	intel_uc_fini_misc(dev_priv);
+	intel_uc_fini_misc(i915);
if (ret != -EIO) {
-		i915_gem_cleanup_userptr(dev_priv);
-		i915_timelines_fini(dev_priv);
+		i915_gem_cleanup_userptr(i915);
+		i915_timelines_fini(i915);
  	}
if (ret == -EIO) {
-		mutex_lock(&dev_priv->drm.struct_mutex);
+		mutex_lock(&i915->drm.struct_mutex);
/*
  		 * Allow engine initialisation to fail by marking the GPU as
  		 * wedged. But we only want to do this where the GPU is angry,
  		 * for all other failure, such as an allocation failure, bail.
  		 */
-		if (!i915_reset_failed(dev_priv)) {
-			i915_load_error(dev_priv,
+		if (!i915_reset_failed(i915)) {
+			i915_load_error(i915,
  					"Failed to initialize GPU, declaring it wedged!\n");
-			i915_gem_set_wedged(dev_priv);
+			i915_gem_set_wedged(i915);
  		}
/* Minimal basic recovery for KMS */
-		ret = i915_ggtt_enable_hw(dev_priv);
-		i915_gem_restore_gtt_mappings(dev_priv);
-		i915_gem_restore_fences(dev_priv);
-		intel_init_clock_gating(dev_priv);
+		ret = i915_ggtt_enable_hw(i915);
+		i915_gem_restore_gtt_mappings(i915);
+		i915_gem_restore_fences(i915);
+		intel_init_clock_gating(i915);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+		mutex_unlock(&i915->drm.struct_mutex);
  	}
- i915_gem_drain_freed_objects(dev_priv);
+	i915_gem_drain_freed_objects(i915);
  	return ret;
  }
-void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
+void i915_gem_fini_hw(struct drm_i915_private *i915)
  {
-	GEM_BUG_ON(dev_priv->gt.awake);
+	GEM_BUG_ON(i915->gt.awake);
- intel_wakeref_auto_fini(&dev_priv->mm.userfault_wakeref);
+	intel_wakeref_auto_fini(&i915->mm.userfault_wakeref);
- i915_gem_suspend_late(dev_priv);
-	intel_disable_gt_powersave(dev_priv);
+	i915_gem_suspend_late(i915);
+	intel_disable_gt_powersave(i915);
/* Flush any outstanding unpin_work. */
-	i915_gem_drain_workqueue(dev_priv);
+	i915_gem_drain_workqueue(i915);
- mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uc_fini_hw(dev_priv);
-	intel_uc_fini(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_uc_fini_hw(i915);
+	intel_uc_fini(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
- i915_gem_drain_freed_objects(dev_priv);
+	i915_gem_drain_freed_objects(i915);
  }
-void i915_gem_fini(struct drm_i915_private *dev_priv)
+void i915_gem_fini(struct drm_i915_private *i915)
  {
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_engines_cleanup(dev_priv);
-	i915_gem_contexts_fini(dev_priv);
-	i915_gem_fini_scratch(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_engines_cleanup(i915);
+	i915_gem_contexts_fini(i915);
+	i915_gem_fini_scratch(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
- intel_wa_list_free(&dev_priv->gt_wa_list);
+	intel_wa_list_free(&i915->gt_wa_list);
- intel_cleanup_gt_powersave(dev_priv);
+	intel_cleanup_gt_powersave(i915);
- intel_uc_fini_misc(dev_priv);
-	i915_gem_cleanup_userptr(dev_priv);
-	i915_timelines_fini(dev_priv);
+	intel_uc_fini_misc(i915);
+	i915_gem_cleanup_userptr(i915);
+	i915_timelines_fini(i915);
- i915_gem_drain_freed_objects(dev_priv);
+	i915_gem_drain_freed_objects(i915);
- WARN_ON(!list_empty(&dev_priv->contexts.list));
+	WARN_ON(!list_empty(&i915->contexts.list));
  }
void i915_gem_init_mmio(struct drm_i915_private *i915)
@@ -1730,35 +1730,35 @@ void i915_gem_init_mmio(struct drm_i915_private *i915)
  }
void
-i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
+i915_gem_load_init_fences(struct drm_i915_private *i915)
  {
  	int i;
- if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
-	    !IS_CHERRYVIEW(dev_priv))
-		dev_priv->num_fence_regs = 32;
-	else if (INTEL_GEN(dev_priv) >= 4 ||
-		 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
-		 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
-		dev_priv->num_fence_regs = 16;
+	if (INTEL_GEN(i915) >= 7 && !IS_VALLEYVIEW(i915) &&
+	    !IS_CHERRYVIEW(i915))
+		i915->num_fence_regs = 32;
+	else if (INTEL_GEN(i915) >= 4 ||
+		 IS_I945G(i915) || IS_I945GM(i915) ||
+		 IS_G33(i915) || IS_PINEVIEW(i915))
+		i915->num_fence_regs = 16;
  	else
-		dev_priv->num_fence_regs = 8;
+		i915->num_fence_regs = 8;
- if (intel_vgpu_active(dev_priv))
-		dev_priv->num_fence_regs =
+	if (intel_vgpu_active(i915))
+		i915->num_fence_regs =
  				I915_READ(vgtif_reg(avail_rs.fence_num));
/* Initialize fence registers to zero */
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
+	for (i = 0; i < i915->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *fence = &i915->fence_regs[i];
- fence->i915 = dev_priv;
+		fence->i915 = i915;
  		fence->id = i;
-		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
+		list_add_tail(&fence->link, &i915->mm.fence_list);
  	}
-	i915_gem_restore_fences(dev_priv);
+	i915_gem_restore_fences(i915);
- i915_gem_detect_bit_6_swizzle(dev_priv);
+	i915_gem_detect_bit_6_swizzle(i915);
  }
static void i915_gem_init__mm(struct drm_i915_private *i915)
@@ -1778,56 +1778,56 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
  	i915_gem_init__objects(i915);
  }
-int i915_gem_init_early(struct drm_i915_private *dev_priv)
+int i915_gem_init_early(struct drm_i915_private *i915)
  {
  	static struct lock_class_key reset_key;
  	int err;
- intel_gt_pm_init(dev_priv);
+	intel_gt_pm_init(i915);
- INIT_LIST_HEAD(&dev_priv->gt.active_rings);
-	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
-	spin_lock_init(&dev_priv->gt.closed_lock);
-	lockdep_init_map(&dev_priv->gt.reset_lockmap,
+	INIT_LIST_HEAD(&i915->gt.active_rings);
+	INIT_LIST_HEAD(&i915->gt.closed_vma);
+	spin_lock_init(&i915->gt.closed_lock);
+	lockdep_init_map(&i915->gt.reset_lockmap,
  			 "i915.reset", &reset_key, 0);
- i915_gem_init__mm(dev_priv);
-	i915_gem_init__pm(dev_priv);
+	i915_gem_init__mm(i915);
+	i915_gem_init__pm(i915);
- init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
-	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
-	mutex_init(&dev_priv->gpu_error.wedge_mutex);
-	init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
+	init_waitqueue_head(&i915->gpu_error.wait_queue);
+	init_waitqueue_head(&i915->gpu_error.reset_queue);
+	mutex_init(&i915->gpu_error.wedge_mutex);
+	init_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
- atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
+	atomic_set(&i915->mm.bsd_engine_dispatch_index, 0);
- spin_lock_init(&dev_priv->fb_tracking.lock);
+	spin_lock_init(&i915->fb_tracking.lock);
- err = i915_gemfs_init(dev_priv);
+	err = i915_gemfs_init(i915);
  	if (err)
  		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
return 0;
  }
-void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
+void i915_gem_cleanup_early(struct drm_i915_private *i915)
  {
-	i915_gem_drain_freed_objects(dev_priv);
-	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
-	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
-	WARN_ON(dev_priv->mm.shrink_count);
+	i915_gem_drain_freed_objects(i915);
+	GEM_BUG_ON(!llist_empty(&i915->mm.free_list));
+	GEM_BUG_ON(atomic_read(&i915->mm.free_count));
+	WARN_ON(i915->mm.shrink_count);
- cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
+	cleanup_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
- i915_gemfs_fini(dev_priv);
+	i915_gemfs_fini(i915);
  }
-int i915_gem_freeze(struct drm_i915_private *dev_priv)
+int i915_gem_freeze(struct drm_i915_private *i915)
  {
  	/* Discard all purgeable objects, let userspace recover those as
  	 * required after resuming.
  	 */
-	i915_gem_shrink_all(dev_priv);
+	i915_gem_shrink_all(i915);
return 0;
  }
@@ -1895,7 +1895,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
  		return -ENOMEM;
file->driver_priv = file_priv;
-	file_priv->dev_priv = i915;
+	file_priv->i915 = i915;
  	file_priv->file = file;
spin_lock_init(&file_priv->mm.lock);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a5783c4cb98b..4244d9c54d66 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -99,7 +99,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
  			 u64 start, u64 end,
  			 unsigned flags)
  {
-	struct drm_i915_private *dev_priv = vm->i915;
+	struct drm_i915_private *i915 = vm->i915;
  	struct drm_mm_scan scan;
  	struct list_head eviction_list;
  	struct i915_vma *vma, *next;
@@ -138,7 +138,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
  	 * retiring.
  	 */
  	if (!(flags & PIN_NONBLOCK))
-		i915_retire_requests(dev_priv);
+		i915_retire_requests(i915);
search_again:
  	active = NULL;
@@ -211,7 +211,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
  	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
  		return -EBUSY;
- ret = ggtt_flush(dev_priv);
+	ret = ggtt_flush(i915);
  	if (ret)
  		return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e70675bfb51d..42d88dae9c5d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1294,7 +1294,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
  static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
  {
  	struct i915_address_space *vm = &ppgtt->vm;
-	struct drm_i915_private *dev_priv = vm->i915;
+	struct drm_i915_private *i915 = vm->i915;
  	enum vgt_g2v_type msg;
  	int i;
@@ -1692,7 +1692,7 @@ static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
  		  ppgtt->pd_addr + pde);
  }
-static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
+static void gen7_ppgtt_enable(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	u32 ecochk, ecobits;
@@ -1702,7 +1702,7 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
  	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
ecochk = I915_READ(GAM_ECOCHK);
-	if (IS_HASWELL(dev_priv)) {
+	if (IS_HASWELL(i915)) {
  		ecochk |= ECOCHK_PPGTT_WB_HSW;
  	} else {
  		ecochk |= ECOCHK_PPGTT_LLC_IVB;
@@ -1710,7 +1710,7 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
  	}
  	I915_WRITE(GAM_ECOCHK, ecochk);
- for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		/* GFX_MODE is per-ring on gen7+ */
  		ENGINE_WRITE(engine,
  			     RING_MODE_GEN7,
@@ -1718,7 +1718,7 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
  	}
  }
-static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
+static void gen6_ppgtt_enable(struct drm_i915_private *i915)
  {
  	u32 ecochk, gab_ctl, ecobits;
@@ -1732,7 +1732,7 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
  	ecochk = I915_READ(GAM_ECOCHK);
  	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
- if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
+	if (HAS_PPGTT(i915)) /* may be disabled for VT-d */
  		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
  }
@@ -2185,20 +2185,20 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
  	return ERR_PTR(err);
  }
-static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
+static void gtt_write_workarounds(struct drm_i915_private *i915)
  {
  	/* This function is for gtt related workarounds. This function is
  	 * called on driver load and after a GPU reset, so you can place
  	 * workarounds here even if they get overwritten by GPU reset.
  	 */
  	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
-	if (IS_BROADWELL(dev_priv))
+	if (IS_BROADWELL(i915))
  		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
-	else if (IS_CHERRYVIEW(dev_priv))
+	else if (IS_CHERRYVIEW(i915))
  		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
-	else if (IS_GEN9_LP(dev_priv))
+	else if (IS_GEN9_LP(i915))
  		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
-	else if (INTEL_GEN(dev_priv) >= 9)
+	else if (INTEL_GEN(i915) >= 9)
  		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
/*
@@ -2212,21 +2212,21 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
  	 * 32K pages, but we don't currently have any support for it in our
  	 * driver.
  	 */
-	if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
-	    INTEL_GEN(dev_priv) <= 10)
+	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
+	    INTEL_GEN(i915) <= 10)
  		I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
  			   I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
  			   GAMW_ECO_ENABLE_64K_IPS_FIELD);
  }
-int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
+int i915_ppgtt_init_hw(struct drm_i915_private *i915)
  {
-	gtt_write_workarounds(dev_priv);
+	gtt_write_workarounds(i915);
- if (IS_GEN(dev_priv, 6))
-		gen6_ppgtt_enable(dev_priv);
-	else if (IS_GEN(dev_priv, 7))
-		gen7_ppgtt_enable(dev_priv);
+	if (IS_GEN(i915, 6))
+		gen6_ppgtt_enable(i915);
+	else if (IS_GEN(i915, 7))
+		gen7_ppgtt_enable(i915);
return 0;
  }
@@ -2293,29 +2293,29 @@ void i915_vm_release(struct kref *kref)
  /* Certain Gen5 chipsets require require idling the GPU before
   * unmapping anything from the GTT when VT-d is enabled.
   */
-static bool needs_idle_maps(struct drm_i915_private *dev_priv)
+static bool needs_idle_maps(struct drm_i915_private *i915)
  {
  	/* Query intel_iommu to see if we need the workaround. Presumably that
  	 * was loaded first.
  	 */
-	return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
+	return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
  }
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
  {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &i915->ggtt;
/* Don't bother messing with faults pre GEN6 as we have little
  	 * documentation supporting that it's a good idea.
  	 */
-	if (INTEL_GEN(dev_priv) < 6)
+	if (INTEL_GEN(i915) < 6)
  		return;
- i915_check_and_clear_faults(dev_priv);
+	i915_check_and_clear_faults(i915);
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

-	i915_ggtt_invalidate(dev_priv);
+	i915_ggtt_invalidate(i915);
  }
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
@@ -2411,7 +2411,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
   * Binds an object into the global gtt with the specified cache level. The object
   * will be accessible to the GPU via commands whose operands reference offsets
   * within the global GTT as well as accessible by the GPU through the GMADR
- * mapped BAR (dev_priv->mm.gtt->gtt).
+ * mapped BAR (i915->mm.gtt->gtt).
   */
  static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
  				     struct i915_vma *vma,
@@ -2461,7 +2461,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
  {
-	struct drm_i915_private *dev_priv = vm->i915;
+	struct drm_i915_private *i915 = vm->i915;
/*
  	 * Make sure the internal GAM fifo has been cleared of all GTT
@@ -2705,12 +2705,12 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
  void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
  			       struct sg_table *pages)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct device *kdev = &dev_priv->drm.pdev->dev;
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct device *kdev = &i915->drm.pdev->dev;
+	struct i915_ggtt *ggtt = &i915->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
-		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+		if (i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT)) {
  			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
  			/* Wait a bit, in hopes it avoids the hang */
  			udelay(10);
@@ -2834,7 +2834,7 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
  		drm_mm_remove_node(&ggtt->uc_fw);
  }
-int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
+int i915_gem_init_ggtt(struct drm_i915_private *i915)
  {
  	/* Let GEM Manage all of the aperture.
  	 *
@@ -2845,7 +2845,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
  	 * aperture.  One page should be enough to keep any prefetching inside
  	 * of the aperture.
  	 */
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	unsigned long hole_start, hole_end;
  	struct drm_mm_node *entry;
  	int ret;
@@ -2857,9 +2857,9 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
  	 * why.
  	 */
  	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
-			       intel_wopcm_guc_size(&dev_priv->wopcm));
+			       intel_wopcm_guc_size(&i915->wopcm));
- ret = intel_vgt_balloon(dev_priv);
+	ret = intel_vgt_balloon(i915);
  	if (ret)
  		return ret;
@@ -2891,8 +2891,8 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
  	/* And finally clear the reserved guard page */
  	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
- if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
-		ret = init_aliasing_ppgtt(dev_priv);
+	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
+		ret = init_aliasing_ppgtt(i915);
  		if (ret)
  			goto err_appgtt;
  	}
@@ -2908,18 +2908,18 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
/**
   * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
- * @dev_priv: i915 device
+ * @i915: i915 device
   */
-void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
+void i915_ggtt_cleanup_hw(struct drm_i915_private *i915)
  {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	struct i915_vma *vma, *vn;
  	struct pagevec *pvec;
ggtt->vm.closed = true;

-	mutex_lock(&dev_priv->drm.struct_mutex);
-	fini_aliasing_ppgtt(dev_priv);
+	mutex_lock(&i915->drm.struct_mutex);
+	fini_aliasing_ppgtt(i915);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
  		WARN_ON(i915_vma_unbind(vma));
@@ -2930,24 +2930,24 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
  	ggtt_release_guc_top(ggtt);
if (drm_mm_initialized(&ggtt->vm.mm)) {
-		intel_vgt_deballoon(dev_priv);
+		intel_vgt_deballoon(i915);
  		i915_address_space_fini(&ggtt->vm);
  	}
ggtt->vm.cleanup(&ggtt->vm);

-	pvec = &dev_priv->mm.wc_stash.pvec;
+	pvec = &i915->mm.wc_stash.pvec;
  	if (pvec->nr) {
  		set_pages_array_wb(pvec->pages, pvec->nr);
  		__pagevec_release(pvec);
  	}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
arch_phys_wc_del(ggtt->mtrr);
  	io_mapping_fini(&ggtt->iomap);
- i915_gem_cleanup_stolen(dev_priv);
+	i915_gem_cleanup_stolen(i915);
  }
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2986,8 +2986,8 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
  {
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+	struct pci_dev *pdev = i915->drm.pdev;
  	phys_addr_t phys_addr;
  	int ret;
@@ -3001,7 +3001,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
  	 * resort to an uncached mapping. The WC issue is easily caught by the
  	 * readback check when writing GTT PTE entries.
  	 */
-	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
+	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
  		ggtt->gsm = ioremap_nocache(phys_addr, size);
  	else
  		ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -3135,9 +3135,9 @@ void intel_ppat_put(const struct intel_ppat_entry *entry)
  	kref_put(&ppat->entries[index].ref, release_ppat);
  }
-static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
+static void cnl_private_pat_update_hw(struct drm_i915_private *i915)
  {
-	struct intel_ppat *ppat = &dev_priv->ppat;
+	struct intel_ppat *ppat = &i915->ppat;
  	int i;
for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
@@ -3146,9 +3146,9 @@ static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
  	}
  }
-static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
+static void bdw_private_pat_update_hw(struct drm_i915_private *i915)
  {
-	struct intel_ppat *ppat = &dev_priv->ppat;
+	struct intel_ppat *ppat = &i915->ppat;
  	u64 pat = 0;
  	int i;
@@ -3293,16 +3293,16 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
  	cleanup_scratch_page(vm);
  }
-static void setup_private_pat(struct drm_i915_private *dev_priv)
+static void setup_private_pat(struct drm_i915_private *i915)
  {
-	struct intel_ppat *ppat = &dev_priv->ppat;
+	struct intel_ppat *ppat = &i915->ppat;
  	int i;
- ppat->i915 = dev_priv;
+	ppat->i915 = i915;
- if (INTEL_GEN(dev_priv) >= 10)
+	if (INTEL_GEN(i915) >= 10)
  		cnl_setup_private_ppat(ppat);
-	else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
+	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
  		chv_setup_private_ppat(ppat);
  	else
  		bdw_setup_private_ppat(ppat);
@@ -3315,13 +3315,13 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
  		set_bit(i, ppat->dirty);
  	}
- ppat->update_hw(dev_priv);
+	ppat->update_hw(i915);
  }
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
  {
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+	struct pci_dev *pdev = i915->drm.pdev;
  	unsigned int size;
  	u16 snb_gmch_ctl;
  	int err;
@@ -3339,7 +3339,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
  		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-	if (IS_CHERRYVIEW(dev_priv))
+	if (IS_CHERRYVIEW(i915))
  		size = chv_get_total_gtt_size(snb_gmch_ctl);
  	else
  		size = gen8_get_total_gtt_size(snb_gmch_ctl);
@@ -3348,23 +3348,23 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
  	ggtt->vm.cleanup = gen6_gmch_remove;
  	ggtt->vm.insert_page = gen8_ggtt_insert_page;
  	ggtt->vm.clear_range = nop_clear_range;
-	if (intel_scanout_needs_vtd_wa(dev_priv))
+	if (intel_scanout_needs_vtd_wa(i915))
  		ggtt->vm.clear_range = gen8_ggtt_clear_range;
 	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
 
 	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
-	if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
-	    IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
+	if (intel_ggtt_update_needs_vtd_wa(i915) ||
+	    IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
  		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
  		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
  		if (ggtt->vm.clear_range != nop_clear_range)
  			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
/* Prevent recursively calling stop_machine() and deadlocks. */
-		dev_info(dev_priv->drm.dev,
+		dev_info(i915->drm.dev,
  			 "Disabling error capture for VT-d workaround\n");
-		i915_disable_error_state(dev_priv, -ENODEV);
+		i915_disable_error_state(i915, -ENODEV);
  	}
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3376,15 +3376,15 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.pte_encode = gen8_pte_encode;
 
-	setup_private_pat(dev_priv);
+	setup_private_pat(i915);
return ggtt_probe_common(ggtt, size);
  }
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
  {
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+	struct pci_dev *pdev = i915->drm.pdev;
  	unsigned int size;
  	u16 snb_gmch_ctl;
  	int err;
@@ -3413,7 +3413,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
  	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.clear_range = nop_clear_range;
-	if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
  		ggtt->vm.clear_range = gen6_ggtt_clear_range;
  	ggtt->vm.insert_page = gen6_ggtt_insert_page;
  	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
@@ -3421,13 +3421,13 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->invalidate = gen6_ggtt_invalidate;
 
-	if (HAS_EDRAM(dev_priv))
+	if (HAS_EDRAM(i915))
  		ggtt->vm.pte_encode = iris_pte_encode;
-	else if (IS_HASWELL(dev_priv))
+	else if (IS_HASWELL(i915))
  		ggtt->vm.pte_encode = hsw_pte_encode;
-	else if (IS_VALLEYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915))
  		ggtt->vm.pte_encode = byt_pte_encode;
-	else if (INTEL_GEN(dev_priv) >= 7)
+	else if (INTEL_GEN(i915) >= 7)
  		ggtt->vm.pte_encode = ivb_pte_encode;
  	else
  		ggtt->vm.pte_encode = snb_pte_encode;
@@ -3447,11 +3447,11 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
  {
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
+	struct drm_i915_private *i915 = ggtt->vm.i915;
  	phys_addr_t gmadr_base;
  	int ret;
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
+	ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
  	if (!ret) {
  		DRM_ERROR("failed to set up gmch\n");
  		return -EIO;
@@ -3463,7 +3463,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
  		(struct resource) DEFINE_RES_MEM(gmadr_base,
  						 ggtt->mappable_end);
- ggtt->do_idle_maps = needs_idle_maps(dev_priv);
+	ggtt->do_idle_maps = needs_idle_maps(i915);
  	ggtt->vm.insert_page = i915_ggtt_insert_page;
  	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
  	ggtt->vm.clear_range = i915_ggtt_clear_range;
@@ -3484,19 +3484,19 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
/**
   * i915_ggtt_probe_hw - Probe GGTT hardware location
- * @dev_priv: i915 device
+ * @i915: i915 device
   */
-int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
+int i915_ggtt_probe_hw(struct drm_i915_private *i915)
  {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	int ret;
- ggtt->vm.i915 = dev_priv;
-	ggtt->vm.dma = &dev_priv->drm.pdev->dev;
+	ggtt->vm.i915 = i915;
+	ggtt->vm.dma = &i915->drm.pdev->dev;
- if (INTEL_GEN(dev_priv) <= 5)
+	if (INTEL_GEN(i915) <= 5)
  		ret = i915_gmch_probe(ggtt);
-	else if (INTEL_GEN(dev_priv) < 8)
+	else if (INTEL_GEN(i915) < 8)
  		ret = gen6_gmch_probe(ggtt);
  	else
  		ret = gen8_gmch_probe(ggtt);
@@ -3532,35 +3532,35 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
/**
   * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev_priv: i915 device
+ * @i915: i915 device
   */
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+int i915_ggtt_init_hw(struct drm_i915_private *i915)
  {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	int ret;
- stash_init(&dev_priv->mm.wc_stash);
+	stash_init(&i915->mm.wc_stash);
/* Note that we use page colouring to enforce a guard page at the
  	 * end of the address space. This is required as the CS may prefetch
  	 * beyond the end of the batch buffer, across the page boundary,
  	 * and beyond the end of the GTT if we do not provide a guard.
  	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
  	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
 	ggtt->vm.is_ggtt = true;
 
 	/* Only VLV supports read-only GGTT mappings */
-	ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
- if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
+	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
  		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
- if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
-				dev_priv->ggtt.gmadr.start,
-				dev_priv->ggtt.mappable_end)) {
+	if (!io_mapping_init_wc(&i915->ggtt.iomap,
+				i915->ggtt.gmadr.start,
+				i915->ggtt.mappable_end)) {
  		ret = -EIO;
  		goto out_gtt_cleanup;
  	}
@@ -3571,7 +3571,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
  	 * Initialise stolen early so that we may reserve preallocated
  	 * objects for the BIOS to KMS transition.
  	 */
-	ret = i915_gem_init_stolen(dev_priv);
+	ret = i915_gem_init_stolen(i915);
  	if (ret)
  		goto out_gtt_cleanup;
@@ -3582,9 +3582,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
  	return ret;
  }
-int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
+int i915_ggtt_enable_hw(struct drm_i915_private *i915)
  {
-	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
+	if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
  		return -EIO;
return 0;
@@ -3613,12 +3613,12 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
  	i915_ggtt_invalidate(i915);
  }
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
  {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	struct i915_vma *vma, *vn;
- i915_check_and_clear_faults(dev_priv);
+	i915_check_and_clear_faults(i915);
 	mutex_lock(&ggtt->vm.mutex);
 
@@ -3652,15 +3652,15 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
  	}
ggtt->vm.closed = false;
-	i915_ggtt_invalidate(dev_priv);
+	i915_ggtt_invalidate(i915);
 	mutex_unlock(&ggtt->vm.mutex);
 
-	if (INTEL_GEN(dev_priv) >= 8) {
-		struct intel_ppat *ppat = &dev_priv->ppat;
+	if (INTEL_GEN(i915) >= 8) {
+		struct intel_ppat *ppat = &i915->ppat;
bitmap_set(ppat->dirty, 0, ppat->max_entries);
-		dev_priv->ppat.update_hw(dev_priv);
+		i915->ppat.update_hw(i915);
  		return;
  	}
  }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 89437d0a721c..b444552ccc63 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -400,7 +400,7 @@ struct i915_ggtt {
/** "Graphics Stolen Memory" holds the global PTEs */
  	void __iomem *gsm;
-	void (*invalidate)(struct drm_i915_private *dev_priv);
+	void (*invalidate)(struct drm_i915_private *i915);
 	bool do_idle_maps;
 
@@ -627,17 +627,17 @@ const struct intel_ppat_entry *
  intel_ppat_get(struct drm_i915_private *i915, u8 value);
  void intel_ppat_put(const struct intel_ppat_entry *entry);
-int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
-int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
+int i915_ggtt_probe_hw(struct drm_i915_private *i915);
+int i915_ggtt_init_hw(struct drm_i915_private *i915);
+int i915_ggtt_enable_hw(struct drm_i915_private *i915);
  void i915_ggtt_enable_guc(struct drm_i915_private *i915);
  void i915_ggtt_disable_guc(struct drm_i915_private *i915);
-int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
-void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
+int i915_gem_init_ggtt(struct drm_i915_private *i915);
+void i915_ggtt_cleanup_hw(struct drm_i915_private *i915);
-int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
+int i915_ppgtt_init_hw(struct drm_i915_private *i915);
-struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
+struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *i915);
static inline struct i915_address_space *
  i915_vm_get(struct i915_address_space *vm)
@@ -657,8 +657,8 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base);
  void gen6_ppgtt_unpin(struct i915_ppgtt *base);
  void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
  					    struct sg_table *pages);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d7f164c9f2eb..335da59031b0 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1122,22 +1122,22 @@ static u32 i915_error_generate_code(struct i915_gpu_state *error,
static void gem_record_fences(struct i915_gpu_state *error)
  {
-	struct drm_i915_private *dev_priv = error->i915;
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = error->i915;
+	struct intel_uncore *uncore = &i915->uncore;
  	int i;
- if (INTEL_GEN(dev_priv) >= 6) {
-		for (i = 0; i < dev_priv->num_fence_regs; i++)
+	if (INTEL_GEN(i915) >= 6) {
+		for (i = 0; i < i915->num_fence_regs; i++)
  			error->fence[i] =
  				intel_uncore_read64(uncore,
  						    FENCE_REG_GEN6_LO(i));
-	} else if (INTEL_GEN(dev_priv) >= 4) {
-		for (i = 0; i < dev_priv->num_fence_regs; i++)
+	} else if (INTEL_GEN(i915) >= 4) {
+		for (i = 0; i < i915->num_fence_regs; i++)
  			error->fence[i] =
  				intel_uncore_read64(uncore,
  						    FENCE_REG_965_LO(i));
  	} else {
-		for (i = 0; i < dev_priv->num_fence_regs; i++)
+		for (i = 0; i < i915->num_fence_regs; i++)
  			error->fence[i] =
  				intel_uncore_read(uncore, FENCE_REG(i));
  	}
@@ -1148,23 +1148,23 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
  					  struct intel_engine_cs *engine,
  					  struct drm_i915_error_engine *ee)
  {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(dev_priv) >= 6) {
+	if (INTEL_GEN(i915) >= 6) {
  		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
-		if (INTEL_GEN(dev_priv) >= 8)
+		if (INTEL_GEN(i915) >= 8)
  			ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
  		else
  			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
  	}
- if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
  		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
  		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
  		ee->instps = ENGINE_READ(engine, RING_INSTPS);
  		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
-		if (INTEL_GEN(dev_priv) >= 8) {
+		if (INTEL_GEN(i915) >= 8) {
  			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
  			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
  		}
@@ -1183,13 +1183,13 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
  	ee->head = ENGINE_READ(engine, RING_HEAD);
  	ee->tail = ENGINE_READ(engine, RING_TAIL);
  	ee->ctl = ENGINE_READ(engine, RING_CTL);
-	if (INTEL_GEN(dev_priv) > 2)
+	if (INTEL_GEN(i915) > 2)
  		ee->mode = ENGINE_READ(engine, RING_MI_MODE);
- if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
+	if (!HWS_NEEDS_PHYSICAL(i915)) {
  		i915_reg_t mmio;
- if (IS_GEN(dev_priv, 7)) {
+		if (IS_GEN(i915, 7)) {
  			switch (engine->id) {
  			default:
  				MISSING_CASE(engine->id);
@@ -1219,21 +1219,21 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
  	ee->idle = intel_engine_is_idle(engine);
  	if (!ee->idle)
  		ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
-	ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
+	ee->reset_count = i915_reset_engine_count(&i915->gpu_error,
  						  engine);
- if (HAS_PPGTT(dev_priv)) {
+	if (HAS_PPGTT(i915)) {
  		int i;
 		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
 
-		if (IS_GEN(dev_priv, 6)) {
+		if (IS_GEN(i915, 6)) {
  			ee->vm_info.pp_dir_base =
  				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
-		} else if (IS_GEN(dev_priv, 7)) {
+		} else if (IS_GEN(i915, 7)) {
  			ee->vm_info.pp_dir_base =
  				ENGINE_READ(engine, RING_PP_DIR_BASE);
-		} else if (INTEL_GEN(dev_priv) >= 8) {
+		} else if (INTEL_GEN(i915) >= 8) {
  			u32 base = engine->mmio_base;
for (i = 0; i < 4; i++) {
@@ -1388,7 +1388,7 @@ static void request_record_user_bo(struct i915_request *request,
  }
static struct drm_i915_error_object *
-capture_object(struct drm_i915_private *dev_priv,
+capture_object(struct drm_i915_private *i915,
  	       struct drm_i915_gem_object *obj)
  {
  	if (obj && i915_gem_object_has_pages(obj)) {
@@ -1399,7 +1399,7 @@ capture_object(struct drm_i915_private *dev_priv,
  			.obj = obj,
  		};
- return i915_error_object_create(dev_priv, &fake);
+		return i915_error_object_create(i915, &fake);
  	} else {
  		return NULL;
  	}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 2ecd0c6a1c94..980f95fb5578 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -234,7 +234,7 @@ struct i915_gpu_error {
/**
  	 * Waitqueue to signal when the reset has completed. Used by clients
-	 * that wait for dev_priv->mm.wedged to settle.
+	 * that wait for i915->mm.wedged to settle.
  	 */
  	wait_queue_head_t reset_queue;
@@ -259,7 +259,7 @@ __printf(2, 3)
  void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
-void i915_capture_error_state(struct drm_i915_private *dev_priv,
+void i915_capture_error_state(struct drm_i915_private *i915,
  			      intel_engine_mask_t engine_mask,
  			      const char *error_msg);
@@ -286,7 +286,7 @@ void i915_disable_error_state(struct drm_i915_private *i915, int err);
 
 #else
 
-static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
+static inline void i915_capture_error_state(struct drm_i915_private *i915,
  					    u32 engine_mask,
  					    const char *error_msg)
  {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index cd9edddd6718..81ea65d85b9f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -256,18 +256,18 @@ static void gen2_irq_init(struct intel_uncore *uncore,
  #define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
  	gen2_irq_init((uncore), imr_val, ier_val)
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+static void gen6_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir);
+static void gen9_guc_irq_handler(struct drm_i915_private *i915, u32 pm_iir);
/* For display hotplug interrupt */
  static inline void
-i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
+i915_hotplug_interrupt_update_locked(struct drm_i915_private *i915,
  				     u32 mask,
  				     u32 bits)
  {
  	u32 val;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
  	WARN_ON(bits & ~mask);
val = I915_READ(PORT_HOTPLUG_EN);
@@ -278,7 +278,7 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
/**
   * i915_hotplug_interrupt_update - update hotplug interrupt enable
- * @dev_priv: driver private
+ * @i915: driver private
   * @mask: bits to update
   * @bits: bits to enable
   * NOTE: the HPD enable bits are modified both inside and outside
@@ -288,13 +288,13 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
   * held already, this function acquires the lock itself. A non-locking
   * version is also available.
   */
-void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update(struct drm_i915_private *i915,
  				   u32 mask,
  				   u32 bits)
  {
-	spin_lock_irq(&dev_priv->irq_lock);
-	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	i915_hotplug_interrupt_update_locked(i915, mask, bits);
+	spin_unlock_irq(&i915->irq_lock);
  }
static u32
@@ -334,84 +334,84 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
/**
   * ilk_update_display_irq - update DEIMR
- * @dev_priv: driver private
+ * @i915: driver private
   * @interrupt_mask: mask of interrupt bits to update
   * @enabled_irq_mask: mask of interrupt bits to enable
   */
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+void ilk_update_display_irq(struct drm_i915_private *i915,
  			    u32 interrupt_mask,
  			    u32 enabled_irq_mask)
  {
  	u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (WARN_ON(!intel_irqs_enabled(i915)))
  		return;
- new_val = dev_priv->irq_mask;
+	new_val = i915->irq_mask;
  	new_val &= ~interrupt_mask;
  	new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->irq_mask) {
-		dev_priv->irq_mask = new_val;
-		I915_WRITE(DEIMR, dev_priv->irq_mask);
+	if (new_val != i915->irq_mask) {
+		i915->irq_mask = new_val;
+		I915_WRITE(DEIMR, i915->irq_mask);
  		POSTING_READ(DEIMR);
  	}
  }
/**
   * ilk_update_gt_irq - update GTIMR
- * @dev_priv: driver private
+ * @i915: driver private
   * @interrupt_mask: mask of interrupt bits to update
   * @enabled_irq_mask: mask of interrupt bits to enable
   */
-static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
+static void ilk_update_gt_irq(struct drm_i915_private *i915,
  			      u32 interrupt_mask,
  			      u32 enabled_irq_mask)
  {
-	lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (WARN_ON(!intel_irqs_enabled(i915)))
  		return;
- dev_priv->gt_irq_mask &= ~interrupt_mask;
-	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	i915->gt_irq_mask &= ~interrupt_mask;
+	i915->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
+	I915_WRITE(GTIMR, i915->gt_irq_mask);
  }
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
+void gen5_enable_gt_irq(struct drm_i915_private *i915, u32 mask)
  {
-	ilk_update_gt_irq(dev_priv, mask, mask);
-	intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
+	ilk_update_gt_irq(i915, mask, mask);
+	intel_uncore_posting_read_fw(&i915->uncore, GTIMR);
  }
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
+void gen5_disable_gt_irq(struct drm_i915_private *i915, u32 mask)
  {
-	ilk_update_gt_irq(dev_priv, mask, 0);
+	ilk_update_gt_irq(i915, mask, 0);
  }
-static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
+static i915_reg_t gen6_pm_iir(struct drm_i915_private *i915)
  {
-	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
+	WARN_ON_ONCE(INTEL_GEN(i915) >= 11);
- return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+	return INTEL_GEN(i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
  }
-static void write_pm_imr(struct drm_i915_private *dev_priv)
+static void write_pm_imr(struct drm_i915_private *i915)
  {
  	i915_reg_t reg;
-	u32 mask = dev_priv->pm_imr;
+	u32 mask = i915->pm_imr;
- if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
  		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
  		/* pm is in upper half */
  		mask = mask << 16;
-	} else if (INTEL_GEN(dev_priv) >= 8) {
+	} else if (INTEL_GEN(i915) >= 8) {
  		reg = GEN8_GT_IMR(2);
  	} else {
  		reg = GEN6_PMIMR;
@@ -421,16 +421,16 @@ static void write_pm_imr(struct drm_i915_private *dev_priv)
  	POSTING_READ(reg);
  }
-static void write_pm_ier(struct drm_i915_private *dev_priv)
+static void write_pm_ier(struct drm_i915_private *i915)
  {
  	i915_reg_t reg;
-	u32 mask = dev_priv->pm_ier;
+	u32 mask = i915->pm_ier;
- if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
  		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
  		/* pm is in upper half */
  		mask = mask << 16;
-	} else if (INTEL_GEN(dev_priv) >= 8) {
+	} else if (INTEL_GEN(i915) >= 8) {
  		reg = GEN8_GT_IER(2);
  	} else {
  		reg = GEN6_PMIER;
@@ -441,11 +441,11 @@ static void write_pm_ier(struct drm_i915_private *dev_priv)
/**
   * snb_update_pm_irq - update GEN6_PMIMR
- * @dev_priv: driver private
+ * @i915: driver private
   * @interrupt_mask: mask of interrupt bits to update
   * @enabled_irq_mask: mask of interrupt bits to enable
   */
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+static void snb_update_pm_irq(struct drm_i915_private *i915,
  			      u32 interrupt_mask,
  			      u32 enabled_irq_mask)
  {
@@ -453,127 +453,127 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-	lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
- new_val = dev_priv->pm_imr;
+	new_val = i915->pm_imr;
  	new_val &= ~interrupt_mask;
  	new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->pm_imr) {
-		dev_priv->pm_imr = new_val;
-		write_pm_imr(dev_priv);
+	if (new_val != i915->pm_imr) {
+		i915->pm_imr = new_val;
+		write_pm_imr(i915);
  	}
  }
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
+void gen6_unmask_pm_irq(struct drm_i915_private *i915, u32 mask)
  {
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (WARN_ON(!intel_irqs_enabled(i915)))
  		return;
- snb_update_pm_irq(dev_priv, mask, mask);
+	snb_update_pm_irq(i915, mask, mask);
  }
-static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
+static void __gen6_mask_pm_irq(struct drm_i915_private *i915, u32 mask)
  {
-	snb_update_pm_irq(dev_priv, mask, 0);
+	snb_update_pm_irq(i915, mask, 0);
  }
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
+void gen6_mask_pm_irq(struct drm_i915_private *i915, u32 mask)
  {
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (WARN_ON(!intel_irqs_enabled(i915)))
  		return;
- __gen6_mask_pm_irq(dev_priv, mask);
+	__gen6_mask_pm_irq(i915, mask);
  }
-static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
+static void gen6_reset_pm_iir(struct drm_i915_private *i915, u32 reset_mask)
  {
-	i915_reg_t reg = gen6_pm_iir(dev_priv);
+	i915_reg_t reg = gen6_pm_iir(i915);
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
I915_WRITE(reg, reset_mask);
  	I915_WRITE(reg, reset_mask);
  	POSTING_READ(reg);
  }
-static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+static void gen6_enable_pm_irq(struct drm_i915_private *i915, u32 enable_mask)
  {
-	lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
- dev_priv->pm_ier |= enable_mask;
-	write_pm_ier(dev_priv);
-	gen6_unmask_pm_irq(dev_priv, enable_mask);
+	i915->pm_ier |= enable_mask;
+	write_pm_ier(i915);
+	gen6_unmask_pm_irq(i915, enable_mask);
  	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
  }
-static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+static void gen6_disable_pm_irq(struct drm_i915_private *i915, u32 disable_mask)
  {
-	lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
- dev_priv->pm_ier &= ~disable_mask;
-	__gen6_mask_pm_irq(dev_priv, disable_mask);
-	write_pm_ier(dev_priv);
+	i915->pm_ier &= ~disable_mask;
+	__gen6_mask_pm_irq(i915, disable_mask);
+	write_pm_ier(i915);
  	/* though a barrier is missing here, but don't really need a one */
  }
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+void gen11_reset_rps_interrupts(struct drm_i915_private *i915)
  {
-	spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
- while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
+	while (gen11_reset_one_iir(i915, 0, GEN11_GTPM))
  		;
- dev_priv->gt_pm.rps.pm_iir = 0;
+	i915->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
  }
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+void gen6_reset_rps_interrupts(struct drm_i915_private *i915)
  {
-	spin_lock_irq(&dev_priv->irq_lock);
-	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
-	dev_priv->gt_pm.rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	gen6_reset_pm_iir(i915, GEN6_PM_RPS_EVENTS);
+	i915->gt_pm.rps.pm_iir = 0;
+	spin_unlock_irq(&i915->irq_lock);
  }
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
+void gen6_enable_rps_interrupts(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
if (READ_ONCE(rps->interrupts_enabled))
  		return;
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
  	WARN_ON_ONCE(rps->pm_iir);
- if (INTEL_GEN(dev_priv) >= 11)
-		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
+	if (INTEL_GEN(i915) >= 11)
+		WARN_ON_ONCE(gen11_reset_one_iir(i915, 0, GEN11_GTPM));
  	else
-		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+		WARN_ON_ONCE(I915_READ(gen6_pm_iir(i915)) & i915->pm_rps_events);
rps->interrupts_enabled = true;
-	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	gen6_enable_pm_irq(i915, i915->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
  }
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
+void gen6_disable_rps_interrupts(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
if (!READ_ONCE(rps->interrupts_enabled))
  		return;
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
  	rps->interrupts_enabled = false;
- I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
+	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(i915, ~0u));
- gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+	gen6_disable_pm_irq(i915, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->irq_lock);
-	synchronize_irq(dev_priv->drm.irq);
+	spin_unlock_irq(&i915->irq_lock);
+	synchronize_irq(i915->drm.irq);
/* Now that we will not be generating any more work, flush any
  	 * outstanding tasks. As we are called on the RPS idle path,
@@ -581,48 +581,48 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
  	 * state of the worker can be discarded.
  	 */
  	cancel_work_sync(&rps->work);
-	if (INTEL_GEN(dev_priv) >= 11)
-		gen11_reset_rps_interrupts(dev_priv);
+	if (INTEL_GEN(i915) >= 11)
+		gen11_reset_rps_interrupts(i915);
  	else
-		gen6_reset_rps_interrupts(dev_priv);
+		gen6_reset_rps_interrupts(i915);
  }
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen9_reset_guc_interrupts(struct drm_i915_private *i915)
  {
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(i915);
- spin_lock_irq(&dev_priv->irq_lock);
-	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	gen6_reset_pm_iir(i915, i915->pm_guc_events);
+	spin_unlock_irq(&i915->irq_lock);
  }
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen9_enable_guc_interrupts(struct drm_i915_private *i915)
  {
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(i915);
- spin_lock_irq(&dev_priv->irq_lock);
-	if (!dev_priv->guc.interrupts.enabled) {
-		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
-				       dev_priv->pm_guc_events);
-		dev_priv->guc.interrupts.enabled = true;
-		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+	spin_lock_irq(&i915->irq_lock);
+	if (!i915->guc.interrupts.enabled) {
+		WARN_ON_ONCE(I915_READ(gen6_pm_iir(i915)) &
+				       i915->pm_guc_events);
+		i915->guc.interrupts.enabled = true;
+		gen6_enable_pm_irq(i915, i915->pm_guc_events);
  	}
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
  }
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen9_disable_guc_interrupts(struct drm_i915_private *i915)
  {
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(i915);
- spin_lock_irq(&dev_priv->irq_lock);
-	dev_priv->guc.interrupts.enabled = false;
+	spin_lock_irq(&i915->irq_lock);
+	i915->guc.interrupts.enabled = false;
- gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+	gen6_disable_pm_irq(i915, i915->pm_guc_events);
- spin_unlock_irq(&dev_priv->irq_lock);
-	synchronize_irq(dev_priv->drm.irq);
+	spin_unlock_irq(&i915->irq_lock);
+	synchronize_irq(i915->drm.irq);
- gen9_reset_guc_interrupts(dev_priv);
+	gen9_reset_guc_interrupts(i915);
  }
void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
@@ -632,53 +632,53 @@ void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
  	spin_unlock_irq(&i915->irq_lock);
  }
-void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen11_enable_guc_interrupts(struct drm_i915_private *i915)
  {
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (!dev_priv->guc.interrupts.enabled) {
+	spin_lock_irq(&i915->irq_lock);
+	if (!i915->guc.interrupts.enabled) {
  		u32 events = REG_FIELD_PREP(ENGINE1_MASK,
  					    GEN11_GUC_INTR_GUC2HOST);
- WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC));
+		WARN_ON_ONCE(gen11_reset_one_iir(i915, 0, GEN11_GUC));
  		I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
  		I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
-		dev_priv->guc.interrupts.enabled = true;
+		i915->guc.interrupts.enabled = true;
  	}
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
  }
-void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen11_disable_guc_interrupts(struct drm_i915_private *i915)
  {
-	spin_lock_irq(&dev_priv->irq_lock);
-	dev_priv->guc.interrupts.enabled = false;
+	spin_lock_irq(&i915->irq_lock);
+	i915->guc.interrupts.enabled = false;
I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
  	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
- spin_unlock_irq(&dev_priv->irq_lock);
-	synchronize_irq(dev_priv->drm.irq);
+	spin_unlock_irq(&i915->irq_lock);
+	synchronize_irq(i915->drm.irq);
- gen11_reset_guc_interrupts(dev_priv);
+	gen11_reset_guc_interrupts(i915);
  }
/**
   * bdw_update_port_irq - update DE port interrupt
- * @dev_priv: driver private
+ * @i915: driver private
   * @interrupt_mask: mask of interrupt bits to update
   * @enabled_irq_mask: mask of interrupt bits to enable
   */
-static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
+static void bdw_update_port_irq(struct drm_i915_private *i915,
  				u32 interrupt_mask,
  				u32 enabled_irq_mask)
  {
  	u32 new_val;
  	u32 old_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (WARN_ON(!intel_irqs_enabled(i915)))
  		return;
old_val = I915_READ(GEN8_DE_PORT_IMR);
@@ -695,43 +695,43 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
/**
   * bdw_update_pipe_irq - update DE pipe interrupt
- * @dev_priv: driver private
+ * @i915: driver private
   * @pipe: pipe whose interrupt to update
   * @interrupt_mask: mask of interrupt bits to update
   * @enabled_irq_mask: mask of interrupt bits to enable
   */
-void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+void bdw_update_pipe_irq(struct drm_i915_private *i915,
  			 enum pipe pipe,
  			 u32 interrupt_mask,
  			 u32 enabled_irq_mask)
  {
  	u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (WARN_ON(!intel_irqs_enabled(i915)))
  		return;
- new_val = dev_priv->de_irq_mask[pipe];
+	new_val = i915->de_irq_mask[pipe];
  	new_val &= ~interrupt_mask;
  	new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->de_irq_mask[pipe]) {
-		dev_priv->de_irq_mask[pipe] = new_val;
-		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	if (new_val != i915->de_irq_mask[pipe]) {
+		i915->de_irq_mask[pipe] = new_val;
+		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), i915->de_irq_mask[pipe]);
  		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
  	}
  }
/**
   * ibx_display_interrupt_update - update SDEIMR
- * @dev_priv: driver private
+ * @i915: driver private
   * @interrupt_mask: mask of interrupt bits to update
   * @enabled_irq_mask: mask of interrupt bits to enable
   */
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+void ibx_display_interrupt_update(struct drm_i915_private *i915,
  				  u32 interrupt_mask,
  				  u32 enabled_irq_mask)
  {
@@ -741,24 +741,24 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-	lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (WARN_ON(!intel_irqs_enabled(i915)))
  		return;
I915_WRITE(SDEIMR, sdeimr);
  	POSTING_READ(SDEIMR);
  }
-u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
+u32 i915_pipestat_enable_mask(struct drm_i915_private *i915,
  			      enum pipe pipe)
  {
-	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
+	u32 status_mask = i915->pipestat_irq_mask[pipe];
  	u32 enable_mask = status_mask << 16;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
- if (INTEL_GEN(dev_priv) < 5)
+	if (INTEL_GEN(i915) < 5)
  		goto out;
/*
@@ -791,7 +791,7 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
  	return enable_mask;
  }
-void i915_enable_pipestat(struct drm_i915_private *dev_priv,
+void i915_enable_pipestat(struct drm_i915_private *i915,
  			  enum pipe pipe, u32 status_mask)
  {
  	i915_reg_t reg = PIPESTAT(pipe);
@@ -801,20 +801,20 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
  		  "pipe %c: status_mask=0x%x\n",
  		  pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
-	WARN_ON(!intel_irqs_enabled(dev_priv));
+	lockdep_assert_held(&i915->irq_lock);
+	WARN_ON(!intel_irqs_enabled(i915));
- if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
+	if ((i915->pipestat_irq_mask[pipe] & status_mask) == status_mask)
  		return;
- dev_priv->pipestat_irq_mask[pipe] |= status_mask;
-	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+	i915->pipestat_irq_mask[pipe] |= status_mask;
+	enable_mask = i915_pipestat_enable_mask(i915, pipe);
I915_WRITE(reg, enable_mask | status_mask);
  	POSTING_READ(reg);
  }
-void i915_disable_pipestat(struct drm_i915_private *dev_priv,
+void i915_disable_pipestat(struct drm_i915_private *i915,
  			   enum pipe pipe, u32 status_mask)
  {
  	i915_reg_t reg = PIPESTAT(pipe);
@@ -824,44 +824,44 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
  		  "pipe %c: status_mask=0x%x\n",
  		  pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
-	WARN_ON(!intel_irqs_enabled(dev_priv));
+	lockdep_assert_held(&i915->irq_lock);
+	WARN_ON(!intel_irqs_enabled(i915));
- if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
+	if ((i915->pipestat_irq_mask[pipe] & status_mask) == 0)
  		return;
- dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
-	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+	i915->pipestat_irq_mask[pipe] &= ~status_mask;
+	enable_mask = i915_pipestat_enable_mask(i915, pipe);
I915_WRITE(reg, enable_mask | status_mask);
  	POSTING_READ(reg);
  }
-static bool i915_has_asle(struct drm_i915_private *dev_priv)
+static bool i915_has_asle(struct drm_i915_private *i915)
  {
-	if (!dev_priv->opregion.asle)
+	if (!i915->opregion.asle)
  		return false;
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+	return IS_PINEVIEW(i915) || IS_MOBILE(i915);
  }
/**
   * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
- * @dev_priv: i915 device private
+ * @i915: i915 device private
   */
-static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
+static void i915_enable_asle_pipestat(struct drm_i915_private *i915)
  {
-	if (!i915_has_asle(dev_priv))
+	if (!i915_has_asle(i915))
  		return;
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
-	if (INTEL_GEN(dev_priv) >= 4)
-		i915_enable_pipestat(dev_priv, PIPE_A,
+	i915_enable_pipestat(i915, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
+	if (INTEL_GEN(i915) >= 4)
+		i915_enable_pipestat(i915, PIPE_A,
  				     PIPE_LEGACY_BLC_EVENT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
  }
/*
@@ -919,7 +919,7 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
   */
  static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
  	const struct drm_display_mode *mode = &vblank->hwmode;
  	i915_reg_t high_frame, low_frame;
@@ -955,7 +955,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  	high_frame = PIPEFRAME(pipe);
  	low_frame = PIPEFRAMEPIXEL(pipe);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
/*
  	 * High & low register fields aren't synchronized, so make sure
@@ -968,7 +968,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
  	} while (high1 != high2);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
high1 >>= PIPE_FRAME_HIGH_SHIFT;
  	pixel = low & PIPE_PIXEL_MASK;
@@ -984,7 +984,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
  }
@@ -999,7 +999,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
   */
  static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct drm_vblank_crtc *vblank =
  		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
  	const struct drm_display_mode *mode = &vblank->hwmode;
@@ -1044,7 +1044,7 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
  static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	const struct drm_display_mode *mode;
  	struct drm_vblank_crtc *vblank;
  	enum pipe pipe = crtc->pipe;
@@ -1063,7 +1063,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
  	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
  		vtotal /= 2;
- if (IS_GEN(dev_priv, 2))
+	if (IS_GEN(i915, 2))
  		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
  	else
  		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -1080,7 +1080,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
  	 * problem.  We may need to extend this to include other platforms,
  	 * but so far testing only shows the problem on HSW.
  	 */
-	if (HAS_DDI(dev_priv) && !position) {
+	if (HAS_DDI(i915) && !position) {
  		int i, temp;
for (i = 0; i < 100; i++) {
@@ -1105,14 +1105,14 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
  				     ktime_t *stime, ktime_t *etime,
  				     const struct drm_display_mode *mode)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(i915,
  								pipe);
  	int position;
  	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
  	unsigned long irqflags;
-	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
-		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
+	bool use_scanline_counter = INTEL_GEN(i915) >= 5 ||
+		IS_G4X(i915) || IS_GEN(i915, 2) ||
  		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
if (WARN_ON(!mode->crtc_clock)) {
@@ -1138,7 +1138,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
  	 * register reads, potentially with preemption disabled, so the
  	 * following code must not block on uncore.lock.
  	 */
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
 
@@ -1193,7 +1193,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 
 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
 
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
/*
  	 * While in vblank, position will be negative
@@ -1219,20 +1219,20 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
int intel_get_crtc_scanline(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	unsigned long irqflags;
  	int position;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
  	position = __intel_get_crtc_scanline(crtc);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
return position;
  }
-static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
+static void ironlake_rps_change_irq_handler(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
  	u32 busy_up, busy_down, max_avg, min_avg;
  	u8 new_delay;
@@ -1242,7 +1242,7 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
  			     MEMINTRSTS,
  			     intel_uncore_read(uncore, MEMINTRSTS));
- new_delay = dev_priv->ips.cur_delay;
+	new_delay = i915->ips.cur_delay;
intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
  	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
@@ -1252,26 +1252,26 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
/* Handle RCS change request from hw */
  	if (busy_up > max_avg) {
-		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
-			new_delay = dev_priv->ips.cur_delay - 1;
-		if (new_delay < dev_priv->ips.max_delay)
-			new_delay = dev_priv->ips.max_delay;
+		if (i915->ips.cur_delay != i915->ips.max_delay)
+			new_delay = i915->ips.cur_delay - 1;
+		if (new_delay < i915->ips.max_delay)
+			new_delay = i915->ips.max_delay;
  	} else if (busy_down < min_avg) {
-		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
-			new_delay = dev_priv->ips.cur_delay + 1;
-		if (new_delay > dev_priv->ips.min_delay)
-			new_delay = dev_priv->ips.min_delay;
+		if (i915->ips.cur_delay != i915->ips.min_delay)
+			new_delay = i915->ips.cur_delay + 1;
+		if (new_delay > i915->ips.min_delay)
+			new_delay = i915->ips.min_delay;
  	}
- if (ironlake_set_drps(dev_priv, new_delay))
-		dev_priv->ips.cur_delay = new_delay;
+	if (ironlake_set_drps(i915, new_delay))
+		i915->ips.cur_delay = new_delay;
 	spin_unlock(&mchdev_lock);
 
 	return;
  }
-static void vlv_c0_read(struct drm_i915_private *dev_priv,
+static void vlv_c0_read(struct drm_i915_private *i915,
  			struct intel_rps_ei *ei)
  {
  	ei->ktime = ktime_get_raw();
@@ -1279,14 +1279,14 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
  	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
  }
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
+void gen6_rps_reset_ei(struct drm_i915_private *i915)
  {
-	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
+	memset(&i915->gt_pm.rps.ei, 0, sizeof(i915->gt_pm.rps.ei));
  }
-static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
+static u32 vlv_wa_c0_ei(struct drm_i915_private *i915, u32 pm_iir)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	const struct intel_rps_ei *prev = &rps->ei;
  	struct intel_rps_ei now;
  	u32 events = 0;
@@ -1294,7 +1294,7 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
  	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
  		return 0;
- vlv_c0_read(dev_priv, &now);
+	vlv_c0_read(i915, &now);
if (prev->ktime) {
  		u64 time, c0;
@@ -1302,7 +1302,7 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 		time = ktime_us_delta(now.ktime, prev->ktime);
 
-		time *= dev_priv->czclk_freq;
+		time *= i915->czclk_freq;
/* Workload can be split between render + media,
  		 * e.g. SwapBuffers being blitted in X after being rendered in
@@ -1326,28 +1326,28 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
static void gen6_pm_rps_work(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		container_of(work, struct drm_i915_private, gt_pm.rps.work);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	bool client_boost = false;
  	int new_delay, adj, min, max;
  	u32 pm_iir = 0;
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
  	if (rps->interrupts_enabled) {
  		pm_iir = fetch_and_zero(&rps->pm_iir);
  		client_boost = atomic_read(&rps->num_waiters);
  	}
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
-	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
-	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
+	WARN_ON(pm_iir & ~i915->pm_rps_events);
+	if ((pm_iir & i915->pm_rps_events) == 0 && !client_boost)
  		goto out;
 	mutex_lock(&rps->lock);
 
-	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
+	pm_iir |= vlv_wa_c0_ei(i915, pm_iir);
adj = rps->last_adj;
  	new_delay = rps->cur_freq;
@@ -1362,7 +1362,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
  		if (adj > 0)
  			adj *= 2;
  		else /* CHV needs even encode values */
-			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
+			adj = IS_CHERRYVIEW(i915) ? 2 : 1;
if (new_delay >= rps->max_freq_softlimit)
  			adj = 0;
@@ -1378,7 +1378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
  		if (adj < 0)
  			adj *= 2;
  		else /* CHV needs even encode values */
-			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
+			adj = IS_CHERRYVIEW(i915) ? -2 : -1;
if (new_delay <= rps->min_freq_softlimit)
  			adj = 0;
@@ -1406,7 +1406,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
  	new_delay += adj;
  	new_delay = clamp_t(int, new_delay, min, max);
- if (intel_set_rps(dev_priv, new_delay)) {
+	if (intel_set_rps(i915, new_delay)) {
  		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
  		rps->last_adj = 0;
  	}
@@ -1415,10 +1415,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
out:
  	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
-	spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
  	if (rps->interrupts_enabled)
-		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
-	spin_unlock_irq(&dev_priv->irq_lock);
+		gen6_unmask_pm_irq(i915, i915->pm_rps_events);
+	spin_unlock_irq(&i915->irq_lock);
  }
@@ -1433,8 +1433,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
   */
  static void ivybridge_parity_work(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), l3_parity.error_work);
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915), l3_parity.error_work);
  	u32 error_status, row, bank, subbank;
  	char *parity_event[6];
  	u32 misccpctl;
@@ -1444,24 +1444,24 @@ static void ivybridge_parity_work(struct work_struct *work)
  	 * In order to prevent a get/put style interface, acquire struct mutex
  	 * any time we access those registers.
  	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
/* If we've screwed up tracking, just let the interrupt fire again */
-	if (WARN_ON(!dev_priv->l3_parity.which_slice))
+	if (WARN_ON(!i915->l3_parity.which_slice))
  		goto out;
misccpctl = I915_READ(GEN7_MISCCPCTL);
  	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
  	POSTING_READ(GEN7_MISCCPCTL);
- while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+	while ((slice = ffs(i915->l3_parity.which_slice)) != 0) {
  		i915_reg_t reg;
slice--;
-		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
+		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(i915)))
  			break;
- dev_priv->l3_parity.which_slice &= ~(1<<slice);
+		i915->l3_parity.which_slice &= ~(1<<slice);
 		reg = GEN7_L3CDERRST1(slice);
 
@@ -1480,7 +1480,7 @@ static void ivybridge_parity_work(struct work_struct *work)
  		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
  		parity_event[5] = NULL;
- kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
+		kobject_uevent_env(&i915->drm.primary->kdev->kobj,
  				   KOBJ_CHANGE, parity_event);
DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
@@ -1495,60 +1495,60 @@ static void ivybridge_parity_work(struct work_struct *work)
  	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
out:
-	WARN_ON(dev_priv->l3_parity.which_slice);
-	spin_lock_irq(&dev_priv->irq_lock);
-	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
-	spin_unlock_irq(&dev_priv->irq_lock);
+	WARN_ON(i915->l3_parity.which_slice);
+	spin_lock_irq(&i915->irq_lock);
+	gen5_enable_gt_irq(i915, GT_PARITY_ERROR(i915));
+	spin_unlock_irq(&i915->irq_lock);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
  }
-static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
+static void ivybridge_parity_error_irq_handler(struct drm_i915_private *i915,
  					       u32 iir)
  {
-	if (!HAS_L3_DPF(dev_priv))
+	if (!HAS_L3_DPF(i915))
  		return;
- spin_lock(&dev_priv->irq_lock);
-	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
-	spin_unlock(&dev_priv->irq_lock);
+	spin_lock(&i915->irq_lock);
+	gen5_disable_gt_irq(i915, GT_PARITY_ERROR(i915));
+	spin_unlock(&i915->irq_lock);
- iir &= GT_PARITY_ERROR(dev_priv);
+	iir &= GT_PARITY_ERROR(i915);
  	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
-		dev_priv->l3_parity.which_slice |= 1 << 1;
+		i915->l3_parity.which_slice |= 1 << 1;
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-		dev_priv->l3_parity.which_slice |= 1 << 0;
+		i915->l3_parity.which_slice |= 1 << 0;
- queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
+	queue_work(i915->wq, &i915->l3_parity.error_work);
  }
-static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
+static void ilk_gt_irq_handler(struct drm_i915_private *i915,
  			       u32 gt_iir)
  {
  	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+		intel_engine_breadcrumbs_irq(i915->engine[RCS0]);
  	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
+		intel_engine_breadcrumbs_irq(i915->engine[VCS0]);
  }
-static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
+static void snb_gt_irq_handler(struct drm_i915_private *i915,
  			       u32 gt_iir)
  {
  	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+		intel_engine_breadcrumbs_irq(i915->engine[RCS0]);
  	if (gt_iir & GT_BSD_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
+		intel_engine_breadcrumbs_irq(i915->engine[VCS0]);
  	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
+		intel_engine_breadcrumbs_irq(i915->engine[BCS0]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
  		      GT_BSD_CS_ERROR_INTERRUPT |
  		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
  		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
- if (gt_iir & GT_PARITY_ERROR(dev_priv))
-		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
+	if (gt_iir & GT_PARITY_ERROR(i915))
+		ivybridge_parity_error_irq_handler(i915, gt_iir);
  }
static void
@@ -1763,7 +1763,7 @@ static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
   *
   * Note that the caller is expected to zero out the masks initially.
   */
-static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
+static void intel_get_hpd_pins(struct drm_i915_private *i915,
  			       u32 *pin_mask, u32 *long_mask,
  			       u32 hotplug_trigger, u32 dig_hotplug_reg,
  			       const u32 hpd[HPD_NUM_PINS],
@@ -1786,25 +1786,25 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
  }
-static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
+static void gmbus_irq_handler(struct drm_i915_private *i915)
  {
-	wake_up_all(&dev_priv->gmbus_wait_queue);
+	wake_up_all(&i915->gmbus_wait_queue);
  }
-static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
+static void dp_aux_irq_handler(struct drm_i915_private *i915)
  {
-	wake_up_all(&dev_priv->gmbus_wait_queue);
+	wake_up_all(&i915->gmbus_wait_queue);
  }
#if defined(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void display_pipe_crc_irq_handler(struct drm_i915_private *i915,
  					 enum pipe pipe,
  					 u32 crc0, u32 crc1,
  					 u32 crc2, u32 crc3,
  					 u32 crc4)
  {
-	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	struct intel_pipe_crc *pipe_crc = &i915->pipe_crc[pipe];
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
  	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
trace_intel_pipe_crc(crtc, crcs);
@@ -1819,7 +1819,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
  	 * don't trust that one either.
  	 */
  	if (pipe_crc->skipped <= 0 ||
-	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+	    (INTEL_GEN(i915) >= 8 && pipe_crc->skipped == 1)) {
  		pipe_crc->skipped++;
  		spin_unlock(&pipe_crc->lock);
  		return;
@@ -1832,7 +1832,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
  }
  #else
  static inline void
-display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+display_pipe_crc_irq_handler(struct drm_i915_private *i915,
  			     enum pipe pipe,
  			     u32 crc0, u32 crc1,
  			     u32 crc2, u32 crc3,
@@ -1840,18 +1840,18 @@ display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
  #endif
-static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void hsw_pipe_crc_irq_handler(struct drm_i915_private *i915,
  				     enum pipe pipe)
  {
-	display_pipe_crc_irq_handler(dev_priv, pipe,
+	display_pipe_crc_irq_handler(i915, pipe,
  				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
  				     0, 0, 0, 0);
  }
-static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void ivb_pipe_crc_irq_handler(struct drm_i915_private *i915,
  				     enum pipe pipe)
  {
-	display_pipe_crc_irq_handler(dev_priv, pipe,
+	display_pipe_crc_irq_handler(i915, pipe,
  				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
  				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
  				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
@@ -1859,22 +1859,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
  				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
  }
-static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *i915,
  				      enum pipe pipe)
  {
  	u32 res1, res2;
- if (INTEL_GEN(dev_priv) >= 3)
+	if (INTEL_GEN(i915) >= 3)
  		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
  	else
  		res1 = 0;
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+	if (INTEL_GEN(i915) >= 5 || IS_G4X(i915))
  		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
  	else
  		res2 = 0;
- display_pipe_crc_irq_handler(dev_priv, pipe,
+	display_pipe_crc_irq_handler(i915, pipe,
  				     I915_READ(PIPE_CRC_RES_RED(pipe)),
  				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
  				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
@@ -1903,34 +1903,34 @@ static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
  	schedule_work(&rps->work);
  }
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+static void gen6_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
- if (pm_iir & dev_priv->pm_rps_events) {
-		spin_lock(&dev_priv->irq_lock);
-		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+	if (pm_iir & i915->pm_rps_events) {
+		spin_lock(&i915->irq_lock);
+		gen6_mask_pm_irq(i915, pm_iir & i915->pm_rps_events);
  		if (rps->interrupts_enabled) {
-			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
+			rps->pm_iir |= pm_iir & i915->pm_rps_events;
  			schedule_work(&rps->work);
  		}
-		spin_unlock(&dev_priv->irq_lock);
+		spin_unlock(&i915->irq_lock);
  	}
- if (INTEL_GEN(dev_priv) >= 8)
+	if (INTEL_GEN(i915) >= 8)
  		return;
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
+		intel_engine_breadcrumbs_irq(i915->engine[VECS0]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
  		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
  }
-static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
+static void gen9_guc_irq_handler(struct drm_i915_private *i915, u32 gt_iir)
  {
  	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
-		intel_guc_to_host_event_handler(&dev_priv->guc);
+		intel_guc_to_host_event_handler(&i915->guc);
  }
static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
@@ -1939,32 +1939,32 @@ static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
  		intel_guc_to_host_event_handler(&i915->guc);
  }
-static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
+static void i9xx_pipestat_irq_reset(struct drm_i915_private *i915)
  {
  	enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		I915_WRITE(PIPESTAT(pipe),
  			   PIPESTAT_INT_STATUS_MASK |
  			   PIPE_FIFO_UNDERRUN_STATUS);
- dev_priv->pipestat_irq_mask[pipe] = 0;
+		i915->pipestat_irq_mask[pipe] = 0;
  	}
  }
-static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+static void i9xx_pipestat_irq_ack(struct drm_i915_private *i915,
  				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
  {
  	int pipe;
- spin_lock(&dev_priv->irq_lock);
+	spin_lock(&i915->irq_lock);
- if (!dev_priv->display_irqs_enabled) {
-		spin_unlock(&dev_priv->irq_lock);
+	if (!i915->display_irqs_enabled) {
+		spin_unlock(&i915->irq_lock);
  		return;
  	}
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		i915_reg_t reg;
  		u32 status_mask, enable_mask, iir_bit = 0;
@@ -1991,14 +1991,14 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
  			break;
  		}
  		if (iir & iir_bit)
-			status_mask |= dev_priv->pipestat_irq_mask[pipe];
+			status_mask |= i915->pipestat_irq_mask[pipe];
if (!status_mask)
  			continue;
reg = PIPESTAT(pipe);
  		pipe_stats[pipe] = I915_READ(reg) & status_mask;
-		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+		enable_mask = i915_pipestat_enable_mask(i915, pipe);
/*
  		 * Clear the PIPE*STAT regs before the IIR
@@ -2014,104 +2014,104 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
  			I915_WRITE(reg, enable_mask);
  		}
  	}
-	spin_unlock(&dev_priv->irq_lock);
+	spin_unlock(&i915->irq_lock);
  }
-static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+static void i8xx_pipestat_irq_handler(struct drm_i915_private *i915,
  				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
  {
  	enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			drm_handle_vblank(&i915->drm, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+			i9xx_pipe_crc_irq_handler(i915, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+			intel_cpu_fifo_underrun_irq_handler(i915, pipe);
  	}
  }
-static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+static void i915_pipestat_irq_handler(struct drm_i915_private *i915,
  				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
  {
  	bool blc_event = false;
  	enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			drm_handle_vblank(&i915->drm, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
  			blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+			i9xx_pipe_crc_irq_handler(i915, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+			intel_cpu_fifo_underrun_irq_handler(i915, pipe);
  	}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
-		intel_opregion_asle_intr(dev_priv);
+		intel_opregion_asle_intr(i915);
  }
-static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+static void i965_pipestat_irq_handler(struct drm_i915_private *i915,
  				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
  {
  	bool blc_event = false;
  	enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			drm_handle_vblank(&i915->drm, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
  			blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+			i9xx_pipe_crc_irq_handler(i915, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+			intel_cpu_fifo_underrun_irq_handler(i915, pipe);
  	}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
-		intel_opregion_asle_intr(dev_priv);
+		intel_opregion_asle_intr(i915);
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
-		gmbus_irq_handler(dev_priv);
+		gmbus_irq_handler(i915);
  }
-static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+static void valleyview_pipestat_irq_handler(struct drm_i915_private *i915,
  					    u32 pipe_stats[I915_MAX_PIPES])
  {
  	enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			drm_handle_vblank(&i915->drm, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+			i9xx_pipe_crc_irq_handler(i915, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+			intel_cpu_fifo_underrun_irq_handler(i915, pipe);
  	}
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
-		gmbus_irq_handler(dev_priv);
+		gmbus_irq_handler(i915);
  }
-static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
+static u32 i9xx_hpd_irq_ack(struct drm_i915_private *i915)
  {
  	u32 hotplug_status = 0, hotplug_status_mask;
  	int i;
- if (IS_G4X(dev_priv) ||
-	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (IS_G4X(i915) ||
+	    IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
  			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
  	else
@@ -2143,35 +2143,35 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
  	return hotplug_status;
  }
-static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
+static void i9xx_hpd_irq_handler(struct drm_i915_private *i915,
  				 u32 hotplug_status)
  {
  	u32 pin_mask = 0, long_mask = 0;
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
-	    IS_CHERRYVIEW(dev_priv)) {
+	if (IS_G4X(i915) || IS_VALLEYVIEW(i915) ||
+	    IS_CHERRYVIEW(i915)) {
  		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
if (hotplug_trigger) {
-			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+			intel_get_hpd_pins(i915, &pin_mask, &long_mask,
  					   hotplug_trigger, hotplug_trigger,
  					   hpd_status_g4x,
  					   i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+			intel_hpd_irq_handler(i915, pin_mask, long_mask);
  		}
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
-			dp_aux_irq_handler(dev_priv);
+			dp_aux_irq_handler(i915);
  	} else {
  		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
if (hotplug_trigger) {
-			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+			intel_get_hpd_pins(i915, &pin_mask, &long_mask,
  					   hotplug_trigger, hotplug_trigger,
  					   hpd_status_i915,
  					   i9xx_port_hotplug_long_detect);
-			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+			intel_hpd_irq_handler(i915, pin_mask, long_mask);
  		}
  	}
  }
@@ -2179,14 +2179,14 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
  static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  {
  	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	irqreturn_t ret = IRQ_NONE;
- if (!intel_irqs_enabled(dev_priv))
+	if (!intel_irqs_enabled(i915))
  		return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
do {
  		u32 iir, gt_iir, pm_iir;
@@ -2226,15 +2226,15 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  			I915_WRITE(GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
-			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+			hotplug_status = i9xx_hpd_irq_ack(i915);
/* Call regardless, as some status bits might not be
  		 * signalled in iir */
-		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+		i9xx_pipestat_irq_ack(i915, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
  			   I915_LPE_PIPE_B_INTERRUPT))
-			intel_lpe_audio_irq_handler(dev_priv);
+			intel_lpe_audio_irq_handler(i915);
/*
  		 * VLV_IIR is single buffered, and reflects the level
@@ -2247,17 +2247,17 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
-			snb_gt_irq_handler(dev_priv, gt_iir);
+			snb_gt_irq_handler(i915, gt_iir);
  		if (pm_iir)
-			gen6_rps_irq_handler(dev_priv, pm_iir);
+			gen6_rps_irq_handler(i915, pm_iir);
if (hotplug_status)
-			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+			i9xx_hpd_irq_handler(i915, hotplug_status);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+		valleyview_pipestat_irq_handler(i915, pipe_stats);
  	} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
return ret;
  }
@@ -2265,14 +2265,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  static irqreturn_t cherryview_irq_handler(int irq, void *arg)
  {
  	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	irqreturn_t ret = IRQ_NONE;
- if (!intel_irqs_enabled(dev_priv))
+	if (!intel_irqs_enabled(i915))
  		return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
do {
  		u32 master_ctl, iir;
@@ -2306,19 +2306,19 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
  		ier = I915_READ(VLV_IER);
  		I915_WRITE(VLV_IER, 0);
- gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+		gen8_gt_irq_ack(i915, master_ctl, gt_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
-			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+			hotplug_status = i9xx_hpd_irq_ack(i915);
/* Call regardless, as some status bits might not be
  		 * signalled in iir */
-		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+		i9xx_pipestat_irq_ack(i915, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
  			   I915_LPE_PIPE_B_INTERRUPT |
  			   I915_LPE_PIPE_C_INTERRUPT))
-			intel_lpe_audio_irq_handler(dev_priv);
+			intel_lpe_audio_irq_handler(i915);
/*
  		 * VLV_IIR is single buffered, and reflects the level
@@ -2330,20 +2330,20 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
  		I915_WRITE(VLV_IER, ier);
  		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+		gen8_gt_irq_handler(i915, master_ctl, gt_iir);
if (hotplug_status)
-			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+			i9xx_hpd_irq_handler(i915, hotplug_status);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+		valleyview_pipestat_irq_handler(i915, pipe_stats);
  	} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
return ret;
  }
-static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
+static void ibx_hpd_irq_handler(struct drm_i915_private *i915,
  				u32 hotplug_trigger,
  				const u32 hpd[HPD_NUM_PINS])
  {
@@ -2368,19 +2368,19 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
  	if (!hotplug_trigger)
  		return;
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
+	intel_get_hpd_pins(i915, &pin_mask, &long_mask, hotplug_trigger,
  			   dig_hotplug_reg, hpd,
  			   pch_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+	intel_hpd_irq_handler(i915, pin_mask, long_mask);
  }
-static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void ibx_irq_handler(struct drm_i915_private *i915, u32 pch_iir)
  {
  	int pipe;
  	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
+	ibx_hpd_irq_handler(i915, hotplug_trigger, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
  		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -2390,10 +2390,10 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
  	}
if (pch_iir & SDE_AUX_MASK)
-		dp_aux_irq_handler(dev_priv);
+		dp_aux_irq_handler(i915);
if (pch_iir & SDE_GMBUS)
-		gmbus_irq_handler(dev_priv);
+		gmbus_irq_handler(i915);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
  		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -2405,7 +2405,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
  		DRM_ERROR("PCH poison interrupt\n");
if (pch_iir & SDE_FDI_MASK)
-		for_each_pipe(dev_priv, pipe)
+		for_each_pipe(i915, pipe)
  			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
  					 pipe_name(pipe),
  					 I915_READ(FDI_RX_IIR(pipe)));
@@ -2417,13 +2417,13 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
  		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
-		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
+		intel_pch_fifo_underrun_irq_handler(i915, PIPE_A);
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
-		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
+		intel_pch_fifo_underrun_irq_handler(i915, PIPE_B);
  }
-static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
+static void ivb_err_int_handler(struct drm_i915_private *i915)
  {
  	u32 err_int = I915_READ(GEN7_ERR_INT);
  	enum pipe pipe;
@@ -2431,22 +2431,22 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
  	if (err_int & ERR_INT_POISON)
  		DRM_ERROR("Poison interrupt\n");
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
-			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+			intel_cpu_fifo_underrun_irq_handler(i915, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
-			if (IS_IVYBRIDGE(dev_priv))
-				ivb_pipe_crc_irq_handler(dev_priv, pipe);
+			if (IS_IVYBRIDGE(i915))
+				ivb_pipe_crc_irq_handler(i915, pipe);
  			else
-				hsw_pipe_crc_irq_handler(dev_priv, pipe);
+				hsw_pipe_crc_irq_handler(i915, pipe);
  		}
  	}
I915_WRITE(GEN7_ERR_INT, err_int);
  }
-static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
+static void cpt_serr_int_handler(struct drm_i915_private *i915)
  {
  	u32 serr_int = I915_READ(SERR_INT);
  	enum pipe pipe;
@@ -2454,19 +2454,19 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
  	if (serr_int & SERR_INT_POISON)
  		DRM_ERROR("PCH poison interrupt\n");
- for_each_pipe(dev_priv, pipe)
+	for_each_pipe(i915, pipe)
  		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
-			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
+			intel_pch_fifo_underrun_irq_handler(i915, pipe);
I915_WRITE(SERR_INT, serr_int);
  }
-static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void cpt_irq_handler(struct drm_i915_private *i915, u32 pch_iir)
  {
  	int pipe;
  	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
+	ibx_hpd_irq_handler(i915, hotplug_trigger, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
  		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2476,10 +2476,10 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
  	}
if (pch_iir & SDE_AUX_MASK_CPT)
-		dp_aux_irq_handler(dev_priv);
+		dp_aux_irq_handler(i915);
if (pch_iir & SDE_GMBUS_CPT)
-		gmbus_irq_handler(dev_priv);
+		gmbus_irq_handler(i915);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
  		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -2488,16 +2488,16 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
  		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
if (pch_iir & SDE_FDI_MASK_CPT)
-		for_each_pipe(dev_priv, pipe)
+		for_each_pipe(i915, pipe)
  			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
  					 pipe_name(pipe),
  					 I915_READ(FDI_RX_IIR(pipe)));
if (pch_iir & SDE_ERROR_CPT)
-		cpt_serr_int_handler(dev_priv);
+		cpt_serr_int_handler(i915);
  }
-static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void icp_irq_handler(struct drm_i915_private *i915, u32 pch_iir)
  {
  	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
  	u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
@@ -2509,7 +2509,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
  		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
  		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+		intel_get_hpd_pins(i915, &pin_mask, &long_mask,
  				   ddi_hotplug_trigger,
  				   dig_hotplug_reg, hpd_icp,
  				   icp_ddi_port_hotplug_long_detect);
@@ -2521,20 +2521,20 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
  		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
  		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+		intel_get_hpd_pins(i915, &pin_mask, &long_mask,
  				   tc_hotplug_trigger,
  				   dig_hotplug_reg, hpd_icp,
  				   icp_tc_port_hotplug_long_detect);
  	}
if (pin_mask)
-		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+		intel_hpd_irq_handler(i915, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_ICP)
-		gmbus_irq_handler(dev_priv);
+		gmbus_irq_handler(i915);
  }
-static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void spt_irq_handler(struct drm_i915_private *i915, u32 pch_iir)
  {
  	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
  		~SDE_PORTE_HOTPLUG_SPT;
@@ -2547,7 +2547,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
  		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
  		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+		intel_get_hpd_pins(i915, &pin_mask, &long_mask,
  				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
  				   spt_port_hotplug_long_detect);
  	}
@@ -2558,19 +2558,19 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
  		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
  		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+		intel_get_hpd_pins(i915, &pin_mask, &long_mask,
  				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
  				   spt_port_hotplug2_long_detect);
  	}
if (pin_mask)
-		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+		intel_hpd_irq_handler(i915, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_CPT)
-		gmbus_irq_handler(dev_priv);
+		gmbus_irq_handler(i915);
  }
-static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
+static void ilk_hpd_irq_handler(struct drm_i915_private *i915,
  				u32 hotplug_trigger,
  				const u32 hpd[HPD_NUM_PINS])
  {
@@ -2579,94 +2579,94 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
  	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
  	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
+	intel_get_hpd_pins(i915, &pin_mask, &long_mask, hotplug_trigger,
  			   dig_hotplug_reg, hpd,
  			   ilk_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+	intel_hpd_irq_handler(i915, pin_mask, long_mask);
  }
-static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
+static void ilk_display_irq_handler(struct drm_i915_private *i915,
  				    u32 de_iir)
  {
  	enum pipe pipe;
  	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
-		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
+		ilk_hpd_irq_handler(i915, hotplug_trigger, hpd_ilk);
if (de_iir & DE_AUX_CHANNEL_A)
-		dp_aux_irq_handler(dev_priv);
+		dp_aux_irq_handler(i915);
if (de_iir & DE_GSE)
-		intel_opregion_asle_intr(dev_priv);
+		intel_opregion_asle_intr(i915);
if (de_iir & DE_POISON)
  		DRM_ERROR("Poison interrupt\n");
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		if (de_iir & DE_PIPE_VBLANK(pipe))
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			drm_handle_vblank(&i915->drm, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
-			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+			intel_cpu_fifo_underrun_irq_handler(i915, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
-			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+			i9xx_pipe_crc_irq_handler(i915, pipe);
  	}
/* check event from PCH */
  	if (de_iir & DE_PCH_EVENT) {
  		u32 pch_iir = I915_READ(SDEIIR);
- if (HAS_PCH_CPT(dev_priv))
-			cpt_irq_handler(dev_priv, pch_iir);
+		if (HAS_PCH_CPT(i915))
+			cpt_irq_handler(i915, pch_iir);
  		else
-			ibx_irq_handler(dev_priv, pch_iir);
+			ibx_irq_handler(i915, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
  		I915_WRITE(SDEIIR, pch_iir);
  	}
- if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
-		ironlake_rps_change_irq_handler(dev_priv);
+	if (IS_GEN(i915, 5) && de_iir & DE_PCU_EVENT)
+		ironlake_rps_change_irq_handler(i915);
  }
-static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
+static void ivb_display_irq_handler(struct drm_i915_private *i915,
  				    u32 de_iir)
  {
  	enum pipe pipe;
  	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
-		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
+		ilk_hpd_irq_handler(i915, hotplug_trigger, hpd_ivb);
if (de_iir & DE_ERR_INT_IVB)
-		ivb_err_int_handler(dev_priv);
+		ivb_err_int_handler(i915);
if (de_iir & DE_EDP_PSR_INT_HSW) {
  		u32 psr_iir = I915_READ(EDP_PSR_IIR);
- intel_psr_irq_handler(dev_priv, psr_iir);
+		intel_psr_irq_handler(i915, psr_iir);
  		I915_WRITE(EDP_PSR_IIR, psr_iir);
  	}
if (de_iir & DE_AUX_CHANNEL_A_IVB)
-		dp_aux_irq_handler(dev_priv);
+		dp_aux_irq_handler(i915);
if (de_iir & DE_GSE_IVB)
-		intel_opregion_asle_intr(dev_priv);
+		intel_opregion_asle_intr(i915);
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			drm_handle_vblank(&i915->drm, pipe);
  	}
/* check event from PCH */
-	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
+	if (!HAS_PCH_NOP(i915) && (de_iir & DE_PCH_EVENT_IVB)) {
  		u32 pch_iir = I915_READ(SDEIIR);
- cpt_irq_handler(dev_priv, pch_iir);
+		cpt_irq_handler(i915, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
  		I915_WRITE(SDEIIR, pch_iir);
@@ -2684,15 +2684,15 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
  static irqreturn_t ironlake_irq_handler(int irq, void *arg)
  {
  	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
  	irqreturn_t ret = IRQ_NONE;
- if (!intel_irqs_enabled(dev_priv))
+	if (!intel_irqs_enabled(i915))
  		return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
/* disable master interrupt before clearing iir */
  	de_ier = I915_READ(DEIER);
@@ -2703,7 +2703,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
  	 * able to process them after we restore SDEIER (as soon as we restore
  	 * it, we'll get an interrupt if SDEIIR still has something to process
  	 * due to its back queue). */
-	if (!HAS_PCH_NOP(dev_priv)) {
+	if (!HAS_PCH_NOP(i915)) {
  		sde_ier = I915_READ(SDEIER);
  		I915_WRITE(SDEIER, 0);
  	}
@@ -2714,42 +2714,42 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
  	if (gt_iir) {
  		I915_WRITE(GTIIR, gt_iir);
  		ret = IRQ_HANDLED;
-		if (INTEL_GEN(dev_priv) >= 6)
-			snb_gt_irq_handler(dev_priv, gt_iir);
+		if (INTEL_GEN(i915) >= 6)
+			snb_gt_irq_handler(i915, gt_iir);
  		else
-			ilk_gt_irq_handler(dev_priv, gt_iir);
+			ilk_gt_irq_handler(i915, gt_iir);
  	}
de_iir = I915_READ(DEIIR);
  	if (de_iir) {
  		I915_WRITE(DEIIR, de_iir);
  		ret = IRQ_HANDLED;
-		if (INTEL_GEN(dev_priv) >= 7)
-			ivb_display_irq_handler(dev_priv, de_iir);
+		if (INTEL_GEN(i915) >= 7)
+			ivb_display_irq_handler(i915, de_iir);
  		else
-			ilk_display_irq_handler(dev_priv, de_iir);
+			ilk_display_irq_handler(i915, de_iir);
  	}
- if (INTEL_GEN(dev_priv) >= 6) {
+	if (INTEL_GEN(i915) >= 6) {
  		u32 pm_iir = I915_READ(GEN6_PMIIR);
  		if (pm_iir) {
  			I915_WRITE(GEN6_PMIIR, pm_iir);
  			ret = IRQ_HANDLED;
-			gen6_rps_irq_handler(dev_priv, pm_iir);
+			gen6_rps_irq_handler(i915, pm_iir);
  		}
  	}
I915_WRITE(DEIER, de_ier);
-	if (!HAS_PCH_NOP(dev_priv))
+	if (!HAS_PCH_NOP(i915))
  		I915_WRITE(SDEIER, sde_ier);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
return ret;
  }
-static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
+static void bxt_hpd_irq_handler(struct drm_i915_private *i915,
  				u32 hotplug_trigger,
  				const u32 hpd[HPD_NUM_PINS])
  {
@@ -2758,14 +2758,14 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
  	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
  	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
+	intel_get_hpd_pins(i915, &pin_mask, &long_mask, hotplug_trigger,
  			   dig_hotplug_reg, hpd,
  			   bxt_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+	intel_hpd_irq_handler(i915, pin_mask, long_mask);
  }
-static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+static void gen11_hpd_irq_handler(struct drm_i915_private *i915, u32 iir)
  {
  	u32 pin_mask = 0, long_mask = 0;
  	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
@@ -2777,7 +2777,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
  		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
  		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
+		intel_get_hpd_pins(i915, &pin_mask, &long_mask, trigger_tc,
  				   dig_hotplug_reg, hpd_gen11,
  				   gen11_port_hotplug_long_detect);
  	}
@@ -2788,30 +2788,30 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
  		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
  		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
+		intel_get_hpd_pins(i915, &pin_mask, &long_mask, trigger_tbt,
  				   dig_hotplug_reg, hpd_gen11,
  				   gen11_port_hotplug_long_detect);
  	}
if (pin_mask)
-		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+		intel_hpd_irq_handler(i915, pin_mask, long_mask);
  	else
  		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
  }
-static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_port_aux_mask(struct drm_i915_private *i915)
  {
  	u32 mask = GEN8_AUX_CHANNEL_A;
- if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		mask |= GEN9_AUX_CHANNEL_B |
  			GEN9_AUX_CHANNEL_C |
  			GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv))
+	if (IS_CNL_WITH_PORT_F(i915))
  		mask |= CNL_AUX_CHANNEL_F;
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		mask |= ICL_AUX_CHANNEL_E |
  			CNL_AUX_CHANNEL_F;
@@ -2819,7 +2819,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
  }
static irqreturn_t
-gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
+gen8_de_irq_handler(struct drm_i915_private *i915, u32 master_ctl)
  {
  	irqreturn_t ret = IRQ_NONE;
  	u32 iir;
@@ -2834,14 +2834,14 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
  			ret = IRQ_HANDLED;
if (iir & GEN8_DE_MISC_GSE) {
-				intel_opregion_asle_intr(dev_priv);
+				intel_opregion_asle_intr(i915);
  				found = true;
  			}
if (iir & GEN8_DE_EDP_PSR) {
  				u32 psr_iir = I915_READ(EDP_PSR_IIR);
- intel_psr_irq_handler(dev_priv, psr_iir);
+				intel_psr_irq_handler(i915, psr_iir);
  				I915_WRITE(EDP_PSR_IIR, psr_iir);
  				found = true;
  			}
@@ -2853,12 +2853,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
  			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
  	}
- if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+	if (INTEL_GEN(i915) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
  		iir = I915_READ(GEN11_DE_HPD_IIR);
  		if (iir) {
  			I915_WRITE(GEN11_DE_HPD_IIR, iir);
  			ret = IRQ_HANDLED;
-			gen11_hpd_irq_handler(dev_priv, iir);
+			gen11_hpd_irq_handler(i915, iir);
  		} else {
  			DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
  		}
@@ -2873,29 +2873,29 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
  			I915_WRITE(GEN8_DE_PORT_IIR, iir);
  			ret = IRQ_HANDLED;
- if (iir & gen8_de_port_aux_mask(dev_priv)) {
-				dp_aux_irq_handler(dev_priv);
+			if (iir & gen8_de_port_aux_mask(i915)) {
+				dp_aux_irq_handler(i915);
  				found = true;
  			}
- if (IS_GEN9_LP(dev_priv)) {
+			if (IS_GEN9_LP(i915)) {
  				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
  				if (tmp_mask) {
-					bxt_hpd_irq_handler(dev_priv, tmp_mask,
+					bxt_hpd_irq_handler(i915, tmp_mask,
  							    hpd_bxt);
  					found = true;
  				}
-			} else if (IS_BROADWELL(dev_priv)) {
+			} else if (IS_BROADWELL(i915)) {
  				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
  				if (tmp_mask) {
-					ilk_hpd_irq_handler(dev_priv,
+					ilk_hpd_irq_handler(i915,
  							    tmp_mask, hpd_bdw);
  					found = true;
  				}
  			}
- if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
-				gmbus_irq_handler(dev_priv);
+			if (IS_GEN9_LP(i915) && (iir & BXT_DE_PORT_GMBUS)) {
+				gmbus_irq_handler(i915);
  				found = true;
  			}
@@ -2906,7 +2906,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
  			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
  	}
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		u32 fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
@@ -2922,16 +2922,16 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
  		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			drm_handle_vblank(&i915->drm, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
-			hsw_pipe_crc_irq_handler(dev_priv, pipe);
+			hsw_pipe_crc_irq_handler(i915, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
-			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+			intel_cpu_fifo_underrun_irq_handler(i915, pipe);
fault_errors = iir;
-		if (INTEL_GEN(dev_priv) >= 9)
+		if (INTEL_GEN(i915) >= 9)
  			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
  		else
  			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2942,7 +2942,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
  				  fault_errors);
  	}
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
+	if (HAS_PCH_SPLIT(i915) && !HAS_PCH_NOP(i915) &&
  	    master_ctl & GEN8_DE_PCH_IRQ) {
  		/*
  		 * FIXME(BDW): Assume for now that the new interrupt handling
@@ -2954,12 +2954,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
  			I915_WRITE(SDEIIR, iir);
  			ret = IRQ_HANDLED;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
-				icp_irq_handler(dev_priv, iir);
-			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
-				spt_irq_handler(dev_priv, iir);
+			if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+				icp_irq_handler(i915, iir);
+			else if (INTEL_PCH_TYPE(i915) >= PCH_SPT)
+				spt_irq_handler(i915, iir);
  			else
-				cpt_irq_handler(dev_priv, iir);
+				cpt_irq_handler(i915, iir);
  		} else {
  			/*
  			 * Like on previous PCH there seems to be something
@@ -2992,12 +2992,12 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
  {
-	struct drm_i915_private *dev_priv = to_i915(arg);
-	void __iomem * const regs = dev_priv->uncore.regs;
+	struct drm_i915_private *i915 = to_i915(arg);
+	void __iomem * const regs = i915->uncore.regs;
  	u32 master_ctl;
  	u32 gt_iir[4];
- if (!intel_irqs_enabled(dev_priv))
+	if (!intel_irqs_enabled(i915))
  		return IRQ_NONE;
master_ctl = gen8_master_intr_disable(regs);
@@ -3007,18 +3007,18 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
  	}
/* Find, clear, then process each source of interrupt */
-	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+	gen8_gt_irq_ack(i915, master_ctl, gt_iir);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
  	if (master_ctl & ~GEN8_GT_IRQS) {
-		disable_rpm_wakeref_asserts(dev_priv);
-		gen8_de_irq_handler(dev_priv, master_ctl);
-		enable_rpm_wakeref_asserts(dev_priv);
+		disable_rpm_wakeref_asserts(i915);
+		gen8_de_irq_handler(i915, master_ctl);
+		enable_rpm_wakeref_asserts(i915);
  	}
  	gen8_master_intr_enable(regs);
-	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+	gen8_gt_irq_handler(i915, master_ctl, gt_iir);
return IRQ_HANDLED;
  }
@@ -3149,9 +3149,9 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
  }
static u32
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
+gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
  {
-	void __iomem * const regs = dev_priv->uncore.regs;
+	void __iomem * const regs = i915->uncore.regs;
  	u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -3165,10 +3165,10 @@ gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
  }
static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
  {
  	if (iir & GEN11_GU_MISC_GSE)
-		intel_opregion_asle_intr(dev_priv);
+		intel_opregion_asle_intr(i915);
  }
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
@@ -3235,54 +3235,54 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
   */
  static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	spin_lock_irqsave(&i915->irq_lock, irqflags);
+	i915_enable_pipestat(i915, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+	spin_unlock_irqrestore(&i915->irq_lock, irqflags);
return 0;
  }
static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
- if (dev_priv->i945gm_vblank.enabled++ == 0)
-		schedule_work(&dev_priv->i945gm_vblank.work);
+	if (i915->i945gm_vblank.enabled++ == 0)
+		schedule_work(&i915->i945gm_vblank.work);
return i8xx_enable_vblank(dev, pipe);
  }
static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, pipe,
+	spin_lock_irqsave(&i915->irq_lock, irqflags);
+	i915_enable_pipestat(i915, pipe,
  			     PIPE_START_VBLANK_INTERRUPT_STATUS);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	spin_unlock_irqrestore(&i915->irq_lock, irqflags);
return 0;
  }
static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	unsigned long irqflags;
-	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
+	u32 bit = INTEL_GEN(i915) >= 7 ?
  		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	ilk_enable_display_irq(dev_priv, bit);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	spin_lock_irqsave(&i915->irq_lock, irqflags);
+	ilk_enable_display_irq(i915, bit);
+	spin_unlock_irqrestore(&i915->irq_lock, irqflags);
/* Even though there is no DMC, frame counter can get stuck when
  	 * PSR is active as no frames are generated.
  	 */
-	if (HAS_PSR(dev_priv))
+	if (HAS_PSR(i915))
  		drm_vblank_restore(dev, pipe);
return 0;
@@ -3290,17 +3290,17 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	spin_lock_irqsave(&i915->irq_lock, irqflags);
+	bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_VBLANK);
+	spin_unlock_irqrestore(&i915->irq_lock, irqflags);
/* Even if there is no DMC, frame counter can get stuck when
  	 * PSR is active as no frames are generated, so check only for PSR.
  	 */
-	if (HAS_PSR(dev_priv))
+	if (HAS_PSR(i915))
  		drm_vblank_restore(dev, pipe);
return 0;
@@ -3311,60 +3311,60 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
   */
  static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	spin_lock_irqsave(&i915->irq_lock, irqflags);
+	i915_disable_pipestat(i915, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+	spin_unlock_irqrestore(&i915->irq_lock, irqflags);
  }
static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	i8xx_disable_vblank(dev, pipe);
-	if (--dev_priv->i945gm_vblank.enabled == 0)
-		schedule_work(&dev_priv->i945gm_vblank.work);
+	if (--i915->i945gm_vblank.enabled == 0)
+		schedule_work(&i915->i945gm_vblank.work);
  }
static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, pipe,
+	spin_lock_irqsave(&i915->irq_lock, irqflags);
+	i915_disable_pipestat(i915, pipe,
  			      PIPE_START_VBLANK_INTERRUPT_STATUS);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	spin_unlock_irqrestore(&i915->irq_lock, irqflags);
  }
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	unsigned long irqflags;
-	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
+	u32 bit = INTEL_GEN(i915) >= 7 ?
  		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	ilk_disable_display_irq(dev_priv, bit);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	spin_lock_irqsave(&i915->irq_lock, irqflags);
+	ilk_disable_display_irq(i915, bit);
+	spin_unlock_irqrestore(&i915->irq_lock, irqflags);
  }
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	spin_lock_irqsave(&i915->irq_lock, irqflags);
+	bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_VBLANK);
+	spin_unlock_irqrestore(&i915->irq_lock, irqflags);
  }
static void i945gm_vblank_work_func(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		container_of(work, struct drm_i915_private, i945gm_vblank.work);
/*
@@ -3372,9 +3372,9 @@ static void i945gm_vblank_work_func(struct work_struct *work)
  	 * hence we want to prevent C3 usage while vblank interrupts
  	 * are enabled.
  	 */
-	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
-			      READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
-			      dev_priv->i945gm_vblank.c3_disable_latency :
+	pm_qos_update_request(&i915->i945gm_vblank.pm_qos,
+			      READ_ONCE(i915->i945gm_vblank.enabled) ?
+			      i915->i945gm_vblank.c3_disable_latency :
  			      PM_QOS_DEFAULT_VALUE);
  }
@@ -3398,34 +3398,34 @@ static int cstate_disable_latency(const char *name)
  	return 0;
  }
-static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
+static void i945gm_vblank_work_init(struct drm_i915_private *i915)
  {
-	INIT_WORK(&dev_priv->i945gm_vblank.work,
+	INIT_WORK(&i915->i945gm_vblank.work,
  		  i945gm_vblank_work_func);
- dev_priv->i945gm_vblank.c3_disable_latency =
+	i915->i945gm_vblank.c3_disable_latency =
  		cstate_disable_latency("C3");
-	pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
+	pm_qos_add_request(&i915->i945gm_vblank.pm_qos,
  			   PM_QOS_CPU_DMA_LATENCY,
  			   PM_QOS_DEFAULT_VALUE);
  }
-static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
+static void i945gm_vblank_work_fini(struct drm_i915_private *i915)
  {
-	cancel_work_sync(&dev_priv->i945gm_vblank.work);
-	pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
+	cancel_work_sync(&i915->i945gm_vblank.work);
+	pm_qos_remove_request(&i915->i945gm_vblank.pm_qos);
  }
-static void ibx_irq_reset(struct drm_i915_private *dev_priv)
+static void ibx_irq_reset(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
- if (HAS_PCH_NOP(dev_priv))
+	if (HAS_PCH_NOP(i915))
  		return;
  	GEN3_IRQ_RESET(uncore, SDE);
-	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
+	if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
  		I915_WRITE(SERR_INT, 0xffffffff);
  }
@@ -3439,9 +3439,9 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
   */
  static void ibx_irq_pre_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
- if (HAS_PCH_NOP(dev_priv))
+	if (HAS_PCH_NOP(i915))
  		return;
WARN_ON(I915_READ(SDEIER) != 0);
@@ -3449,36 +3449,36 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
  	POSTING_READ(SDEIER);
  }
-static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
+static void gen5_gt_irq_reset(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
GEN3_IRQ_RESET(uncore, GT);
-	if (INTEL_GEN(dev_priv) >= 6)
+	if (INTEL_GEN(i915) >= 6)
  		GEN3_IRQ_RESET(uncore, GEN6_PM);
  }
-static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+static void vlv_display_irq_reset(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
- if (IS_CHERRYVIEW(dev_priv))
+	if (IS_CHERRYVIEW(i915))
  		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
  	else
  		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
- i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
+	i915_hotplug_interrupt_update_locked(i915, 0xffffffff, 0);
  	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- i9xx_pipestat_irq_reset(dev_priv);
+	i9xx_pipestat_irq_reset(i915);
GEN3_IRQ_RESET(uncore, VLV_);
-	dev_priv->irq_mask = ~0u;
+	i915->irq_mask = ~0u;
  }
-static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+static void vlv_display_irq_postinstall(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
u32 pipestat_mask;
  	u32 enable_mask;
@@ -3486,9 +3486,9 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
  	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
-	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
-	for_each_pipe(dev_priv, pipe)
-		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+	i915_enable_pipestat(i915, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+	for_each_pipe(i915, pipe)
+		i915_enable_pipestat(i915, pipe, pipestat_mask);
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
  		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -3496,56 +3496,56 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
  		I915_LPE_PIPE_A_INTERRUPT |
  		I915_LPE_PIPE_B_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
+	if (IS_CHERRYVIEW(i915))
  		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
  			I915_LPE_PIPE_C_INTERRUPT;
- WARN_ON(dev_priv->irq_mask != ~0u);
+	WARN_ON(i915->irq_mask != ~0u);
- dev_priv->irq_mask = ~enable_mask;
+	i915->irq_mask = ~enable_mask;
- GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
+	GEN3_IRQ_INIT(uncore, VLV_, i915->irq_mask, enable_mask);
  }
/* drm_dma.h hooks
  */
  static void ironlake_irq_reset(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
GEN3_IRQ_RESET(uncore, DE);
-	if (IS_GEN(dev_priv, 7))
+	if (IS_GEN(i915, 7))
  		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
- if (IS_HASWELL(dev_priv)) {
+	if (IS_HASWELL(i915)) {
  		I915_WRITE(EDP_PSR_IMR, 0xffffffff);
  		I915_WRITE(EDP_PSR_IIR, 0xffffffff);
  	}
- gen5_gt_irq_reset(dev_priv);
+	gen5_gt_irq_reset(i915);
- ibx_irq_reset(dev_priv);
+	ibx_irq_reset(i915);
  }
static void valleyview_irq_reset(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
I915_WRITE(VLV_MASTER_IER, 0);
  	POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev_priv);
+	gen5_gt_irq_reset(i915);
- spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display_irqs_enabled)
-		vlv_display_irq_reset(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	if (i915->display_irqs_enabled)
+		vlv_display_irq_reset(i915);
+	spin_unlock_irq(&i915->irq_lock);
  }
-static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
+static void gen8_gt_irq_reset(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
GEN8_IRQ_RESET_NDX(uncore, GT, 0);
  	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
@@ -3555,19 +3555,19 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
static void gen8_irq_reset(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
  	int pipe;
- gen8_master_intr_disable(dev_priv->uncore.regs);
+	gen8_master_intr_disable(i915->uncore.regs);
- gen8_gt_irq_reset(dev_priv);
+	gen8_gt_irq_reset(i915);
I915_WRITE(EDP_PSR_IMR, 0xffffffff);
  	I915_WRITE(EDP_PSR_IIR, 0xffffffff);
- for_each_pipe(dev_priv, pipe)
-		if (intel_display_power_is_enabled(dev_priv,
+	for_each_pipe(i915, pipe)
+		if (intel_display_power_is_enabled(i915,
  						   POWER_DOMAIN_PIPE(pipe)))
  			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
@@ -3575,11 +3575,11 @@ static void gen8_irq_reset(struct drm_device *dev)
  	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
  	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
- if (HAS_PCH_SPLIT(dev_priv))
-		ibx_irq_reset(dev_priv);
+	if (HAS_PCH_SPLIT(i915))
+		ibx_irq_reset(i915);
  }
-static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
+static void gen11_gt_irq_reset(struct drm_i915_private *i915)
  {
  	/* Disable RCS, BCS, VCS and VECS class engines. */
  	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
@@ -3600,21 +3600,21 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = dev->dev_private;
+	struct intel_uncore *uncore = &i915->uncore;
  	int pipe;
- gen11_master_intr_disable(dev_priv->uncore.regs);
+	gen11_master_intr_disable(i915->uncore.regs);
- gen11_gt_irq_reset(dev_priv);
+	gen11_gt_irq_reset(i915);
  	I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
  	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
  	I915_WRITE(EDP_PSR_IIR, 0xffffffff);
- for_each_pipe(dev_priv, pipe)
-		if (intel_display_power_is_enabled(dev_priv,
+	for_each_pipe(i915, pipe)
+		if (intel_display_power_is_enabled(i915,
  						   POWER_DOMAIN_PIPE(pipe)))
  			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
@@ -3624,87 +3624,87 @@ static void gen11_irq_reset(struct drm_device *dev)
  	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
  	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+	if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
  		GEN3_IRQ_RESET(uncore, SDE);
  }
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_post_enable(struct drm_i915_private *i915,
  				     u8 pipe_mask)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
  	enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
- if (!intel_irqs_enabled(dev_priv)) {
-		spin_unlock_irq(&dev_priv->irq_lock);
+	if (!intel_irqs_enabled(i915)) {
+		spin_unlock_irq(&i915->irq_lock);
  		return;
  	}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+	for_each_pipe_masked(i915, pipe, pipe_mask)
  		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
-				  dev_priv->de_irq_mask[pipe],
-				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
+				  i915->de_irq_mask[pipe],
+				  ~i915->de_irq_mask[pipe] | extra_ier);
- spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
  }
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915,
  				     u8 pipe_mask)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
  	enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
- if (!intel_irqs_enabled(dev_priv)) {
-		spin_unlock_irq(&dev_priv->irq_lock);
+	if (!intel_irqs_enabled(i915)) {
+		spin_unlock_irq(&i915->irq_lock);
  		return;
  	}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+	for_each_pipe_masked(i915, pipe, pipe_mask)
  		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
- spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
/* make sure we're done processing display irqs */
-	synchronize_irq(dev_priv->drm.irq);
+	synchronize_irq(i915->drm.irq);
  }
static void cherryview_irq_reset(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
I915_WRITE(GEN8_MASTER_IRQ, 0);
  	POSTING_READ(GEN8_MASTER_IRQ);
- gen8_gt_irq_reset(dev_priv);
+	gen8_gt_irq_reset(i915);
  	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display_irqs_enabled)
-		vlv_display_irq_reset(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	if (i915->display_irqs_enabled)
+		vlv_display_irq_reset(i915);
+	spin_unlock_irq(&i915->irq_lock);
  }
-static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_enabled_irqs(struct drm_i915_private *i915,
  				  const u32 hpd[HPD_NUM_PINS])
  {
  	struct intel_encoder *encoder;
  	u32 enabled_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
-		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+	for_each_intel_encoder(&i915->drm, encoder)
+		if (i915->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
  			enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
  }
-static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_detection_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug;
@@ -3724,29 +3724,29 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
  	 * When CPU and PCH are on the same package, port A
  	 * HPD must be enabled in both north and south.
  	 */
-	if (HAS_PCH_LPT_LP(dev_priv))
+	if (HAS_PCH_LPT_LP(i915))
  		hotplug |= PORTA_HOTPLUG_ENABLE;
  	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  }
-static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_irq_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug_irqs, enabled_irqs;
- if (HAS_PCH_IBX(dev_priv)) {
+	if (HAS_PCH_IBX(i915)) {
  		hotplug_irqs = SDE_HOTPLUG_MASK;
-		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
+		enabled_irqs = intel_hpd_enabled_irqs(i915, hpd_ibx);
  	} else {
  		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
-		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
+		enabled_irqs = intel_hpd_enabled_irqs(i915, hpd_cpt);
  	}
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+	ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
- ibx_hpd_detection_setup(dev_priv);
+	ibx_hpd_detection_setup(i915);
  }
-static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_detection_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug;
@@ -3763,19 +3763,19 @@ static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
  	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
  }
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_irq_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug_irqs, enabled_irqs;
hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
-	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
+	enabled_irqs = intel_hpd_enabled_irqs(i915, hpd_icp);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+	ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
- icp_hpd_detection_setup(dev_priv);
+	icp_hpd_detection_setup(i915);
  }
-static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_hpd_detection_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug;
@@ -3794,12 +3794,12 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
  	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
  }
-static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void gen11_hpd_irq_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug_irqs, enabled_irqs;
  	u32 val;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
+	enabled_irqs = intel_hpd_enabled_irqs(i915, hpd_gen11);
  	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
val = I915_READ(GEN11_DE_HPD_IMR);
@@ -3807,18 +3807,18 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
  	I915_WRITE(GEN11_DE_HPD_IMR, val);
  	POSTING_READ(GEN11_DE_HPD_IMR);
- gen11_hpd_detection_setup(dev_priv);
+	gen11_hpd_detection_setup(i915);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
-		icp_hpd_irq_setup(dev_priv);
+	if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+		icp_hpd_irq_setup(i915);
  }
-static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_detection_setup(struct drm_i915_private *i915)
  {
  	u32 val, hotplug;
/* Display WA #1179 WaHardHangonHotPlug: cnp */
-	if (HAS_PCH_CNP(dev_priv)) {
+	if (HAS_PCH_CNP(i915)) {
  		val = I915_READ(SOUTH_CHICKEN1);
  		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
  		val |= CHASSIS_CLK_REQ_DURATION(0xf);
@@ -3838,19 +3838,19 @@ static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
  	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
  }
-static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_irq_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug_irqs, enabled_irqs;
hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
-	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
+	enabled_irqs = intel_hpd_enabled_irqs(i915, hpd_spt);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+	ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
- spt_hpd_detection_setup(dev_priv);
+	spt_hpd_detection_setup(i915);
  }
-static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_detection_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug;
@@ -3866,33 +3866,33 @@ static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
  	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
  }
-static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_irq_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug_irqs, enabled_irqs;
- if (INTEL_GEN(dev_priv) >= 8) {
+	if (INTEL_GEN(i915) >= 8) {
  		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
-		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
+		enabled_irqs = intel_hpd_enabled_irqs(i915, hpd_bdw);
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
-	} else if (INTEL_GEN(dev_priv) >= 7) {
+		bdw_update_port_irq(i915, hotplug_irqs, enabled_irqs);
+	} else if (INTEL_GEN(i915) >= 7) {
  		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
-		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
+		enabled_irqs = intel_hpd_enabled_irqs(i915, hpd_ivb);
- ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
+		ilk_update_display_irq(i915, hotplug_irqs, enabled_irqs);
  	} else {
  		hotplug_irqs = DE_DP_A_HOTPLUG;
-		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
+		enabled_irqs = intel_hpd_enabled_irqs(i915, hpd_ilk);
- ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
+		ilk_update_display_irq(i915, hotplug_irqs, enabled_irqs);
  	}
- ilk_hpd_detection_setup(dev_priv);
+	ilk_hpd_detection_setup(i915);
- ibx_hpd_irq_setup(dev_priv);
+	ibx_hpd_irq_setup(i915);
  }
-static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
+static void __bxt_hpd_detection_setup(struct drm_i915_private *i915,
  				      u32 enabled_irqs)
  {
  	u32 hotplug;
@@ -3911,106 +3911,106 @@ static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
  	 * for HPD detection logic, update it based on VBT fields.
  	 */
  	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
-	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
+	    intel_bios_is_port_hpd_inverted(i915, PORT_A))
  		hotplug |= BXT_DDIA_HPD_INVERT;
  	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
-	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
+	    intel_bios_is_port_hpd_inverted(i915, PORT_B))
  		hotplug |= BXT_DDIB_HPD_INVERT;
  	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
-	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
+	    intel_bios_is_port_hpd_inverted(i915, PORT_C))
  		hotplug |= BXT_DDIC_HPD_INVERT;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  }
-static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_detection_setup(struct drm_i915_private *i915)
  {
-	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
+	__bxt_hpd_detection_setup(i915, BXT_DE_PORT_HOTPLUG_MASK);
  }
-static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_irq_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
+	enabled_irqs = intel_hpd_enabled_irqs(i915, hpd_bxt);
  	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+	bdw_update_port_irq(i915, hotplug_irqs, enabled_irqs);
- __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
+	__bxt_hpd_detection_setup(i915, enabled_irqs);
  }
static void ibx_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 mask;
- if (HAS_PCH_NOP(dev_priv))
+	if (HAS_PCH_NOP(i915))
  		return;
- if (HAS_PCH_IBX(dev_priv))
+	if (HAS_PCH_IBX(i915))
  		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
-	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
+	else if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
  		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
  	else
  		mask = SDE_GMBUS_CPT;
- gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
+	gen3_assert_iir_is_zero(&i915->uncore, SDEIIR);
  	I915_WRITE(SDEIMR, ~mask);
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
-	    HAS_PCH_LPT(dev_priv))
-		ibx_hpd_detection_setup(dev_priv);
+	if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) ||
+	    HAS_PCH_LPT(i915))
+		ibx_hpd_detection_setup(i915);
  	else
-		spt_hpd_detection_setup(dev_priv);
+		spt_hpd_detection_setup(i915);
  }
static void gen5_gt_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
  	u32 pm_irqs, gt_irqs;
  	pm_irqs = gt_irqs = 0;
-	dev_priv->gt_irq_mask = ~0;
-	if (HAS_L3_DPF(dev_priv)) {
+	i915->gt_irq_mask = ~0;
+	if (HAS_L3_DPF(i915)) {
  		/* L3 parity interrupt is always unmasked. */
-		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
-		gt_irqs |= GT_PARITY_ERROR(dev_priv);
+		i915->gt_irq_mask = ~GT_PARITY_ERROR(i915);
+		gt_irqs |= GT_PARITY_ERROR(i915);
  	}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
-	if (IS_GEN(dev_priv, 5)) {
+	if (IS_GEN(i915, 5)) {
  		gt_irqs |= ILK_BSD_USER_INTERRUPT;
  	} else {
  		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
  	}
- GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
+	GEN3_IRQ_INIT(uncore, GT, i915->gt_irq_mask, gt_irqs);
- if (INTEL_GEN(dev_priv) >= 6) {
+	if (INTEL_GEN(i915) >= 6) {
  		/*
  		 * RPS interrupts will get enabled/disabled on demand when RPS
  		 * itself is enabled/disabled.
  		 */
-		if (HAS_ENGINE(dev_priv, VECS0)) {
+		if (HAS_ENGINE(i915, VECS0)) {
  			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
-			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+			i915->pm_ier |= PM_VEBOX_USER_INTERRUPT;
  		}
- dev_priv->pm_imr = 0xffffffff;
-		GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
+		i915->pm_imr = 0xffffffff;
+		GEN3_IRQ_INIT(uncore, GEN6_PM, i915->pm_imr, pm_irqs);
  	}
  }
static int ironlake_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
  	u32 display_mask, extra_mask;
- if (INTEL_GEN(dev_priv) >= 7) {
+	if (INTEL_GEN(i915) >= 7) {
  		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
  				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
  		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
@@ -4025,78 +4025,78 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
  			      DE_DP_A_HOTPLUG);
  	}
- if (IS_HASWELL(dev_priv)) {
+	if (IS_HASWELL(i915)) {
  		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
-		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+		intel_psr_irq_control(i915, i915->psr.debug);
  		display_mask |= DE_EDP_PSR_INT_HSW;
  	}
- dev_priv->irq_mask = ~display_mask;
+	i915->irq_mask = ~display_mask;
  	ibx_irq_pre_postinstall(dev);
-	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
+	GEN3_IRQ_INIT(uncore, DE, i915->irq_mask,
  		      display_mask | extra_mask);
  	gen5_gt_irq_postinstall(dev);
-	ilk_hpd_detection_setup(dev_priv);
+	ilk_hpd_detection_setup(i915);
  	ibx_irq_postinstall(dev);
-	if (IS_IRONLAKE_M(dev_priv)) {
+	if (IS_IRONLAKE_M(i915)) {
  		/* Enable PCU event interrupts
  		 *
  		 * spinlocking not required here for correctness since interrupt
  		 * setup is guaranteed to run in single-threaded context. But we
  		 * need it to make the assert_spin_locked happy. */
-		spin_lock_irq(&dev_priv->irq_lock);
-		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
-		spin_unlock_irq(&dev_priv->irq_lock);
+		spin_lock_irq(&i915->irq_lock);
+		ilk_enable_display_irq(i915, DE_PCU_EVENT);
+		spin_unlock_irq(&i915->irq_lock);
  	}
return 0;
  }
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_enable_display_irqs(struct drm_i915_private *i915)
  {
-	lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
- if (dev_priv->display_irqs_enabled)
+	if (i915->display_irqs_enabled)
  		return;
- dev_priv->display_irqs_enabled = true;
+	i915->display_irqs_enabled = true;
- if (intel_irqs_enabled(dev_priv)) {
-		vlv_display_irq_reset(dev_priv);
-		vlv_display_irq_postinstall(dev_priv);
+	if (intel_irqs_enabled(i915)) {
+		vlv_display_irq_reset(i915);
+		vlv_display_irq_postinstall(i915);
  	}
  }
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_disable_display_irqs(struct drm_i915_private *i915)
  {
-	lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
- if (!dev_priv->display_irqs_enabled)
+	if (!i915->display_irqs_enabled)
  		return;
- dev_priv->display_irqs_enabled = false;
+	i915->display_irqs_enabled = false;
- if (intel_irqs_enabled(dev_priv))
-		vlv_display_irq_reset(dev_priv);
+	if (intel_irqs_enabled(i915))
+		vlv_display_irq_reset(i915);
  }
static int valleyview_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	gen5_gt_irq_postinstall(dev);
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display_irqs_enabled)
-		vlv_display_irq_postinstall(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	if (i915->display_irqs_enabled)
+		vlv_display_irq_postinstall(i915);
+	spin_unlock_irq(&i915->irq_lock);
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
  	POSTING_READ(VLV_MASTER_IER);
@@ -4104,9 +4104,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
  	return 0;
  }
-static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
/* These are interrupts we'll toggle with the ring mask register */
  	u32 gt_interrupts[] = {
@@ -4126,21 +4126,21 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
  		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
  	};
- dev_priv->pm_ier = 0x0;
-	dev_priv->pm_imr = ~dev_priv->pm_ier;
+	i915->pm_ier = 0x0;
+	i915->pm_imr = ~i915->pm_ier;
  	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
  	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
  	/*
  	 * RPS interrupts will get enabled/disabled on demand when RPS itself
  	 * is enabled/disabled. Same wil be the case for GuC interrupts.
  	 */
-	GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
+	GEN8_IRQ_INIT_NDX(uncore, GT, 2, i915->pm_imr, i915->pm_ier);
  	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
  }
-static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+static void gen8_de_irq_postinstall(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
  	u32 de_pipe_enables;
@@ -4149,84 +4149,84 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
  	u32 de_misc_masked = GEN8_DE_EDP_PSR;
  	enum pipe pipe;
- if (INTEL_GEN(dev_priv) <= 10)
+	if (INTEL_GEN(i915) <= 10)
  		de_misc_masked |= GEN8_DE_MISC_GSE;
- if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
  		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
  				  GEN9_AUX_CHANNEL_D;
-		if (IS_GEN9_LP(dev_priv))
+		if (IS_GEN9_LP(i915))
  			de_port_masked |= BXT_DE_PORT_GMBUS;
  	} else {
  		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
  	}
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		de_port_masked |= ICL_AUX_CHANNEL_E;
- if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
+	if (IS_CNL_WITH_PORT_F(i915) || INTEL_GEN(i915) >= 11)
  		de_port_masked |= CNL_AUX_CHANNEL_F;
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
  					   GEN8_PIPE_FIFO_UNDERRUN;
de_port_enables = de_port_masked;
-	if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
  		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
-	else if (IS_BROADWELL(dev_priv))
+	else if (IS_BROADWELL(i915))
  		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
-	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+	intel_psr_irq_control(i915, i915->psr.debug);
- for_each_pipe(dev_priv, pipe) {
-		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
+	for_each_pipe(i915, pipe) {
+		i915->de_irq_mask[pipe] = ~de_pipe_masked;
- if (intel_display_power_is_enabled(dev_priv,
+		if (intel_display_power_is_enabled(i915,
  				POWER_DOMAIN_PIPE(pipe)))
  			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
-					  dev_priv->de_irq_mask[pipe],
+					  i915->de_irq_mask[pipe],
  					  de_pipe_enables);
  	}
GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
  	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
- if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
  		u32 de_hpd_masked = 0;
  		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
  				     GEN11_DE_TBT_HOTPLUG_MASK;
GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
  			      de_hpd_enables);
-		gen11_hpd_detection_setup(dev_priv);
-	} else if (IS_GEN9_LP(dev_priv)) {
-		bxt_hpd_detection_setup(dev_priv);
-	} else if (IS_BROADWELL(dev_priv)) {
-		ilk_hpd_detection_setup(dev_priv);
+		gen11_hpd_detection_setup(i915);
+	} else if (IS_GEN9_LP(i915)) {
+		bxt_hpd_detection_setup(i915);
+	} else if (IS_BROADWELL(i915)) {
+		ilk_hpd_detection_setup(i915);
  	}
  }
static int gen8_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
- if (HAS_PCH_SPLIT(dev_priv))
+	if (HAS_PCH_SPLIT(i915))
  		ibx_irq_pre_postinstall(dev);
- gen8_gt_irq_postinstall(dev_priv);
-	gen8_de_irq_postinstall(dev_priv);
+	gen8_gt_irq_postinstall(i915);
+	gen8_de_irq_postinstall(i915);
- if (HAS_PCH_SPLIT(dev_priv))
+	if (HAS_PCH_SPLIT(i915))
  		ibx_irq_postinstall(dev);
- gen8_master_intr_enable(dev_priv->uncore.regs);
+	gen8_master_intr_enable(i915->uncore.regs);
return 0;
  }
-static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+static void gen11_gt_irq_postinstall(struct drm_i915_private *i915)
  {
  	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
@@ -4247,8 +4247,8 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
  	 * RPS interrupts will get enabled/disabled on demand when RPS itself
  	 * is enabled/disabled.
  	 */
-	dev_priv->pm_ier = 0x0;
-	dev_priv->pm_imr = ~dev_priv->pm_ier;
+	i915->pm_ier = 0x0;
+	i915->pm_imr = ~i915->pm_ier;
  	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
  	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
@@ -4259,36 +4259,36 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
  static void icp_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 mask = SDE_GMBUS_ICP;
WARN_ON(I915_READ(SDEIER) != 0);
  	I915_WRITE(SDEIER, 0xffffffff);
  	POSTING_READ(SDEIER);
- gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
+	gen3_assert_iir_is_zero(&i915->uncore, SDEIIR);
  	I915_WRITE(SDEIMR, ~mask);
- icp_hpd_detection_setup(dev_priv);
+	icp_hpd_detection_setup(i915);
  }
static int gen11_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = dev->dev_private;
+	struct intel_uncore *uncore = &i915->uncore;
  	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+	if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
  		icp_irq_postinstall(dev);
- gen11_gt_irq_postinstall(dev_priv);
-	gen8_de_irq_postinstall(dev_priv);
+	gen11_gt_irq_postinstall(i915);
+	gen8_de_irq_postinstall(i915);
  	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
  	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
-	gen11_master_intr_enable(dev_priv->uncore.regs);
+	gen11_master_intr_enable(i915->uncore.regs);
  	POSTING_READ(GEN11_GFX_MSTR_IRQ);
return 0;
@@ -4296,14 +4296,14 @@ static int gen11_irq_postinstall(struct drm_device *dev)
static int cherryview_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
- gen8_gt_irq_postinstall(dev_priv);
+	gen8_gt_irq_postinstall(i915);
- spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display_irqs_enabled)
-		vlv_display_irq_postinstall(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	if (i915->display_irqs_enabled)
+		vlv_display_irq_postinstall(i915);
+	spin_unlock_irq(&i915->irq_lock);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
  	POSTING_READ(GEN8_MASTER_IRQ);
@@ -4313,18 +4313,18 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
static void i8xx_irq_reset(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
- i9xx_pipestat_irq_reset(dev_priv);
+	i9xx_pipestat_irq_reset(i915);
GEN2_IRQ_RESET(uncore);
  }
static int i8xx_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
  	u16 enable_mask;
intel_uncore_write16(uncore,
@@ -4333,7 +4333,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
  			       I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
-	dev_priv->irq_mask =
+	i915->irq_mask =
  		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  		  I915_MASTER_ERROR_INTERRUPT);
@@ -4344,14 +4344,14 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
  		I915_MASTER_ERROR_INTERRUPT |
  		I915_USER_INTERRUPT;
- GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
+	GEN2_IRQ_INIT(uncore, i915->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
  	 * just to make the assert_spin_locked check happy. */
-	spin_lock_irq(&dev_priv->irq_lock);
-	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
-	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	i915_enable_pipestat(i915, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	i915_enable_pipestat(i915, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	spin_unlock_irq(&i915->irq_lock);
return 0;
  }
@@ -4386,7 +4386,7 @@ static void i8xx_error_irq_ack(struct drm_i915_private *i915,
  	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
  }
-static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
+static void i8xx_error_irq_handler(struct drm_i915_private *i915,
  				   u16 eir, u16 eir_stuck)
  {
  	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
@@ -4395,7 +4395,7 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
  		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
  }
-static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
+static void i9xx_error_irq_ack(struct drm_i915_private *i915,
  			       u32 *eir, u32 *eir_stuck)
  {
  	u32 emr;
@@ -4423,7 +4423,7 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
  	I915_WRITE(EMR, emr | *eir_stuck);
  }
-static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
+static void i9xx_error_irq_handler(struct drm_i915_private *i915,
  				   u32 eir, u32 eir_stuck)
  {
  	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
@@ -4435,21 +4435,21 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
  static irqreturn_t i8xx_irq_handler(int irq, void *arg)
  {
  	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	irqreturn_t ret = IRQ_NONE;
- if (!intel_irqs_enabled(dev_priv))
+	if (!intel_irqs_enabled(i915))
  		return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
do {
  		u32 pipe_stats[I915_MAX_PIPES] = {};
  		u16 eir = 0, eir_stuck = 0;
  		u16 iir;
- iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
+		iir = intel_uncore_read16(&i915->uncore, GEN2_IIR);
  		if (iir == 0)
  			break;
@@ -4457,53 +4457,53 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
  		/* Call regardless, as some status bits might not be
  		 * signalled in iir */
-		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+		i9xx_pipestat_irq_ack(i915, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
-			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+			i8xx_error_irq_ack(i915, &eir, &eir_stuck);
- intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
+		intel_uncore_write16(&i915->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+			intel_engine_breadcrumbs_irq(i915->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
-			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
+			i8xx_error_irq_handler(i915, eir, eir_stuck);
- i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+		i8xx_pipestat_irq_handler(i915, iir, pipe_stats);
  	} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
return ret;
  }
static void i915_irq_reset(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
- if (I915_HAS_HOTPLUG(dev_priv)) {
-		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
+	if (I915_HAS_HOTPLUG(i915)) {
+		i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
  		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  	}
- i9xx_pipestat_irq_reset(dev_priv);
+	i9xx_pipestat_irq_reset(i915);
GEN3_IRQ_RESET(uncore, GEN2_);
  }
static int i915_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
  	u32 enable_mask;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
  			  I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
-	dev_priv->irq_mask =
+	i915->irq_mask =
  		~(I915_ASLE_INTERRUPT |
  		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
@@ -4516,23 +4516,23 @@ static int i915_irq_postinstall(struct drm_device *dev)
  		I915_MASTER_ERROR_INTERRUPT |
  		I915_USER_INTERRUPT;
- if (I915_HAS_HOTPLUG(dev_priv)) {
+	if (I915_HAS_HOTPLUG(i915)) {
  		/* Enable in IER... */
  		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
  		/* and unmask in IMR */
-		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+		i915->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
  	}
- GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
+	GEN3_IRQ_INIT(uncore, GEN2_, i915->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
  	 * just to make the assert_spin_locked check happy. */
-	spin_lock_irq(&dev_priv->irq_lock);
-	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
-	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	i915_enable_pipestat(i915, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	i915_enable_pipestat(i915, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	spin_unlock_irq(&i915->irq_lock);
- i915_enable_asle_pipestat(dev_priv);
+	i915_enable_asle_pipestat(i915);
return 0;
  }
@@ -4540,14 +4540,14 @@ static int i915_irq_postinstall(struct drm_device *dev)
  static irqreturn_t i915_irq_handler(int irq, void *arg)
  {
  	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	irqreturn_t ret = IRQ_NONE;
- if (!intel_irqs_enabled(dev_priv))
+	if (!intel_irqs_enabled(i915))
  		return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
do {
  		u32 pipe_stats[I915_MAX_PIPES] = {};
@@ -4561,53 +4561,53 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
  		ret = IRQ_HANDLED;
-		if (I915_HAS_HOTPLUG(dev_priv) &&
+		if (I915_HAS_HOTPLUG(i915) &&
  		    iir & I915_DISPLAY_PORT_INTERRUPT)
-			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+			hotplug_status = i9xx_hpd_irq_ack(i915);
/* Call regardless, as some status bits might not be
  		 * signalled in iir */
-		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+		i9xx_pipestat_irq_ack(i915, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
-			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+			i9xx_error_irq_ack(i915, &eir, &eir_stuck);
  		I915_WRITE(GEN2_IIR, iir);
  		if (iir & I915_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+			intel_engine_breadcrumbs_irq(i915->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
-			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
+			i9xx_error_irq_handler(i915, eir, eir_stuck);
if (hotplug_status)
-			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+			i9xx_hpd_irq_handler(i915, hotplug_status);
- i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+		i915_pipestat_irq_handler(i915, iir, pipe_stats);
  	} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
return ret;
  }
static void i965_irq_reset(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
+	i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
  	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- i9xx_pipestat_irq_reset(dev_priv);
+	i9xx_pipestat_irq_reset(i915);
GEN3_IRQ_RESET(uncore, GEN2_);
  }
static int i965_irq_postinstall(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
  	u32 enable_mask;
  	u32 error_mask;
@@ -4615,7 +4615,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
  	 * Enable some error detection, note the instruction error mask
  	 * bit is reserved, so we leave it masked.
  	 */
-	if (IS_G4X(dev_priv)) {
+	if (IS_G4X(i915)) {
  		error_mask = ~(GM45_ERROR_PAGE_TABLE |
  			       GM45_ERROR_MEM_PRIV |
  			       GM45_ERROR_CP_PRIV |
@@ -4627,7 +4627,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
  	I915_WRITE(EMR, error_mask);
/* Unmask the interrupts that we always want on. */
-	dev_priv->irq_mask =
+	i915->irq_mask =
  		~(I915_ASLE_INTERRUPT |
  		  I915_DISPLAY_PORT_INTERRUPT |
  		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -4642,43 +4642,43 @@ static int i965_irq_postinstall(struct drm_device *dev)
  		I915_MASTER_ERROR_INTERRUPT |
  		I915_USER_INTERRUPT;
- if (IS_G4X(dev_priv))
+	if (IS_G4X(i915))
  		enable_mask |= I915_BSD_USER_INTERRUPT;
- GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
+	GEN3_IRQ_INIT(uncore, GEN2_, i915->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
  	 * just to make the assert_spin_locked check happy. */
-	spin_lock_irq(&dev_priv->irq_lock);
-	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
-	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
-	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	i915_enable_pipestat(i915, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+	i915_enable_pipestat(i915, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	i915_enable_pipestat(i915, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	spin_unlock_irq(&i915->irq_lock);
- i915_enable_asle_pipestat(dev_priv);
+	i915_enable_asle_pipestat(i915);
return 0;
  }
-static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void i915_hpd_irq_setup(struct drm_i915_private *i915)
  {
  	u32 hotplug_en;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
/* Note HDMI and DP share hotplug bits */
  	/* enable bits are the same for all generations */
-	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
+	hotplug_en = intel_hpd_enabled_irqs(i915, hpd_mask_i915);
  	/* Programming the CRT detection parameters tends
  	   to generate a spurious hotplug event about three
  	   seconds later.  So just do it once.
  	*/
-	if (IS_G4X(dev_priv))
+	if (IS_G4X(i915))
  		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
  	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
/* Ignore TV since it's buggy */
-	i915_hotplug_interrupt_update_locked(dev_priv,
+	i915_hotplug_interrupt_update_locked(i915,
  					     HOTPLUG_INT_EN_MASK |
  					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
  					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
@@ -4688,14 +4688,14 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
  static irqreturn_t i965_irq_handler(int irq, void *arg)
  {
  	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	irqreturn_t ret = IRQ_NONE;
- if (!intel_irqs_enabled(dev_priv))
+	if (!intel_irqs_enabled(i915))
  		return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(i915);
do {
  		u32 pipe_stats[I915_MAX_PIPES] = {};
@@ -4710,76 +4710,76 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
  		ret = IRQ_HANDLED;
if (iir & I915_DISPLAY_PORT_INTERRUPT)
-			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+			hotplug_status = i9xx_hpd_irq_ack(i915);
/* Call regardless, as some status bits might not be
  		 * signalled in iir */
-		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+		i9xx_pipestat_irq_ack(i915, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
-			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+			i9xx_error_irq_ack(i915, &eir, &eir_stuck);
  		I915_WRITE(GEN2_IIR, iir);
  		if (iir & I915_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+			intel_engine_breadcrumbs_irq(i915->engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
+			intel_engine_breadcrumbs_irq(i915->engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
-			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
+			i9xx_error_irq_handler(i915, eir, eir_stuck);
if (hotplug_status)
-			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+			i9xx_hpd_irq_handler(i915, hotplug_status);
- i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+		i965_pipestat_irq_handler(i915, iir, pipe_stats);
  	} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(i915);
return ret;
  }
/**
   * intel_irq_init - initializes irq support
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This function initializes all the irq support including work items, timers
   * and all the vtables. It does not setup the interrupt itself though.
   */
-void intel_irq_init(struct drm_i915_private *dev_priv)
+void intel_irq_init(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct drm_device *dev = &i915->drm;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	int i;
- if (IS_I945GM(dev_priv))
-		i945gm_vblank_work_init(dev_priv);
+	if (IS_I945GM(i915))
+		i945gm_vblank_work_init(i915);
- intel_hpd_init_work(dev_priv);
+	intel_hpd_init_work(i915);
  	INIT_WORK(&rps->work, gen6_pm_rps_work);
-	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+	INIT_WORK(&i915->l3_parity.error_work, ivybridge_parity_work);
  	for (i = 0; i < MAX_L3_SLICES; ++i)
-		dev_priv->l3_parity.remap_info[i] = NULL;
+		i915->l3_parity.remap_info[i] = NULL;
- if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
-		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
+	if (HAS_GUC_SCHED(i915) && INTEL_GEN(i915) < 11)
+		i915->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
/* Let's track the enabled rps events */
-	if (IS_VALLEYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915))
  		/* WaGsvRC0ResidencyMethod:vlv */
-		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+		i915->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
  	else
-		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
+		i915->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
  					   GEN6_PM_RP_DOWN_THRESHOLD |
  					   GEN6_PM_RP_DOWN_TIMEOUT);
/* We share the register with other engine */
-	if (INTEL_GEN(dev_priv) > 9)
-		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
+	if (INTEL_GEN(i915) > 9)
+		GEM_WARN_ON(i915->pm_rps_events & 0xffff0000);
  	rps->pm_intrmsk_mbz = 0;
@@ -4789,15 +4789,15 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
  	 *
  	 * TODO: verify if this can be reproduced on VLV,CHV.
  	 */
-	if (INTEL_GEN(dev_priv) <= 7)
+	if (INTEL_GEN(i915) <= 7)
  		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
- if (INTEL_GEN(dev_priv) >= 8)
+	if (INTEL_GEN(i915) >= 8)
  		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+	if (INTEL_GEN(i915) >= 5 || IS_G4X(i915))
  		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
-	else if (INTEL_GEN(dev_priv) >= 3)
+	else if (INTEL_GEN(i915) >= 3)
  		dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->vblank_disable_immediate = true;
@@ -4808,83 +4808,83 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
  	 * outside of the power domain. We defer setting up the display irqs
  	 * in this case to the runtime pm.
  	 */
-	dev_priv->display_irqs_enabled = true;
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		dev_priv->display_irqs_enabled = false;
+	i915->display_irqs_enabled = true;
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		i915->display_irqs_enabled = false;
- dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+	i915->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
  	/* If we have MST support, we want to avoid doing short HPD IRQ storm
  	 * detection, as short HPD storms will occur as a natural part of
  	 * sideband messaging with MST.
  	 * On older platforms however, IRQ storms can occur with both long and
  	 * short pulses, as seen on some G4x systems.
  	 */
-	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
+	i915->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
  	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
- if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		dev->driver->irq_handler = cherryview_irq_handler;
  		dev->driver->irq_preinstall = cherryview_irq_reset;
  		dev->driver->irq_postinstall = cherryview_irq_postinstall;
  		dev->driver->irq_uninstall = cherryview_irq_reset;
  		dev->driver->enable_vblank = i965_enable_vblank;
  		dev->driver->disable_vblank = i965_disable_vblank;
-		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-	} else if (IS_VALLEYVIEW(dev_priv)) {
+		i915->display.hpd_irq_setup = i915_hpd_irq_setup;
+	} else if (IS_VALLEYVIEW(i915)) {
  		dev->driver->irq_handler = valleyview_irq_handler;
  		dev->driver->irq_preinstall = valleyview_irq_reset;
  		dev->driver->irq_postinstall = valleyview_irq_postinstall;
  		dev->driver->irq_uninstall = valleyview_irq_reset;
  		dev->driver->enable_vblank = i965_enable_vblank;
  		dev->driver->disable_vblank = i965_disable_vblank;
-		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-	} else if (INTEL_GEN(dev_priv) >= 11) {
+		i915->display.hpd_irq_setup = i915_hpd_irq_setup;
+	} else if (INTEL_GEN(i915) >= 11) {
  		dev->driver->irq_handler = gen11_irq_handler;
  		dev->driver->irq_preinstall = gen11_irq_reset;
  		dev->driver->irq_postinstall = gen11_irq_postinstall;
  		dev->driver->irq_uninstall = gen11_irq_reset;
  		dev->driver->enable_vblank = gen8_enable_vblank;
  		dev->driver->disable_vblank = gen8_disable_vblank;
-		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
-	} else if (INTEL_GEN(dev_priv) >= 8) {
+		i915->display.hpd_irq_setup = gen11_hpd_irq_setup;
+	} else if (INTEL_GEN(i915) >= 8) {
  		dev->driver->irq_handler = gen8_irq_handler;
  		dev->driver->irq_preinstall = gen8_irq_reset;
  		dev->driver->irq_postinstall = gen8_irq_postinstall;
  		dev->driver->irq_uninstall = gen8_irq_reset;
  		dev->driver->enable_vblank = gen8_enable_vblank;
  		dev->driver->disable_vblank = gen8_disable_vblank;
-		if (IS_GEN9_LP(dev_priv))
-			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
-		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
-			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+		if (IS_GEN9_LP(i915))
+			i915->display.hpd_irq_setup = bxt_hpd_irq_setup;
+		else if (INTEL_PCH_TYPE(i915) >= PCH_SPT)
+			i915->display.hpd_irq_setup = spt_hpd_irq_setup;
  		else
-			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
+			i915->display.hpd_irq_setup = ilk_hpd_irq_setup;
+	} else if (HAS_PCH_SPLIT(i915)) {
  		dev->driver->irq_handler = ironlake_irq_handler;
  		dev->driver->irq_preinstall = ironlake_irq_reset;
  		dev->driver->irq_postinstall = ironlake_irq_postinstall;
  		dev->driver->irq_uninstall = ironlake_irq_reset;
  		dev->driver->enable_vblank = ironlake_enable_vblank;
  		dev->driver->disable_vblank = ironlake_disable_vblank;
-		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+		i915->display.hpd_irq_setup = ilk_hpd_irq_setup;
  	} else {
-		if (IS_GEN(dev_priv, 2)) {
+		if (IS_GEN(i915, 2)) {
  			dev->driver->irq_preinstall = i8xx_irq_reset;
  			dev->driver->irq_postinstall = i8xx_irq_postinstall;
  			dev->driver->irq_handler = i8xx_irq_handler;
  			dev->driver->irq_uninstall = i8xx_irq_reset;
  			dev->driver->enable_vblank = i8xx_enable_vblank;
  			dev->driver->disable_vblank = i8xx_disable_vblank;
-		} else if (IS_I945GM(dev_priv)) {
+		} else if (IS_I945GM(i915)) {
  			dev->driver->irq_preinstall = i915_irq_reset;
  			dev->driver->irq_postinstall = i915_irq_postinstall;
  			dev->driver->irq_uninstall = i915_irq_reset;
  			dev->driver->irq_handler = i915_irq_handler;
  			dev->driver->enable_vblank = i945gm_enable_vblank;
  			dev->driver->disable_vblank = i945gm_disable_vblank;
-		} else if (IS_GEN(dev_priv, 3)) {
+		} else if (IS_GEN(i915, 3)) {
  			dev->driver->irq_preinstall = i915_irq_reset;
  			dev->driver->irq_postinstall = i915_irq_postinstall;
  			dev->driver->irq_uninstall = i915_irq_reset;
@@ -4899,8 +4899,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
  			dev->driver->enable_vblank = i965_enable_vblank;
  			dev->driver->disable_vblank = i965_disable_vblank;
  		}
-		if (I915_HAS_HOTPLUG(dev_priv))
-			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+		if (I915_HAS_HOTPLUG(i915))
+			i915->display.hpd_irq_setup = i915_hpd_irq_setup;
  	}
  }
@@ -4923,7 +4923,7 @@ void intel_irq_fini(struct drm_i915_private *i915)
  /**
   * intel_irq_install - enables the hardware interrupt
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This function enables the hardware interrupt handling, but leaves the hotplug
   * handling still disabled. It is called after intel_irq_init().
@@ -4932,56 +4932,56 @@ void intel_irq_fini(struct drm_i915_private *i915)
   * but don't want to deal with the hassle of concurrent probe and hotplug
   * workers. Hence the split into this two-stage approach.
   */
-int intel_irq_install(struct drm_i915_private *dev_priv)
+int intel_irq_install(struct drm_i915_private *i915)
  {
  	/*
  	 * We enable some interrupt sources in our postinstall hooks, so mark
  	 * interrupts as enabled _before_ actually enabling them to avoid
  	 * special cases in our ordering checks.
  	 */
-	dev_priv->runtime_pm.irqs_enabled = true;
+	i915->runtime_pm.irqs_enabled = true;
- return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
+	return drm_irq_install(&i915->drm, i915->drm.pdev->irq);
  }
/**
   * intel_irq_uninstall - finilizes all irq handling
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This stops interrupt and hotplug handling and unregisters and frees all
   * resources acquired in the init functions.
   */
-void intel_irq_uninstall(struct drm_i915_private *dev_priv)
+void intel_irq_uninstall(struct drm_i915_private *i915)
  {
-	drm_irq_uninstall(&dev_priv->drm);
-	intel_hpd_cancel_work(dev_priv);
-	dev_priv->runtime_pm.irqs_enabled = false;
+	drm_irq_uninstall(&i915->drm);
+	intel_hpd_cancel_work(i915);
+	i915->runtime_pm.irqs_enabled = false;
  }
/**
   * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This function is used to disable interrupts at runtime, both in the runtime
   * pm and the system suspend/resume code.
   */
-void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *i915)
  {
-	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
-	dev_priv->runtime_pm.irqs_enabled = false;
-	synchronize_irq(dev_priv->drm.irq);
+	i915->drm.driver->irq_uninstall(&i915->drm);
+	i915->runtime_pm.irqs_enabled = false;
+	synchronize_irq(i915->drm.irq);
  }
/**
   * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This function is used to enable interrupts at runtime, both in the runtime
   * pm and the system suspend/resume code.
   */
-void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *i915)
  {
-	dev_priv->runtime_pm.irqs_enabled = true;
-	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
-	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
+	i915->runtime_pm.irqs_enabled = true;
+	i915->drm.driver->irq_preinstall(&i915->drm);
+	i915->drm.driver->irq_postinstall(&i915->drm);
  }
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index cb25dd213308..604504993513 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -13,77 +13,77 @@
  struct drm_i915_private;
  struct intel_crtc;
-extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_irq_fini(struct drm_i915_private *dev_priv);
-int intel_irq_install(struct drm_i915_private *dev_priv);
-void intel_irq_uninstall(struct drm_i915_private *dev_priv);
+extern void intel_irq_init(struct drm_i915_private *i915);
+extern void intel_irq_fini(struct drm_i915_private *i915);
+int intel_irq_install(struct drm_i915_private *i915);
+void intel_irq_uninstall(struct drm_i915_private *i915);
-u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
+u32 i915_pipestat_enable_mask(struct drm_i915_private *i915,
  			      enum pipe pipe);
  void
-i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe,
  		     u32 status_mask);
void
-i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe,
  		      u32 status_mask);
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void valleyview_enable_display_irqs(struct drm_i915_private *i915);
+void valleyview_disable_display_irqs(struct drm_i915_private *i915);
-void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update(struct drm_i915_private *i915,
  				   u32 mask,
  				   u32 bits);
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+void ilk_update_display_irq(struct drm_i915_private *i915,
  			    u32 interrupt_mask,
  			    u32 enabled_irq_mask);
  static inline void
-ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
+ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
  {
-	ilk_update_display_irq(dev_priv, bits, bits);
+	ilk_update_display_irq(i915, bits, bits);
  }
  static inline void
-ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
+ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
  {
-	ilk_update_display_irq(dev_priv, bits, 0);
+	ilk_update_display_irq(i915, bits, 0);
  }
-void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+void bdw_update_pipe_irq(struct drm_i915_private *i915,
  			 enum pipe pipe,
  			 u32 interrupt_mask,
  			 u32 enabled_irq_mask);
-static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
+static inline void bdw_enable_pipe_irq(struct drm_i915_private *i915,
  				       enum pipe pipe, u32 bits)
  {
-	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
+	bdw_update_pipe_irq(i915, pipe, bits, bits);
  }
-static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
+static inline void bdw_disable_pipe_irq(struct drm_i915_private *i915,
  					enum pipe pipe, u32 bits)
  {
-	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
+	bdw_update_pipe_irq(i915, pipe, bits, 0);
  }
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+void ibx_display_interrupt_update(struct drm_i915_private *i915,
  				  u32 interrupt_mask,
  				  u32 enabled_irq_mask);
  static inline void
-ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
+ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
  {
-	ibx_display_interrupt_update(dev_priv, bits, bits);
+	ibx_display_interrupt_update(i915, bits, bits);
  }
  static inline void
-ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
+ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
  {
-	ibx_display_interrupt_update(dev_priv, bits, 0);
+	ibx_display_interrupt_update(i915, bits, 0);
  }
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
+void gen5_enable_gt_irq(struct drm_i915_private *i915, u32 mask);
+void gen5_disable_gt_irq(struct drm_i915_private *i915, u32 mask);
+void gen6_mask_pm_irq(struct drm_i915_private *i915, u32 mask);
+void gen6_unmask_pm_irq(struct drm_i915_private *i915, u32 mask);
+void gen11_reset_rps_interrupts(struct drm_i915_private *i915);
+void gen6_reset_rps_interrupts(struct drm_i915_private *i915);
+void gen6_enable_rps_interrupts(struct drm_i915_private *i915);
+void gen6_disable_rps_interrupts(struct drm_i915_private *i915);
+void gen6_rps_reset_ei(struct drm_i915_private *i915);
static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
  					    u32 mask)
@@ -91,25 +91,25 @@ static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
  	return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
  }
-void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
-static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *i915);
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *i915);
+static inline bool intel_irqs_enabled(struct drm_i915_private *i915)
  {
  	/*
  	 * We only use drm_irq_uninstall() at unload and VT switch, so
  	 * this is the only thing we need to check.
  	 */
-	return dev_priv->runtime_pm.irqs_enabled;
+	return i915->runtime_pm.irqs_enabled;
  }
int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_post_enable(struct drm_i915_private *i915,
  				     u8 pipe_mask);
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915,
  				     u8 pipe_mask);
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_reset_guc_interrupts(struct drm_i915_private *i915);
+void gen9_enable_guc_interrupts(struct drm_i915_private *i915);
+void gen9_disable_guc_interrupts(struct drm_i915_private *i915);
  void gen11_reset_guc_interrupts(struct drm_i915_private *i915);
  void gen11_enable_guc_interrupts(struct drm_i915_private *i915);
  void gen11_disable_guc_interrupts(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 79f8ec756362..2bd664c16ba0 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -94,7 +94,7 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
  	return false;
  }
-void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
+void i915_memcpy_init_early(struct drm_i915_private *i915)
  {
  	/*
  	 * Some hypervisors (e.g. KVM) don't support VEX-prefix instructions
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
index 4acdb94555b7..7c97cd90e973 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -64,28 +64,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_bdw(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"d6de6f55-e526-4f79-a6a6-d7315c09044e",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
index 0e667f1a8aa1..69479e5b436f 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_BDW_H__
  #define __I915_OA_BDW_H__
-extern void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_bdw(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
index a44195c39923..5ec236b33ecc 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -62,28 +62,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_bxt(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"5ee72f5c-092f-421e-8b70-225f7c3e9612",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
index 679e92cf4f1d..47d8327f38a1 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_BXT_H__
  #define __I915_OA_BXT_H__
-extern void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_bxt(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
index 7f60d51b8761..3972cd7dcbfc 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -63,28 +63,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_cflgt2(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"74fb4902-d3d3-4237-9e90-cbdc68d0a446",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
index 4d6025559bbe..434545c9c5d9 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_CFLGT2_H__
  #define __I915_OA_CFLGT2_H__
-extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index a92c38e3a0ce..c00b9d9d6ddf 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -63,28 +63,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_cflgt3(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"577e8e2c-3fa0-4875-8743-3538d585e3b0",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
index 0697f4077402..2c23eda056aa 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_CFLGT3_H__
  #define __I915_OA_CFLGT3_H__
-extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
index 71ec889a0114..5829d24d0f7c 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -63,28 +63,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_chv(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"4a534b07-cba3-414d-8d60-874830e883aa",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
index 0986eae3135f..4bb3b6be7bd2 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ b/drivers/gpu/drm/i915/i915_oa_chv.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_CHV_H__
  #define __I915_OA_CHV_H__
-extern void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_chv(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index 5c23d883d6c9..fcf08b9e7c73 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -75,28 +75,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_cnl(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"db41edd4-d8e7-4730-ad11-b9a2d6833503",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
index e830a406aff2..c70101655049 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.h
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_CNL_H__
  #define __I915_OA_CNL_H__
-extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_cnl(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
index 4bdda66df7d2..fdb38b9ea9bb 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -62,28 +62,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_glk(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"dd3fd789-e783-4204-8cd0-b671bbccb0cf",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
index 06dedf991edb..5d07986827c8 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ b/drivers/gpu/drm/i915/i915_oa_glk.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_GLK_H__
  #define __I915_OA_GLK_H__
-extern void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_glk(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index cc6526fdd2bd..640301c9fb9d 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -92,28 +92,28 @@ show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *b
  }
void
-i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_hsw(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"403d8832-1a27-4aa6-a64e-f5389ce7b212",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
+	i915->perf.oa.test_config.mux_regs = mux_config_render_basic;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_render_basic;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_render_basic;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_render_basic;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_render_basic;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_render_basic_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_render_basic_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
index 3d0c870cd0bd..7812a9df8647 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_HSW_H__
  #define __I915_OA_HSW_H__
-extern void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_hsw(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c
index baa51427a543..2a8c6de619ba 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/i915_oa_icl.c
@@ -72,28 +72,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_icl(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"a291665e-244b-4b76-9b9a-01de9d3c8068",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
index 24eaa97d61ba..a7c50d31e678 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.h
+++ b/drivers/gpu/drm/i915/i915_oa_icl.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_ICL_H__
  #define __I915_OA_ICL_H__
-extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_icl(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
index 168e49ab0d4d..975311df3d62 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -63,28 +63,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_kblgt2(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"baa3c7e4-52b6-4b85-801e-465a94b746dd",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
index a55398a904de..53655de7b976 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_KBLGT2_H__
  #define __I915_OA_KBLGT2_H__
-extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
index 6ffa553c388e..ed39f8dd9af2 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -63,28 +63,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_kblgt3(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"f1792f32-6db2-4b50-b4b2-557128f1688d",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
index 3ddd3483b7cc..60ebe2b20ada 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_KBLGT3_H__
  #define __I915_OA_KBLGT3_H__
-extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
index 7ce6ee851d43..60b4dbcd4c59 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -62,28 +62,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_sklgt2(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"1651949f-0ac0-4cb1-a06f-dafd74a407d1",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
index be6256037239..ee6f747502ea 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_SKLGT2_H__
  #define __I915_OA_SKLGT2_H__
-extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
index 086ca2631e1c..8c23cf712a7a 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -63,28 +63,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_sklgt3(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"2b985803-d3c9-4629-8a4f-634bfecba0e8",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
index 650beb068e56..35ccb4125d71 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_SKLGT3_H__
  #define __I915_OA_SKLGT3_H__
-extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
index b291a6eb8a87..b846da6198f8 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -63,28 +63,28 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
  }
void
-i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
+i915_perf_load_test_config_sklgt4(struct drm_i915_private *i915)
  {
-	strlcpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(i915->perf.oa.test_config.uuid,
  		"882fa433-1f4a-4a67-a962-c741888fe5f5",
-		sizeof(dev_priv->perf.oa.test_config.uuid));
-	dev_priv->perf.oa.test_config.id = 1;
+		sizeof(i915->perf.oa.test_config.uuid));
+	i915->perf.oa.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
-	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+	i915->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	i915->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
-	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+	i915->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	i915->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
-	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+	i915->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	i915->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
-	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+	i915->perf.oa.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
+	i915->perf.oa.test_config.sysfs_metric.attrs = i915->perf.oa.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+	i915->perf.oa.test_config.attrs[0] = &i915->perf.oa.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
-	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
-	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+	i915->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	i915->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	i915->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
  }
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
index 8dcf849d131e..f3f6d7bf28e0 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
@@ -10,6 +10,6 @@
  #ifndef __I915_OA_SKLGT4_H__
  #define __I915_OA_SKLGT4_H__
-extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index d92ddfada262..e0ef77ec4da3 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -366,7 +366,7 @@ struct perf_open_properties {
  	int oa_period_exponent;
  };
-static void free_oa_config(struct drm_i915_private *dev_priv,
+static void free_oa_config(struct drm_i915_private *i915,
  			   struct i915_oa_config *oa_config)
  {
  	if (!PTR_ERR(oa_config->flex_regs))
@@ -378,48 +378,48 @@ static void free_oa_config(struct drm_i915_private *dev_priv,
  	kfree(oa_config);
  }
-static void put_oa_config(struct drm_i915_private *dev_priv,
+static void put_oa_config(struct drm_i915_private *i915,
  			  struct i915_oa_config *oa_config)
  {
  	if (!atomic_dec_and_test(&oa_config->ref_count))
  		return;
- free_oa_config(dev_priv, oa_config);
+	free_oa_config(i915, oa_config);
  }
-static int get_oa_config(struct drm_i915_private *dev_priv,
+static int get_oa_config(struct drm_i915_private *i915,
  			 int metrics_set,
  			 struct i915_oa_config **out_config)
  {
  	int ret;
if (metrics_set == 1) {
-		*out_config = &dev_priv->perf.oa.test_config;
-		atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
+		*out_config = &i915->perf.oa.test_config;
+		atomic_inc(&i915->perf.oa.test_config.ref_count);
  		return 0;
  	}
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+	ret = mutex_lock_interruptible(&i915->perf.metrics_lock);
  	if (ret)
  		return ret;
- *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
+	*out_config = idr_find(&i915->perf.metrics_idr, metrics_set);
  	if (!*out_config)
  		ret = -EINVAL;
  	else
  		atomic_inc(&(*out_config)->ref_count);
- mutex_unlock(&dev_priv->perf.metrics_lock);
+	mutex_unlock(&i915->perf.metrics_lock);
return ret;
  }
-static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
+static u32 gen8_oa_hw_tail_read(struct drm_i915_private *i915)
  {
  	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
  }
-static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
+static u32 gen7_oa_hw_tail_read(struct drm_i915_private *i915)
  {
  	u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
@@ -428,7 +428,7 @@ static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
 /**
   * oa_buffer_check_unlocked - check for data and update tail ptr state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This is either called via fops (for blocking reads in user ctx) or the poll
   * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
@@ -450,9 +450,9 @@ static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
   *
   * Returns: %true if the OA buffer contains data, else %false
   */
-static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
+static bool oa_buffer_check_unlocked(struct drm_i915_private *i915)
  {
-	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+	int report_size = i915->perf.oa.oa_buffer.format_size;
  	unsigned long flags;
  	unsigned int aged_idx;
  	u32 head, hw_tail, aged_tail, aging_tail;
@@ -462,19 +462,19 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
  	 * could result in an OA buffer reset which might reset the head,
  	 * tails[] and aged_tail state.
  	 */
-	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+	spin_lock_irqsave(&i915->perf.oa.oa_buffer.ptr_lock, flags);
/* NB: The head we observe here might effectively be a little out of
  	 * date (between head and tails[aged_idx].offset if there is currently
  	 * a read() in progress.
  	 */
-	head = dev_priv->perf.oa.oa_buffer.head;
+	head = i915->perf.oa.oa_buffer.head;
- aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
-	aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
-	aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;
+	aged_idx = i915->perf.oa.oa_buffer.aged_tail_idx;
+	aged_tail = i915->perf.oa.oa_buffer.tails[aged_idx].offset;
+	aging_tail = i915->perf.oa.oa_buffer.tails[!aged_idx].offset;
- hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);
+	hw_tail = i915->perf.oa.ops.oa_hw_tail_read(i915);
/* The tail pointer increases in 64 byte increments,
  	 * not in report_size steps...
@@ -494,16 +494,16 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
  	 * available) without needing to wait for a later hrtimer callback.
  	 */
  	if (aging_tail != INVALID_TAIL_PTR &&
-	    ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
+	    ((now - i915->perf.oa.oa_buffer.aging_timestamp) >
  	     OA_TAIL_MARGIN_NSEC)) {
aged_idx ^= 1;
-		dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;
+		i915->perf.oa.oa_buffer.aged_tail_idx = aged_idx;
  		aged_tail = aging_tail;
  		/* Mark that we need a new pointer to start aging... */
-		dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
+		i915->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
  		aging_tail = INVALID_TAIL_PTR;
  	}
@@ -518,7 +518,7 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
  	if (aging_tail == INVALID_TAIL_PTR &&
  	    (aged_tail == INVALID_TAIL_PTR ||
  	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
-		struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
+		struct i915_vma *vma = i915->perf.oa.oa_buffer.vma;
  		u32 gtt_offset = i915_ggtt_offset(vma);
/* Be paranoid and do a bounds check on the pointer read back
@@ -527,16 +527,16 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
  		 */
  		if (hw_tail >= gtt_offset &&
  		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
-			dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
+			i915->perf.oa.oa_buffer.tails[!aged_idx].offset =
  				aging_tail = hw_tail;
-			dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
+			i915->perf.oa.oa_buffer.aging_timestamp = now;
  		} else {
  			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
  				  hw_tail);
  		}
  	}
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+	spin_unlock_irqrestore(&i915->perf.oa.oa_buffer.ptr_lock, flags);
return aged_tail == INVALID_TAIL_PTR ?
  		false : OA_TAKEN(aged_tail, head) >= report_size;
@@ -599,8 +599,8 @@ static int append_oa_sample(struct i915_perf_stream *stream,
  			    size_t *offset,
  			    const u8 *report)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
-	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+	struct drm_i915_private *i915 = stream->i915;
+	int report_size = i915->perf.oa.oa_buffer.format_size;
  	struct drm_i915_perf_record_header header;
  	u32 sample_flags = stream->sample_flags;
@@ -651,10 +651,10 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
  				  size_t count,
  				  size_t *offset)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
-	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
-	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
-	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+	struct drm_i915_private *i915 = stream->i915;
+	int report_size = i915->perf.oa.oa_buffer.format_size;
+	u8 *oa_buf_base = i915->perf.oa.oa_buffer.vaddr;
+	u32 gtt_offset = i915_ggtt_offset(i915->perf.oa.oa_buffer.vma);
  	u32 mask = (OA_BUFFER_SIZE - 1);
  	size_t start_offset = *offset;
  	unsigned long flags;
@@ -666,13 +666,13 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
  	if (WARN_ON(!stream->enabled))
  		return -EIO;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+	spin_lock_irqsave(&i915->perf.oa.oa_buffer.ptr_lock, flags);
- head = dev_priv->perf.oa.oa_buffer.head;
-	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
-	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+	head = i915->perf.oa.oa_buffer.head;
+	aged_tail_idx = i915->perf.oa.oa_buffer.aged_tail_idx;
+	tail = i915->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+	spin_unlock_irqrestore(&i915->perf.oa.oa_buffer.ptr_lock, flags);
/*
  	 * An invalid tail pointer here means we're still waiting for the poll
@@ -736,12 +736,12 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
  		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
  			  OAREPORT_REASON_MASK);
  		if (reason == 0) {
-			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
+			if (__ratelimit(&i915->perf.oa.spurious_report_rs))
  				DRM_NOTE("Skipping spurious, invalid OA report\n");
  			continue;
  		}
- ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
+		ctx_id = report32[2] & i915->perf.oa.specific_ctx_id_mask;
/*
  		 * Squash whatever is in the CTX_ID field if it's marked as
@@ -751,7 +751,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
  		 * Note: that we don't clear the valid_ctx_bit so userspace can
  		 * understand that the ID has been squashed by the kernel.
  		 */
-		if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
+		if (!(report32[0] & i915->perf.oa.gen8_valid_ctx_bit))
  			ctx_id = report32[2] = INVALID_CTX_ID;
/*
@@ -785,18 +785,18 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
  		 * switches since it's not-uncommon for periodic samples to
  		 * identify a switch before any 'context switch' report.
  		 */
-		if (!dev_priv->perf.oa.exclusive_stream->ctx ||
-		    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
-		    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
-		     dev_priv->perf.oa.specific_ctx_id) ||
+		if (!i915->perf.oa.exclusive_stream->ctx ||
+		    i915->perf.oa.specific_ctx_id == ctx_id ||
+		    (i915->perf.oa.oa_buffer.last_ctx_id ==
+		     i915->perf.oa.specific_ctx_id) ||
  		    reason & OAREPORT_REASON_CTX_SWITCH) {
/*
  			 * While filtering for a single context we avoid
  			 * leaking the IDs of other contexts.
  			 */
-			if (dev_priv->perf.oa.exclusive_stream->ctx &&
-			    dev_priv->perf.oa.specific_ctx_id != ctx_id) {
+			if (i915->perf.oa.exclusive_stream->ctx &&
+			    i915->perf.oa.specific_ctx_id != ctx_id) {
  				report32[2] = INVALID_CTX_ID;
  			}
@@ -805,7 +805,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
  			if (ret)
  				break;
- dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
+			i915->perf.oa.oa_buffer.last_ctx_id = ctx_id;
  		}
/*
@@ -819,7 +819,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
  	}
if (start_offset != *offset) {
-		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+		spin_lock_irqsave(&i915->perf.oa.oa_buffer.ptr_lock, flags);
/*
  		 * We removed the gtt_offset for the copy loop above, indexing
@@ -828,9 +828,9 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
  		head += gtt_offset;
I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
-		dev_priv->perf.oa.oa_buffer.head = head;
+		i915->perf.oa.oa_buffer.head = head;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+		spin_unlock_irqrestore(&i915->perf.oa.oa_buffer.ptr_lock, flags);
  	}
return ret;
@@ -861,11 +861,11 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
  			size_t count,
  			size_t *offset)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	u32 oastatus;
  	int ret;
- if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+	if (WARN_ON(!i915->perf.oa.oa_buffer.vaddr))
  		return -EIO;
oastatus = I915_READ(GEN8_OASTATUS);
@@ -891,10 +891,10 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
  			return ret;
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
-			  dev_priv->perf.oa.period_exponent);
+			  i915->perf.oa.period_exponent);
- dev_priv->perf.oa.ops.oa_disable(stream);
-		dev_priv->perf.oa.ops.oa_enable(stream);
+		i915->perf.oa.ops.oa_disable(stream);
+		i915->perf.oa.ops.oa_enable(stream);
/*
  		 * Note: .oa_enable() is expected to re-init the oabuffer and
@@ -940,10 +940,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
  				  size_t count,
  				  size_t *offset)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
-	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
-	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
-	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+	struct drm_i915_private *i915 = stream->i915;
+	int report_size = i915->perf.oa.oa_buffer.format_size;
+	u8 *oa_buf_base = i915->perf.oa.oa_buffer.vaddr;
+	u32 gtt_offset = i915_ggtt_offset(i915->perf.oa.oa_buffer.vma);
  	u32 mask = (OA_BUFFER_SIZE - 1);
  	size_t start_offset = *offset;
  	unsigned long flags;
@@ -955,13 +955,13 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
  	if (WARN_ON(!stream->enabled))
  		return -EIO;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+	spin_lock_irqsave(&i915->perf.oa.oa_buffer.ptr_lock, flags);
- head = dev_priv->perf.oa.oa_buffer.head;
-	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
-	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+	head = i915->perf.oa.oa_buffer.head;
+	aged_tail_idx = i915->perf.oa.oa_buffer.aged_tail_idx;
+	tail = i915->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+	spin_unlock_irqrestore(&i915->perf.oa.oa_buffer.ptr_lock, flags);
/* An invalid tail pointer here means we're still waiting for the poll
  	 * hrtimer callback to give us a pointer
@@ -1014,7 +1014,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
  		 * copying it to userspace...
  		 */
  		if (report32[0] == 0) {
-			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
+			if (__ratelimit(&i915->perf.oa.spurious_report_rs))
  				DRM_NOTE("Skipping spurious, invalid OA report\n");
  			continue;
  		}
@@ -1033,7 +1033,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
  	}
if (start_offset != *offset) {
-		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+		spin_lock_irqsave(&i915->perf.oa.oa_buffer.ptr_lock, flags);
/* We removed the gtt_offset for the copy loop above, indexing
  		 * relative to oa_buf_base so put back here...
@@ -1043,9 +1043,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
  		I915_WRITE(GEN7_OASTATUS2,
  			   ((head & GEN7_OASTATUS2_HEAD_MASK) |
  			    GEN7_OASTATUS2_MEM_SELECT_GGTT));
-		dev_priv->perf.oa.oa_buffer.head = head;
+		i915->perf.oa.oa_buffer.head = head;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+		spin_unlock_irqrestore(&i915->perf.oa.oa_buffer.ptr_lock, flags);
  	}
return ret;
@@ -1072,11 +1072,11 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
  			size_t count,
  			size_t *offset)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	u32 oastatus1;
  	int ret;
- if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+	if (WARN_ON(!i915->perf.oa.oa_buffer.vaddr))
  		return -EIO;
oastatus1 = I915_READ(GEN7_OASTATUS1);
@@ -1086,7 +1086,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
  	 * may be updated asynchronously) so we ignore status bits
  	 * that have already been reported to userspace.
  	 */
-	oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;
+	oastatus1 &= ~i915->perf.oa.gen7_latched_oastatus1;
/* We treat OABUFFER_OVERFLOW as a significant error:
  	 *
@@ -1115,10 +1115,10 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
  			return ret;
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
-			  dev_priv->perf.oa.period_exponent);
+			  i915->perf.oa.period_exponent);
- dev_priv->perf.oa.ops.oa_disable(stream);
-		dev_priv->perf.oa.ops.oa_enable(stream);
+		i915->perf.oa.ops.oa_disable(stream);
+		i915->perf.oa.ops.oa_enable(stream);
oastatus1 = I915_READ(GEN7_OASTATUS1);
  	}
@@ -1128,7 +1128,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
  				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
  		if (ret)
  			return ret;
-		dev_priv->perf.oa.gen7_latched_oastatus1 |=
+		i915->perf.oa.gen7_latched_oastatus1 |=
  			GEN7_OASTATUS1_REPORT_LOST;
  	}
@@ -1151,14 +1151,14 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
   */
  static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
/* We would wait indefinitely if periodic sampling is not enabled */
-	if (!dev_priv->perf.oa.periodic)
+	if (!i915->perf.oa.periodic)
  		return -EIO;
- return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
-					oa_buffer_check_unlocked(dev_priv));
+	return wait_event_interruptible(i915->perf.oa.poll_wq,
+					oa_buffer_check_unlocked(i915));
  }
/**
@@ -1175,9 +1175,9 @@ static void i915_oa_poll_wait(struct i915_perf_stream *stream,
  			      struct file *file,
  			      poll_table *wait)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
- poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
+	poll_wait(file, &i915->perf.oa.poll_wq, wait);
  }
/**
@@ -1197,9 +1197,9 @@ static int i915_oa_read(struct i915_perf_stream *stream,
  			size_t count,
  			size_t *offset)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
- return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
+	return i915->perf.oa.ops.read(stream, buf, count, offset);
  }
static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
@@ -1248,7 +1248,7 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
   */
  static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
  {
-	struct drm_i915_private *i915 = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	struct intel_context *ce;
ce = oa_pin_context(i915, stream->ctx);
@@ -1330,17 +1330,17 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
   */
  static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	struct intel_context *ce;
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-	dev_priv->perf.oa.specific_ctx_id_mask = 0;
+	i915->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+	i915->perf.oa.specific_ctx_id_mask = 0;
- ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+	ce = fetch_and_zero(&i915->perf.oa.pinned_ctx);
  	if (ce) {
-		mutex_lock(&dev_priv->drm.struct_mutex);
+		mutex_lock(&i915->drm.struct_mutex);
  		intel_context_unpin(ce);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+		mutex_unlock(&i915->drm.struct_mutex);
  	}
  }
@@ -1359,64 +1359,64 @@ free_oa_buffer(struct drm_i915_private *i915)
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
- BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
+	BUG_ON(stream != i915->perf.oa.exclusive_stream);
/*
  	 * Unset exclusive_stream first, it will be checked while disabling
  	 * the metric set on gen8+.
  	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	dev_priv->perf.oa.exclusive_stream = NULL;
-	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
+	i915->perf.oa.exclusive_stream = NULL;
+	i915->perf.oa.ops.disable_metric_set(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
- free_oa_buffer(dev_priv);
+	free_oa_buffer(i915);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-	intel_runtime_pm_put(dev_priv, stream->wakeref);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	intel_runtime_pm_put(i915, stream->wakeref);
if (stream->ctx)
  		oa_put_render_ctx_id(stream);
- put_oa_config(dev_priv, stream->oa_config);
+	put_oa_config(i915, stream->oa_config);
- if (dev_priv->perf.oa.spurious_report_rs.missed) {
+	if (i915->perf.oa.spurious_report_rs.missed) {
  		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
-			 dev_priv->perf.oa.spurious_report_rs.missed);
+			 i915->perf.oa.spurious_report_rs.missed);
  	}
  }
-static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
+static void gen7_init_oa_buffer(struct drm_i915_private *i915)
  {
-	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+	u32 gtt_offset = i915_ggtt_offset(i915->perf.oa.oa_buffer.vma);
  	unsigned long flags;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+	spin_lock_irqsave(&i915->perf.oa.oa_buffer.ptr_lock, flags);
/* Pre-DevBDW: OABUFFER must be set with counters off,
  	 * before OASTATUS1, but after OASTATUS2
  	 */
  	I915_WRITE(GEN7_OASTATUS2,
  		   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
-	dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+	i915->perf.oa.oa_buffer.head = gtt_offset;
	I915_WRITE(GEN7_OABUFFER, gtt_offset);
	I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
	/* Mark that we need updated tail pointers to read from... */
-	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
-	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+	i915->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+	i915->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+	spin_unlock_irqrestore(&i915->perf.oa.oa_buffer.ptr_lock, flags);
/* On Haswell we have to track which OASTATUS1 flags we've
  	 * already seen since they can't be cleared while periodic
  	 * sampling is enabled.
  	 */
-	dev_priv->perf.oa.gen7_latched_oastatus1 = 0;
+	i915->perf.oa.gen7_latched_oastatus1 = 0;
/* NB: although the OA buffer will initially be allocated
  	 * zeroed via shmfs (and so this memset is redundant when
@@ -1429,24 +1429,24 @@ static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
  	 * the assumption that new reports are being written to zeroed
  	 * memory...
  	 */
-	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+	memset(i915->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
/* Maybe make ->pollin per-stream state if we support multiple
  	 * concurrent streams in the future.
  	 */
-	dev_priv->perf.oa.pollin = false;
+	i915->perf.oa.pollin = false;
  }
-static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
+static void gen8_init_oa_buffer(struct drm_i915_private *i915)
  {
-	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+	u32 gtt_offset = i915_ggtt_offset(i915->perf.oa.oa_buffer.vma);
  	unsigned long flags;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+	spin_lock_irqsave(&i915->perf.oa.oa_buffer.ptr_lock, flags);
I915_WRITE(GEN8_OASTATUS, 0);
  	I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
-	dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+	i915->perf.oa.oa_buffer.head = gtt_offset;
	I915_WRITE(GEN8_OABUFFER_UDW, 0);
@@ -1463,17 +1463,17 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
  	I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
-	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
-	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+	i915->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+	i915->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
/*
  	 * Reset state used to recognise context switches, affecting which
  	 * reports we will forward to userspace while filtering for a single
  	 * context.
  	 */
-	dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;
+	i915->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+	spin_unlock_irqrestore(&i915->perf.oa.oa_buffer.ptr_lock, flags);
/*
  	 * NB: although the OA buffer will initially be allocated
@@ -1487,32 +1487,32 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
  	 * the assumption that new reports are being written to zeroed
  	 * memory...
  	 */
-	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+	memset(i915->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
/*
  	 * Maybe make ->pollin per-stream state if we support multiple
  	 * concurrent streams in the future.
  	 */
-	dev_priv->perf.oa.pollin = false;
+	i915->perf.oa.pollin = false;
  }
-static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
+static int alloc_oa_buffer(struct drm_i915_private *i915)
  {
  	struct drm_i915_gem_object *bo;
  	struct i915_vma *vma;
  	int ret;
- if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
+	if (WARN_ON(i915->perf.oa.oa_buffer.vma))
  		return -ENODEV;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+	ret = i915_mutex_lock_interruptible(&i915->drm);
  	if (ret)
  		return ret;
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
  	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
- bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE);
+	bo = i915_gem_object_create_shmem(i915, OA_BUFFER_SIZE);
  	if (IS_ERR(bo)) {
  		DRM_ERROR("Failed to allocate OA buffer\n");
  		ret = PTR_ERR(bo);
@@ -1527,18 +1527,18 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
  		ret = PTR_ERR(vma);
  		goto err_unref;
  	}
-	dev_priv->perf.oa.oa_buffer.vma = vma;
+	i915->perf.oa.oa_buffer.vma = vma;
- dev_priv->perf.oa.oa_buffer.vaddr =
+	i915->perf.oa.oa_buffer.vaddr =
  		i915_gem_object_pin_map(bo, I915_MAP_WB);
-	if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
-		ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
+	if (IS_ERR(i915->perf.oa.oa_buffer.vaddr)) {
+		ret = PTR_ERR(i915->perf.oa.oa_buffer.vaddr);
  		goto err_unpin;
  	}
DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
-			 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
-			 dev_priv->perf.oa.oa_buffer.vaddr);
+			 i915_ggtt_offset(i915->perf.oa.oa_buffer.vma),
+			 i915->perf.oa.oa_buffer.vaddr);
	goto unlock;
@@ -1548,15 +1548,15 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
  err_unref:
  	i915_gem_object_put(bo);
- dev_priv->perf.oa.oa_buffer.vaddr = NULL;
-	dev_priv->perf.oa.oa_buffer.vma = NULL;
+	i915->perf.oa.oa_buffer.vaddr = NULL;
+	i915->perf.oa.oa_buffer.vma = NULL;
unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
  	return ret;
  }
-static void config_oa_regs(struct drm_i915_private *dev_priv,
+static void config_oa_regs(struct drm_i915_private *i915,
  			   const struct i915_oa_reg *regs,
  			   u32 n_regs)
  {
@@ -1571,7 +1571,7 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
static int hsw_enable_metric_set(struct i915_perf_stream *stream)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	const struct i915_oa_config *oa_config = stream->oa_config;
/* PRM:
@@ -1588,7 +1588,7 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
  	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
  				  GEN6_CSUNIT_CLOCK_GATE_DISABLE));
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+	config_oa_regs(i915, oa_config->mux_regs, oa_config->mux_regs_len);
/* It apparently takes a fairly long time for a new MUX
  	 * configuration to be be applied after these register writes.
@@ -1613,13 +1613,13 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
  	 */
  	usleep_range(15000, 20000);
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
+	config_oa_regs(i915, oa_config->b_counter_regs,
  		       oa_config->b_counter_regs_len);
return 0;
  }
-static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
+static void hsw_disable_metric_set(struct drm_i915_private *i915)
  {
  	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
  				  ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
@@ -1716,15 +1716,15 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
   *
   * Note: it's only the RCS/Render context that has any OA state.
   */
-static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
+static int gen8_configure_all_contexts(struct drm_i915_private *i915,
  				       const struct i915_oa_config *oa_config)
  {
-	unsigned int map_type = i915_coherent_map_type(dev_priv);
+	unsigned int map_type = i915_coherent_map_type(i915);
  	struct i915_gem_context *ctx;
  	struct i915_request *rq;
  	int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
/*
  	 * The OA register config is setup through the context image. This image
@@ -1739,14 +1739,14 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
  	 * So far the best way to work around this issue seems to be draining
  	 * the GPU from any submitted work.
  	 */
-	ret = i915_gem_wait_for_idle(dev_priv,
+	ret = i915_gem_wait_for_idle(i915,
  				     I915_WAIT_LOCKED,
  				     MAX_SCHEDULE_TIMEOUT);
  	if (ret)
  		return ret;
/* Update all contexts now that we've stalled the submission. */
-	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+	list_for_each_entry(ctx, &i915->contexts.list, link) {
  		struct i915_gem_engines_iter it;
  		struct intel_context *ce;
@@ -1783,7 +1783,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
  	 * Apply the configuration by doing one context restore of the edited
  	 * context image.
  	 */
-	rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context);
+	rq = i915_request_create(i915->engine[RCS0]->kernel_context);
  	if (IS_ERR(rq))
  		return PTR_ERR(rq);
@@ -1794,7 +1794,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
static int gen8_enable_metric_set(struct i915_perf_stream *stream)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	const struct i915_oa_config *oa_config = stream->oa_config;
  	int ret;
@@ -1821,7 +1821,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
  	 * be read back from automatically triggered reports, as part of the
  	 * RPT_ID field.
  	 */
-	if (IS_GEN_RANGE(dev_priv, 9, 11)) {
+	if (IS_GEN_RANGE(i915, 9, 11)) {
  		I915_WRITE(GEN8_OA_DEBUG,
  			   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
  					      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -1832,31 +1832,31 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
  	 * to make sure all slices/subslices are ON before writing to NOA
  	 * registers.
  	 */
-	ret = gen8_configure_all_contexts(dev_priv, oa_config);
+	ret = gen8_configure_all_contexts(i915, oa_config);
  	if (ret)
  		return ret;
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+	config_oa_regs(i915, oa_config->mux_regs, oa_config->mux_regs_len);
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
+	config_oa_regs(i915, oa_config->b_counter_regs,
  		       oa_config->b_counter_regs_len);
return 0;
  }
-static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
+static void gen8_disable_metric_set(struct drm_i915_private *i915)
  {
  	/* Reset all contexts' slices/subslices configurations. */
-	gen8_configure_all_contexts(dev_priv, NULL);
+	gen8_configure_all_contexts(i915, NULL);
I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
  				      ~GT_NOA_ENABLE));
  }
-static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
+static void gen10_disable_metric_set(struct drm_i915_private *i915)
  {
  	/* Reset all contexts' slices/subslices configurations. */
-	gen8_configure_all_contexts(dev_priv, NULL);
+	gen8_configure_all_contexts(i915, NULL);
/* Make sure we disable noa to save power. */
  	I915_WRITE(RPM_CONFIG1,
@@ -1865,12 +1865,12 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
static void gen7_oa_enable(struct i915_perf_stream *stream)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	struct i915_gem_context *ctx = stream->ctx;
-	u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
-	bool periodic = dev_priv->perf.oa.periodic;
-	u32 period_exponent = dev_priv->perf.oa.period_exponent;
-	u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+	u32 ctx_id = i915->perf.oa.specific_ctx_id;
+	bool periodic = i915->perf.oa.periodic;
+	u32 period_exponent = i915->perf.oa.period_exponent;
+	u32 report_format = i915->perf.oa.oa_buffer.format;
/*
  	 * Reset buf pointers so we don't forward reports from before now.
@@ -1881,7 +1881,7 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
  	 * on the assumption that certain fields are written to zeroed
  	 * memory which this helps maintains.
  	 */
-	gen7_init_oa_buffer(dev_priv);
+	gen7_init_oa_buffer(i915);
I915_WRITE(GEN7_OACONTROL,
  		   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
@@ -1895,8 +1895,8 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
static void gen8_oa_enable(struct i915_perf_stream *stream)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
-	u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+	struct drm_i915_private *i915 = stream->i915;
+	u32 report_format = i915->perf.oa.oa_buffer.format;
/*
  	 * Reset buf pointers so we don't forward reports from before now.
@@ -1907,7 +1907,7 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
  	 * on the assumption that certain fields are written to zeroed
  	 * memory which this helps maintains.
  	 */
-	gen8_init_oa_buffer(dev_priv);
+	gen8_init_oa_buffer(i915);
/*
  	 * Note: we don't rely on the hardware to perform single context
@@ -1930,19 +1930,19 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
   */
  static void i915_oa_stream_enable(struct i915_perf_stream *stream)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
- dev_priv->perf.oa.ops.oa_enable(stream);
+	i915->perf.oa.ops.oa_enable(stream);
- if (dev_priv->perf.oa.periodic)
-		hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
+	if (i915->perf.oa.periodic)
+		hrtimer_start(&i915->perf.oa.poll_check_timer,
  			      ns_to_ktime(POLL_PERIOD),
  			      HRTIMER_MODE_REL_PINNED);
  }
static void gen7_oa_disable(struct i915_perf_stream *stream)
  {
-	struct intel_uncore *uncore = &stream->dev_priv->uncore;
+	struct intel_uncore *uncore = &stream->i915->uncore;
intel_uncore_write(uncore, GEN7_OACONTROL, 0);
  	if (intel_wait_for_register(uncore,
@@ -1953,7 +1953,7 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
static void gen8_oa_disable(struct i915_perf_stream *stream)
  {
-	struct intel_uncore *uncore = &stream->dev_priv->uncore;
+	struct intel_uncore *uncore = &stream->i915->uncore;
intel_uncore_write(uncore, GEN8_OACONTROL, 0);
  	if (intel_wait_for_register(uncore,
@@ -1972,12 +1972,12 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
   */
  static void i915_oa_stream_disable(struct i915_perf_stream *stream)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
- dev_priv->perf.oa.ops.oa_disable(stream);
+	i915->perf.oa.ops.oa_disable(stream);
- if (dev_priv->perf.oa.periodic)
-		hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
+	if (i915->perf.oa.periodic)
+		hrtimer_cancel(&i915->perf.oa.poll_check_timer);
  }
static const struct i915_perf_stream_ops i915_oa_stream_ops = {
@@ -2011,7 +2011,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
  			       struct drm_i915_perf_open_param *param,
  			       struct perf_open_properties *props)
  {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	int format_size;
  	int ret;
@@ -2019,7 +2019,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
  	 * reason then don't let userspace try their luck with config
  	 * IDs
  	 */
-	if (!dev_priv->perf.metrics_kobj) {
+	if (!i915->perf.metrics_kobj) {
  		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
  		return -EINVAL;
  	}
@@ -2029,7 +2029,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
  		return -EINVAL;
  	}
- if (!dev_priv->perf.oa.ops.enable_metric_set) {
+	if (!i915->perf.oa.ops.enable_metric_set) {
  		DRM_DEBUG("OA unit not supported\n");
  		return -ENODEV;
  	}
@@ -2038,7 +2038,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
  	 * counter reports and marshal to the appropriate client
  	 * we currently only allow exclusive access
  	 */
-	if (dev_priv->perf.oa.exclusive_stream) {
+	if (i915->perf.oa.exclusive_stream) {
  		DRM_DEBUG("OA unit already in use\n");
  		return -EBUSY;
  	}
@@ -2059,32 +2059,32 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
  	 *
  	 * Using the same limiting factors as printk_ratelimit()
  	 */
-	ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
+	ratelimit_state_init(&i915->perf.oa.spurious_report_rs,
  			     5 * HZ, 10);
  	/* Since we use a DRM_NOTE for spurious reports it would be
  	 * inconsistent to let __ratelimit() automatically print a warning for
  	 * throttling.
  	 */
-	ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
+	ratelimit_set_flags(&i915->perf.oa.spurious_report_rs,
  			    RATELIMIT_MSG_ON_RELEASE);
	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
-	format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
+	format_size = i915->perf.oa.oa_formats[props->oa_format].size;
stream->sample_flags |= SAMPLE_OA_REPORT;
  	stream->sample_size += format_size;
- dev_priv->perf.oa.oa_buffer.format_size = format_size;
-	if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
+	i915->perf.oa.oa_buffer.format_size = format_size;
+	if (WARN_ON(i915->perf.oa.oa_buffer.format_size == 0))
  		return -EINVAL;
- dev_priv->perf.oa.oa_buffer.format =
-		dev_priv->perf.oa.oa_formats[props->oa_format].format;
+	i915->perf.oa.oa_buffer.format =
+		i915->perf.oa.oa_formats[props->oa_format].format;
- dev_priv->perf.oa.periodic = props->oa_periodic;
-	if (dev_priv->perf.oa.periodic)
-		dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
+	i915->perf.oa.periodic = props->oa_periodic;
+	if (i915->perf.oa.periodic)
+		i915->perf.oa.period_exponent = props->oa_period_exponent;
if (stream->ctx) {
  		ret = oa_get_render_ctx_id(stream);
@@ -2094,7 +2094,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
  		}
  	}
- ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
+	ret = get_oa_config(i915, props->metrics_set, &stream->oa_config);
  	if (ret) {
  		DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
  		goto err_config;
@@ -2112,43 +2112,43 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
  	 *   In our case we are expecting that taking pm + FORCEWAKE
  	 *   references will effectively disable RC6.
  	 */
-	stream->wakeref = intel_runtime_pm_get(dev_priv);
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	stream->wakeref = intel_runtime_pm_get(i915);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- ret = alloc_oa_buffer(dev_priv);
+	ret = alloc_oa_buffer(i915);
  	if (ret)
  		goto err_oa_buf_alloc;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+	ret = i915_mutex_lock_interruptible(&i915->drm);
  	if (ret)
  		goto err_lock;
stream->ops = &i915_oa_stream_ops;
-	dev_priv->perf.oa.exclusive_stream = stream;
+	i915->perf.oa.exclusive_stream = stream;
- ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
+	ret = i915->perf.oa.ops.enable_metric_set(stream);
  	if (ret) {
  		DRM_DEBUG("Unable to enable metric set\n");
  		goto err_enable;
  	}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
	return 0;
err_enable:
-	dev_priv->perf.oa.exclusive_stream = NULL;
-	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915->perf.oa.exclusive_stream = NULL;
+	i915->perf.oa.ops.disable_metric_set(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
err_lock:
-	free_oa_buffer(dev_priv);
+	free_oa_buffer(i915);
err_oa_buf_alloc:
-	put_oa_config(dev_priv, stream->oa_config);
+	put_oa_config(i915, stream->oa_config);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-	intel_runtime_pm_put(dev_priv, stream->wakeref);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	intel_runtime_pm_put(i915, stream->wakeref);
err_config:
  	if (stream->ctx)
@@ -2238,7 +2238,7 @@ static ssize_t i915_perf_read(struct file *file,
  			      loff_t *ppos)
  {
  	struct i915_perf_stream *stream = file->private_data;
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	ssize_t ret;
/* To ensure it's handled consistently we simply treat all reads of a
@@ -2261,15 +2261,15 @@ static ssize_t i915_perf_read(struct file *file,
  			if (ret)
  				return ret;
- mutex_lock(&dev_priv->perf.lock);
+			mutex_lock(&i915->perf.lock);
  			ret = i915_perf_read_locked(stream, file,
  						    buf, count, ppos);
-			mutex_unlock(&dev_priv->perf.lock);
+			mutex_unlock(&i915->perf.lock);
  		} while (ret == -EAGAIN);
  	} else {
-		mutex_lock(&dev_priv->perf.lock);
+		mutex_lock(&i915->perf.lock);
  		ret = i915_perf_read_locked(stream, file, buf, count, ppos);
-		mutex_unlock(&dev_priv->perf.lock);
+		mutex_unlock(&i915->perf.lock);
  	}
/* We allow the poll checking to sometimes report false positive EPOLLIN
@@ -2284,7 +2284,7 @@ static ssize_t i915_perf_read(struct file *file,
  		/* Maybe make ->pollin per-stream state if we support multiple
  		 * concurrent streams in the future.
  		 */
-		dev_priv->perf.oa.pollin = false;
+		i915->perf.oa.pollin = false;
  	}
return ret;
@@ -2292,13 +2292,13 @@ static ssize_t i915_perf_read(struct file *file,
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
  {
-	struct drm_i915_private *dev_priv =
-		container_of(hrtimer, typeof(*dev_priv),
+	struct drm_i915_private *i915 =
+		container_of(hrtimer, typeof(*i915),
  			     perf.oa.poll_check_timer);
- if (oa_buffer_check_unlocked(dev_priv)) {
-		dev_priv->perf.oa.pollin = true;
-		wake_up(&dev_priv->perf.oa.poll_wq);
+	if (oa_buffer_check_unlocked(i915)) {
+		i915->perf.oa.pollin = true;
+		wake_up(&i915->perf.oa.poll_wq);
  	}
hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
@@ -2308,7 +2308,7 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
/**
   * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @stream: An i915 perf stream
   * @file: An i915 perf stream file
   * @wait: poll() state table
@@ -2322,7 +2322,7 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
   *
   * Returns: any poll events that are ready without sleeping
   */
-static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
+static __poll_t i915_perf_poll_locked(struct drm_i915_private *i915,
  					  struct i915_perf_stream *stream,
  					  struct file *file,
  					  poll_table *wait)
@@ -2337,7 +2337,7 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
  	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
  	 * samples to read.
  	 */
-	if (dev_priv->perf.oa.pollin)
+	if (i915->perf.oa.pollin)
  		events |= EPOLLIN;
return events;
@@ -2359,12 +2359,12 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
  static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
  {
  	struct i915_perf_stream *stream = file->private_data;
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	__poll_t ret;
- mutex_lock(&dev_priv->perf.lock);
-	ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
-	mutex_unlock(&dev_priv->perf.lock);
+	mutex_lock(&i915->perf.lock);
+	ret = i915_perf_poll_locked(i915, stream, file, wait);
+	mutex_unlock(&i915->perf.lock);
return ret;
  }
@@ -2461,12 +2461,12 @@ static long i915_perf_ioctl(struct file *file,
  			    unsigned long arg)
  {
  	struct i915_perf_stream *stream = file->private_data;
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
  	long ret;
- mutex_lock(&dev_priv->perf.lock);
+	mutex_lock(&i915->perf.lock);
  	ret = i915_perf_ioctl_locked(stream, cmd, arg);
-	mutex_unlock(&dev_priv->perf.lock);
+	mutex_unlock(&i915->perf.lock);
return ret;
  }
@@ -2511,11 +2511,11 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
  static int i915_perf_release(struct inode *inode, struct file *file)
  {
  	struct i915_perf_stream *stream = file->private_data;
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->i915;
- mutex_lock(&dev_priv->perf.lock);
+	mutex_lock(&i915->perf.lock);
  	i915_perf_destroy_locked(stream);
-	mutex_unlock(&dev_priv->perf.lock);
+	mutex_unlock(&i915->perf.lock);
return 0;
  }
@@ -2537,7 +2537,7 @@ static const struct file_operations fops = {
/**
   * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @param: The open parameters passed to 'DRM_I915_PERF_OPEN`
   * @props: individually validated u64 property value pairs
   * @file: drm file
@@ -2560,7 +2560,7 @@ static const struct file_operations fops = {
   * Returns: zero on success or a negative error code.
   */
  static int
-i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
+i915_perf_open_ioctl_locked(struct drm_i915_private *i915,
  			    struct drm_i915_perf_open_param *param,
  			    struct perf_open_properties *props,
  			    struct drm_file *file)
@@ -2599,7 +2599,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
  	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
  	 * enable the OA unit by default.
  	 */
-	if (IS_HASWELL(dev_priv) && specific_ctx)
+	if (IS_HASWELL(i915) && specific_ctx)
  		privileged_op = false;
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
@@ -2620,7 +2620,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
  		goto err_ctx;
  	}
- stream->dev_priv = dev_priv;
+	stream->i915 = i915;
  	stream->ctx = specific_ctx;
ret = i915_oa_stream_init(stream, param, props);
@@ -2636,7 +2636,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
  		goto err_flags;
  	}
- list_add(&stream->link, &dev_priv->perf.streams);
+	list_add(&stream->link, &i915->perf.streams);
if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
  		f_flags |= O_CLOEXEC;
@@ -2668,15 +2668,15 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
  	return ret;
  }
-static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
+static u64 oa_exponent_to_ns(struct drm_i915_private *i915, int exponent)
  {
  	return div64_u64(1000000000ULL * (2ULL << exponent),
-			 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
+			 1000ULL * RUNTIME_INFO(i915)->cs_timestamp_frequency_khz);
  }
/**
   * read_properties_unlocked - validate + copy userspace stream open properties
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @uprops: The array of u64 key value pairs given by userspace
   * @n_props: The number of key value pairs expected in @uprops
   * @props: The stream configuration built up while validating properties
@@ -2689,7 +2689,7 @@ static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
   * we shouldn't validate or assume anything about ordering here. This doesn't
   * rule out defining new properties with ordering requirements in the future.
   */
-static int read_properties_unlocked(struct drm_i915_private *dev_priv,
+static int read_properties_unlocked(struct drm_i915_private *i915,
  				    u64 __user *uprops,
  				    u32 n_props,
  				    struct perf_open_properties *props)
@@ -2755,7 +2755,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
  					  value);
  				return -EINVAL;
  			}
-			if (!dev_priv->perf.oa.oa_formats[value].size) {
+			if (!i915->perf.oa.oa_formats[value].size) {
  				DRM_DEBUG("Unsupported OA report format %llu\n",
  					  value);
  				return -EINVAL;
@@ -2776,7 +2776,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
  			 */
BUILD_BUG_ON(sizeof(oa_period) != 8);
-			oa_period = oa_exponent_to_ns(dev_priv, value);
+			oa_period = oa_exponent_to_ns(i915, value);
/* This check is primarily to ensure that oa_period <=
  			 * UINT32_MAX (before passing to do_div which only
@@ -2839,13 +2839,13 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
  int i915_perf_open_ioctl(struct drm_device *dev, void *data,
  			 struct drm_file *file)
  {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *i915 = dev->dev_private;
  	struct drm_i915_perf_open_param *param = data;
  	struct perf_open_properties props;
  	u32 known_open_flags;
  	int ret;
- if (!dev_priv->perf.initialized) {
+	if (!i915->perf.initialized) {
  		DRM_DEBUG("i915 perf interface not available for this system\n");
  		return -ENOTSUPP;
  	}
@@ -2858,124 +2858,124 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
  		return -EINVAL;
  	}
- ret = read_properties_unlocked(dev_priv,
+	ret = read_properties_unlocked(i915,
  				       u64_to_user_ptr(param->properties_ptr),
  				       param->num_properties,
  				       &props);
  	if (ret)
  		return ret;
- mutex_lock(&dev_priv->perf.lock);
-	ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
-	mutex_unlock(&dev_priv->perf.lock);
+	mutex_lock(&i915->perf.lock);
+	ret = i915_perf_open_ioctl_locked(i915, param, &props, file);
+	mutex_unlock(&i915->perf.lock);
return ret;
  }
/**
   * i915_perf_register - exposes i915-perf to userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * In particular OA metric sets are advertised under a sysfs metrics/
   * directory allowing userspace to enumerate valid IDs that can be
   * used to open an i915-perf stream.
   */
-void i915_perf_register(struct drm_i915_private *dev_priv)
+void i915_perf_register(struct drm_i915_private *i915)
  {
  	int ret;
- if (!dev_priv->perf.initialized)
+	if (!i915->perf.initialized)
  		return;
/* To be sure we're synchronized with an attempted
  	 * i915_perf_open_ioctl(); considering that we register after
  	 * being exposed to userspace.
  	 */
-	mutex_lock(&dev_priv->perf.lock);
+	mutex_lock(&i915->perf.lock);
- dev_priv->perf.metrics_kobj =
+	i915->perf.metrics_kobj =
  		kobject_create_and_add("metrics",
-				       &dev_priv->drm.primary->kdev->kobj);
-	if (!dev_priv->perf.metrics_kobj)
+				       &i915->drm.primary->kdev->kobj);
+	if (!i915->perf.metrics_kobj)
  		goto exit;
- sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
-
-	if (INTEL_GEN(dev_priv) >= 11) {
-		i915_perf_load_test_config_icl(dev_priv);
-	} else if (IS_CANNONLAKE(dev_priv)) {
-		i915_perf_load_test_config_cnl(dev_priv);
-	} else if (IS_COFFEELAKE(dev_priv)) {
-		if (IS_CFL_GT2(dev_priv))
-			i915_perf_load_test_config_cflgt2(dev_priv);
-		if (IS_CFL_GT3(dev_priv))
-			i915_perf_load_test_config_cflgt3(dev_priv);
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		i915_perf_load_test_config_glk(dev_priv);
-	} else if (IS_KABYLAKE(dev_priv)) {
-		if (IS_KBL_GT2(dev_priv))
-			i915_perf_load_test_config_kblgt2(dev_priv);
-		else if (IS_KBL_GT3(dev_priv))
-			i915_perf_load_test_config_kblgt3(dev_priv);
-	} else if (IS_BROXTON(dev_priv)) {
-		i915_perf_load_test_config_bxt(dev_priv);
-	} else if (IS_SKYLAKE(dev_priv)) {
-		if (IS_SKL_GT2(dev_priv))
-			i915_perf_load_test_config_sklgt2(dev_priv);
-		else if (IS_SKL_GT3(dev_priv))
-			i915_perf_load_test_config_sklgt3(dev_priv);
-		else if (IS_SKL_GT4(dev_priv))
-			i915_perf_load_test_config_sklgt4(dev_priv);
-	} else if (IS_CHERRYVIEW(dev_priv)) {
-		i915_perf_load_test_config_chv(dev_priv);
-	} else if (IS_BROADWELL(dev_priv)) {
-		i915_perf_load_test_config_bdw(dev_priv);
-	} else if (IS_HASWELL(dev_priv)) {
-		i915_perf_load_test_config_hsw(dev_priv);
-}
-
-	if (dev_priv->perf.oa.test_config.id == 0)
+	sysfs_attr_init(&i915->perf.oa.test_config.sysfs_metric_id.attr);
+
+	if (INTEL_GEN(i915) >= 11) {
+		i915_perf_load_test_config_icl(i915);
+	} else if (IS_CANNONLAKE(i915)) {
+		i915_perf_load_test_config_cnl(i915);
+	} else if (IS_COFFEELAKE(i915)) {
+		if (IS_CFL_GT2(i915))
+			i915_perf_load_test_config_cflgt2(i915);
+		if (IS_CFL_GT3(i915))
+			i915_perf_load_test_config_cflgt3(i915);
+	} else if (IS_GEMINILAKE(i915)) {
+		i915_perf_load_test_config_glk(i915);
+	} else if (IS_KABYLAKE(i915)) {
+		if (IS_KBL_GT2(i915))
+			i915_perf_load_test_config_kblgt2(i915);
+		else if (IS_KBL_GT3(i915))
+			i915_perf_load_test_config_kblgt3(i915);
+	} else if (IS_BROXTON(i915)) {
+		i915_perf_load_test_config_bxt(i915);
+	} else if (IS_SKYLAKE(i915)) {
+		if (IS_SKL_GT2(i915))
+			i915_perf_load_test_config_sklgt2(i915);
+		else if (IS_SKL_GT3(i915))
+			i915_perf_load_test_config_sklgt3(i915);
+		else if (IS_SKL_GT4(i915))
+			i915_perf_load_test_config_sklgt4(i915);
+	} else if (IS_CHERRYVIEW(i915)) {
+		i915_perf_load_test_config_chv(i915);
+	} else if (IS_BROADWELL(i915)) {
+		i915_perf_load_test_config_bdw(i915);
+	} else if (IS_HASWELL(i915)) {
+		i915_perf_load_test_config_hsw(i915);
+}
+
+	if (i915->perf.oa.test_config.id == 0)
  		goto sysfs_error;
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
-				 &dev_priv->perf.oa.test_config.sysfs_metric);
+	ret = sysfs_create_group(i915->perf.metrics_kobj,
+				 &i915->perf.oa.test_config.sysfs_metric);
  	if (ret)
  		goto sysfs_error;
- atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);
+	atomic_set(&i915->perf.oa.test_config.ref_count, 1);
	goto exit;
sysfs_error:
-	kobject_put(dev_priv->perf.metrics_kobj);
-	dev_priv->perf.metrics_kobj = NULL;
+	kobject_put(i915->perf.metrics_kobj);
+	i915->perf.metrics_kobj = NULL;
exit:
-	mutex_unlock(&dev_priv->perf.lock);
+	mutex_unlock(&i915->perf.lock);
  }
/**
   * i915_perf_unregister - hide i915-perf from userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * i915-perf state cleanup is split up into an 'unregister' and
   * 'deinit' phase where the interface is first hidden from
   * userspace by i915_perf_unregister() before cleaning up
   * remaining state in i915_perf_fini().
   */
-void i915_perf_unregister(struct drm_i915_private *dev_priv)
+void i915_perf_unregister(struct drm_i915_private *i915)
  {
-	if (!dev_priv->perf.metrics_kobj)
+	if (!i915->perf.metrics_kobj)
  		return;
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
-			   &dev_priv->perf.oa.test_config.sysfs_metric);
+	sysfs_remove_group(i915->perf.metrics_kobj,
+			   &i915->perf.oa.test_config.sysfs_metric);
- kobject_put(dev_priv->perf.metrics_kobj);
-	dev_priv->perf.metrics_kobj = NULL;
+	kobject_put(i915->perf.metrics_kobj);
+	i915->perf.metrics_kobj = NULL;
  }
-static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen8_is_valid_flex_addr(struct drm_i915_private *i915, u32 addr)
  {
  	static const i915_reg_t flex_eu_regs[] = {
  		EU_PERF_CNTL0,
@@ -2995,7 +2995,7 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
  	return false;
  }
-static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *i915, u32 addr)
  {
  	return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
  		addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
@@ -3005,7 +3005,7 @@ static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32
  		 addr <= i915_mmio_reg_offset(OACEC7_1));
  }
-static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen7_is_valid_mux_addr(struct drm_i915_private *i915, u32 addr)
  {
  	return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
  		(addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
@@ -3016,34 +3016,34 @@ static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
  		 addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
  }
-static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen8_is_valid_mux_addr(struct drm_i915_private *i915, u32 addr)
  {
-	return gen7_is_valid_mux_addr(dev_priv, addr) ||
+	return gen7_is_valid_mux_addr(i915, addr) ||
  		addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
  		(addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
  		 addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
  }
-static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen10_is_valid_mux_addr(struct drm_i915_private *i915, u32 addr)
  {
-	return gen8_is_valid_mux_addr(dev_priv, addr) ||
+	return gen8_is_valid_mux_addr(i915, addr) ||
  		addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) ||
  		(addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
  		 addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
  }
-static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool hsw_is_valid_mux_addr(struct drm_i915_private *i915, u32 addr)
  {
-	return gen7_is_valid_mux_addr(dev_priv, addr) ||
+	return gen7_is_valid_mux_addr(i915, addr) ||
  		(addr >= 0x25100 && addr <= 0x2FF90) ||
  		(addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
  		 addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
  		addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
  }
-static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool chv_is_valid_mux_addr(struct drm_i915_private *i915, u32 addr)
  {
-	return gen7_is_valid_mux_addr(dev_priv, addr) ||
+	return gen7_is_valid_mux_addr(i915, addr) ||
  		(addr >= 0x182300 && addr <= 0x1823A4);
  }
@@ -3066,8 +3066,8 @@ static u32 mask_reg_value(u32 reg, u32 val)
  	return val;
  }
-static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
-					 bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
+static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *i915,
+					 bool (*is_valid)(struct drm_i915_private *i915, u32 addr),
  					 u32 __user *regs,
  					 u32 n_regs)
  {
@@ -3097,7 +3097,7 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
  		if (err)
  			goto addr_err;
- if (!is_valid(dev_priv, addr)) {
+		if (!is_valid(i915, addr)) {
  			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
  			err = -EINVAL;
  			goto addr_err;
@@ -3130,7 +3130,7 @@ static ssize_t show_dynamic_id(struct device *dev,
  	return sprintf(buf, "%d\n", oa_config->id);
  }
-static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
+static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *i915,
  					 struct i915_oa_config *oa_config)
  {
  	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
@@ -3145,7 +3145,7 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
  	oa_config->sysfs_metric.name = oa_config->uuid;
  	oa_config->sysfs_metric.attrs = oa_config->attrs;
- return sysfs_create_group(dev_priv->perf.metrics_kobj,
+	return sysfs_create_group(i915->perf.metrics_kobj,
  				  &oa_config->sysfs_metric);
  }
@@ -3165,17 +3165,17 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
  int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
  			       struct drm_file *file)
  {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *i915 = dev->dev_private;
  	struct drm_i915_perf_oa_config *args = data;
  	struct i915_oa_config *oa_config, *tmp;
  	int err, id;
- if (!dev_priv->perf.initialized) {
+	if (!i915->perf.initialized) {
  		DRM_DEBUG("i915 perf interface not available for this system\n");
  		return -ENOTSUPP;
  	}
- if (!dev_priv->perf.metrics_kobj) {
+	if (!i915->perf.metrics_kobj) {
  		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
  		return -EINVAL;
  	}
@@ -3213,8 +3213,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
oa_config->mux_regs_len = args->n_mux_regs;
  	oa_config->mux_regs =
-		alloc_oa_regs(dev_priv,
-			      dev_priv->perf.oa.ops.is_valid_mux_reg,
+		alloc_oa_regs(i915,
+			      i915->perf.oa.ops.is_valid_mux_reg,
  			      u64_to_user_ptr(args->mux_regs_ptr),
  			      args->n_mux_regs);
@@ -3226,8 +3226,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
	oa_config->b_counter_regs_len = args->n_boolean_regs;
  	oa_config->b_counter_regs =
-		alloc_oa_regs(dev_priv,
-			      dev_priv->perf.oa.ops.is_valid_b_counter_reg,
+		alloc_oa_regs(i915,
+			      i915->perf.oa.ops.is_valid_b_counter_reg,
  			      u64_to_user_ptr(args->boolean_regs_ptr),
  			      args->n_boolean_regs);
@@ -3237,7 +3237,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
  		goto reg_err;
  	}
- if (INTEL_GEN(dev_priv) < 8) {
+	if (INTEL_GEN(i915) < 8) {
  		if (args->n_flex_regs != 0) {
  			err = -EINVAL;
  			goto reg_err;
@@ -3245,8 +3245,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
  	} else {
  		oa_config->flex_regs_len = args->n_flex_regs;
  		oa_config->flex_regs =
-			alloc_oa_regs(dev_priv,
-				      dev_priv->perf.oa.ops.is_valid_flex_reg,
+			alloc_oa_regs(i915,
+				      i915->perf.oa.ops.is_valid_flex_reg,
  				      u64_to_user_ptr(args->flex_regs_ptr),
  				      args->n_flex_regs);
@@ -3257,14 +3257,14 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
  		}
  	}
- err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+	err = mutex_lock_interruptible(&i915->perf.metrics_lock);
  	if (err)
  		goto reg_err;
/* We shouldn't have too many configs, so this iteration shouldn't be
  	 * too costly.
  	 */
-	idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
+	idr_for_each_entry(&i915->perf.metrics_idr, tmp, id) {
  		if (!strcmp(tmp->uuid, oa_config->uuid)) {
  			DRM_DEBUG("OA config already exists with this uuid\n");
  			err = -EADDRINUSE;
@@ -3272,14 +3272,14 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
  		}
  	}
- err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
+	err = create_dynamic_oa_sysfs_entry(i915, oa_config);
  	if (err) {
  		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
  		goto sysfs_err;
  	}
/* Config id 0 is invalid, id 1 for kernel stored test config. */
-	oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
+	oa_config->id = idr_alloc(&i915->perf.metrics_idr,
  				  oa_config, 2,
  				  0, GFP_KERNEL);
  	if (oa_config->id < 0) {
@@ -3288,16 +3288,16 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
  		goto sysfs_err;
  	}
- mutex_unlock(&dev_priv->perf.metrics_lock);
+	mutex_unlock(&i915->perf.metrics_lock);
DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id); return oa_config->id; sysfs_err:
-	mutex_unlock(&dev_priv->perf.metrics_lock);
+	mutex_unlock(&i915->perf.metrics_lock);
  reg_err:
-	put_oa_config(dev_priv, oa_config);
+	put_oa_config(i915, oa_config);
  	DRM_DEBUG("Failed to add new OA config\n");
  	return err;
  }
@@ -3316,12 +3316,12 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
  int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
  				  struct drm_file *file)
  {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *i915 = dev->dev_private;
  	u64 *arg = data;
  	struct i915_oa_config *oa_config;
  	int ret;
- if (!dev_priv->perf.initialized) {
+	if (!i915->perf.initialized) {
  		DRM_DEBUG("i915 perf interface not available for this system\n");
  		return -ENOTSUPP;
  	}
@@ -3331,11 +3331,11 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
  		return -EACCES;
  	}
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+	ret = mutex_lock_interruptible(&i915->perf.metrics_lock);
  	if (ret)
  		goto lock_err;
- oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
+	oa_config = idr_find(&i915->perf.metrics_idr, *arg);
  	if (!oa_config) {
  		DRM_DEBUG("Failed to remove unknown OA config\n");
  		ret = -ENOENT;
@@ -3344,17 +3344,17 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
	GEM_BUG_ON(*arg != oa_config->id);
-	sysfs_remove_group(dev_priv->perf.metrics_kobj,
+	sysfs_remove_group(i915->perf.metrics_kobj,
  			   &oa_config->sysfs_metric);
- idr_remove(&dev_priv->perf.metrics_idr, *arg);
+	idr_remove(&i915->perf.metrics_idr, *arg);
DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id); - put_oa_config(dev_priv, oa_config);
+	put_oa_config(i915, oa_config);
config_err:
-	mutex_unlock(&dev_priv->perf.metrics_lock);
+	mutex_unlock(&i915->perf.metrics_lock);
  lock_err:
  	return ret;
  }
@@ -3403,135 +3403,135 @@ static struct ctl_table dev_root[] = {
/**
   * i915_perf_init - initialize i915-perf state on module load
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * Initializes i915-perf state without exposing anything to userspace.
   *
   * Note: i915-perf initialization is split into an 'init' and 'register'
   * phase with the i915_perf_register() exposing state to userspace.
   */
-void i915_perf_init(struct drm_i915_private *dev_priv)
+void i915_perf_init(struct drm_i915_private *i915)
  {
-	if (IS_HASWELL(dev_priv)) {
-		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+	if (IS_HASWELL(i915)) {
+		i915->perf.oa.ops.is_valid_b_counter_reg =
  			gen7_is_valid_b_counter_addr;
-		dev_priv->perf.oa.ops.is_valid_mux_reg =
+		i915->perf.oa.ops.is_valid_mux_reg =
  			hsw_is_valid_mux_addr;
-		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
-		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
-		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
-		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
-		dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
-		dev_priv->perf.oa.ops.read = gen7_oa_read;
-		dev_priv->perf.oa.ops.oa_hw_tail_read =
+		i915->perf.oa.ops.is_valid_flex_reg = NULL;
+		i915->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
+		i915->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
+		i915->perf.oa.ops.oa_enable = gen7_oa_enable;
+		i915->perf.oa.ops.oa_disable = gen7_oa_disable;
+		i915->perf.oa.ops.read = gen7_oa_read;
+		i915->perf.oa.ops.oa_hw_tail_read =
  			gen7_oa_hw_tail_read;
- dev_priv->perf.oa.oa_formats = hsw_oa_formats;
-	} else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
+		i915->perf.oa.oa_formats = hsw_oa_formats;
+	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
  		/* Note: that although we could theoretically also support the
  		 * legacy ringbuffer mode on BDW (and earlier iterations of
  		 * this driver, before upstreaming did this) it didn't seem
  		 * worth the complexity to maintain now that BDW+ enable
  		 * execlist mode by default.
  		 */
-		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
+		i915->perf.oa.oa_formats = gen8_plus_oa_formats;
- dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
-		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
-		dev_priv->perf.oa.ops.read = gen8_oa_read;
-		dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+		i915->perf.oa.ops.oa_enable = gen8_oa_enable;
+		i915->perf.oa.ops.oa_disable = gen8_oa_disable;
+		i915->perf.oa.ops.read = gen8_oa_read;
+		i915->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN_RANGE(dev_priv, 8, 9)) {
-			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+		if (IS_GEN_RANGE(i915, 8, 9)) {
+			i915->perf.oa.ops.is_valid_b_counter_reg =
  				gen7_is_valid_b_counter_addr;
-			dev_priv->perf.oa.ops.is_valid_mux_reg =
+			i915->perf.oa.ops.is_valid_mux_reg =
  				gen8_is_valid_mux_addr;
-			dev_priv->perf.oa.ops.is_valid_flex_reg =
+			i915->perf.oa.ops.is_valid_flex_reg =
  				gen8_is_valid_flex_addr;
- if (IS_CHERRYVIEW(dev_priv)) {
-				dev_priv->perf.oa.ops.is_valid_mux_reg =
+			if (IS_CHERRYVIEW(i915)) {
+				i915->perf.oa.ops.is_valid_mux_reg =
  					chv_is_valid_mux_addr;
  			}
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
-			dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
+			i915->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
+			i915->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
- if (IS_GEN(dev_priv, 8)) {
-				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
-				dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
+			if (IS_GEN(i915, 8)) {
+				i915->perf.oa.ctx_oactxctrl_offset = 0x120;
+				i915->perf.oa.ctx_flexeu0_offset = 0x2ce;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
+				i915->perf.oa.gen8_valid_ctx_bit = (1<<25);
  			} else {
-				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
-				dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+				i915->perf.oa.ctx_oactxctrl_offset = 0x128;
+				i915->perf.oa.ctx_flexeu0_offset = 0x3de;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+				i915->perf.oa.gen8_valid_ctx_bit = (1<<16);
  			}
-		} else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
-			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+		} else if (IS_GEN_RANGE(i915, 10, 11)) {
+			i915->perf.oa.ops.is_valid_b_counter_reg =
  				gen7_is_valid_b_counter_addr;
-			dev_priv->perf.oa.ops.is_valid_mux_reg =
+			i915->perf.oa.ops.is_valid_mux_reg =
  				gen10_is_valid_mux_addr;
-			dev_priv->perf.oa.ops.is_valid_flex_reg =
+			i915->perf.oa.ops.is_valid_flex_reg =
  				gen8_is_valid_flex_addr;
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
-			dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
+			i915->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
+			i915->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
-			dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+			i915->perf.oa.ctx_oactxctrl_offset = 0x128;
+			i915->perf.oa.ctx_flexeu0_offset = 0x3de;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+			i915->perf.oa.gen8_valid_ctx_bit = (1<<16);
  		}
  	}
- if (dev_priv->perf.oa.ops.enable_metric_set) {
-		hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
+	if (i915->perf.oa.ops.enable_metric_set) {
+		hrtimer_init(&i915->perf.oa.poll_check_timer,
  				CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-		dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
-		init_waitqueue_head(&dev_priv->perf.oa.poll_wq);
+		i915->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
+		init_waitqueue_head(&i915->perf.oa.poll_wq);
- INIT_LIST_HEAD(&dev_priv->perf.streams);
-		mutex_init(&dev_priv->perf.lock);
-		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
+		INIT_LIST_HEAD(&i915->perf.streams);
+		mutex_init(&i915->perf.lock);
+		spin_lock_init(&i915->perf.oa.oa_buffer.ptr_lock);
oa_sample_rate_hard_limit = 1000 *
-			(RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
-		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
+			(RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
+		i915->perf.sysctl_header = register_sysctl_table(dev_root);
-		mutex_init(&dev_priv->perf.metrics_lock);
-		idr_init(&dev_priv->perf.metrics_idr);
+		mutex_init(&i915->perf.metrics_lock);
+		idr_init(&i915->perf.metrics_idr);
-		dev_priv->perf.initialized = true;
+		i915->perf.initialized = true;
  	}
  }
static int destroy_config(int id, void *p, void *data)
  {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
  	struct i915_oa_config *oa_config = p;
-	put_oa_config(dev_priv, oa_config);
+	put_oa_config(i915, oa_config);
return 0;
  }
/**
   * i915_perf_fini - Counter part to i915_perf_init()
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   */
-void i915_perf_fini(struct drm_i915_private *dev_priv)
+void i915_perf_fini(struct drm_i915_private *i915)
  {
-	if (!dev_priv->perf.initialized)
+	if (!i915->perf.initialized)
  		return;
-	idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
-	idr_destroy(&dev_priv->perf.metrics_idr);
+	idr_for_each(&i915->perf.metrics_idr, destroy_config, i915);
+	idr_destroy(&i915->perf.metrics_idr);
-	unregister_sysctl_table(dev_priv->perf.sysctl_header);
+	unregister_sysctl_table(i915->perf.sysctl_header);
-	memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
+	memset(&i915->perf.oa.ops, 0, sizeof(i915->perf.oa.ops));
-	dev_priv->perf.initialized = false;
+	i915->perf.initialized = false;
  }
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index eb9c0e0e545c..c4861c12100c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -159,24 +159,24 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
  }
static void
-engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
+engines_sample(struct drm_i915_private *i915, unsigned int period_ns)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
  	intel_wakeref_t wakeref;
  	unsigned long flags;
-	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
  		return;
wakeref = 0;
-	if (READ_ONCE(dev_priv->gt.awake))
-		wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+	if (READ_ONCE(i915->gt.awake))
+		wakeref = intel_runtime_pm_get_if_in_use(i915);
  	if (!wakeref)
  		return;
-	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	for_each_engine(engine, dev_priv, id) {
+	spin_lock_irqsave(&i915->uncore.lock, flags);
+	for_each_engine(engine, i915, id) {
  		struct intel_engine_pmu *pmu = &engine->pmu;
  		bool busy;
  		u32 val;
@@ -205,9 +205,9 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
  		if (busy)
  			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
  	}
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+	spin_unlock_irqrestore(&i915->uncore.lock, flags);
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
  }
static void
@@ -217,33 +217,33 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
  }
static void
-frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
+frequency_sample(struct drm_i915_private *i915, unsigned int period_ns)
  {
-	if (dev_priv->pmu.enable &
+	if (i915->pmu.enable &
  	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
  		u32 val;
-		val = dev_priv->gt_pm.rps.cur_freq;
-		if (dev_priv->gt.awake) {
+		val = i915->gt_pm.rps.cur_freq;
+		if (i915->gt.awake) {
  			intel_wakeref_t wakeref;
-			with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
-				val = intel_uncore_read_notrace(&dev_priv->uncore,
+			with_intel_runtime_pm_if_in_use(i915, wakeref) {
+				val = intel_uncore_read_notrace(&i915->uncore,
  								GEN6_RPSTAT1);
-				val = intel_get_cagf(dev_priv, val);
+				val = intel_get_cagf(i915, val);
  			}
  		}
-		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
-				intel_gpu_freq(dev_priv, val),
+		add_sample_mult(&i915->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+				intel_gpu_freq(i915, val),
  				period_ns / 1000);
  	}
-	if (dev_priv->pmu.enable &
+	if (i915->pmu.enable &
  	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
-		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
-				intel_gpu_freq(dev_priv,
-					       dev_priv->gt_pm.rps.cur_freq),
+		add_sample_mult(&i915->pmu.sample[__I915_SAMPLE_FREQ_REQ],
+				intel_gpu_freq(i915,
+					       i915->gt_pm.rps.cur_freq),
  				period_ns / 1000);
  	}
  }
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 7b7016171057..87606362b56f 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -31,10 +31,10 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
  	return 0;
  }
-static int query_topology_info(struct drm_i915_private *dev_priv,
+static int query_topology_info(struct drm_i915_private *i915,
  			       struct drm_i915_query_item *query_item)
  {
-	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
  	struct drm_i915_query_topology_info topo;
  	u32 slice_length, subslice_length, eu_length, total_length;
  	u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
@@ -143,7 +143,7 @@ query_engine_info(struct drm_i915_private *i915,
  	return len;
  }
-static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
+static int (* const i915_query_funcs[])(struct drm_i915_private *i915,
  					struct drm_i915_query_item *query_item) = {
  	query_topology_info,
  	query_engine_info,
@@ -151,7 +151,7 @@ static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_i915_query *args = data;
  	struct drm_i915_query_item __user *user_item_ptr =
  		u64_to_user_ptr(args->items_ptr);
@@ -180,7 +180,7 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
  			func_idx = array_index_nospec(func_idx,
  						      ARRAY_SIZE(i915_query_funcs));
-			ret = i915_query_funcs[func_idx](dev_priv, &item);
+			ret = i915_query_funcs[func_idx](i915, &item);
  		}
/* Only write the length back to userspace if they differ. */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index edf9f93934a1..b522970bdf74 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -205,7 +205,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  #define VLV_MIPI_BASE			VLV_DISPLAY_BASE
  #define BXT_MIPI_BASE			0x60000
-#define DISPLAY_MMIO_BASE(dev_priv)	(INTEL_INFO(dev_priv)->display_mmio_offset)
+#define DISPLAY_MMIO_BASE(i915)	(INTEL_INFO(i915)->display_mmio_offset)
/*
   * Given the first two numbers __a and __b of arbitrarily many evenly spaced
@@ -247,15 +247,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
   * Device info offset array based helpers for groups of registers with unevenly
   * spaced base offsets.
   */
-#define _MMIO_PIPE2(pipe, reg)		_MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
-					      INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
-					      DISPLAY_MMIO_BASE(dev_priv))
-#define _MMIO_TRANS2(pipe, reg)		_MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \
-					      INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
-					      DISPLAY_MMIO_BASE(dev_priv))
-#define _CURSOR2(pipe, reg)		_MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
-					      INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
-					      DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_PIPE2(pipe, reg)		_MMIO(INTEL_INFO(i915)->pipe_offsets[pipe] - \
+					      INTEL_INFO(i915)->pipe_offsets[PIPE_A] + (reg) + \
+					      DISPLAY_MMIO_BASE(i915))
+#define _MMIO_TRANS2(pipe, reg)		_MMIO(INTEL_INFO(i915)->trans_offsets[(pipe)] - \
+					      INTEL_INFO(i915)->trans_offsets[TRANSCODER_A] + (reg) + \
+					      DISPLAY_MMIO_BASE(i915))
+#define _CURSOR2(pipe, reg)		_MMIO(INTEL_INFO(i915)->cursor_offsets[(pipe)] - \
+					      INTEL_INFO(i915)->cursor_offsets[PIPE_A] + (reg) + \
+					      DISPLAY_MMIO_BASE(i915))
#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
  #define _MASKED_FIELD(mask, value) ({					   \
@@ -1307,7 +1307,7 @@ enum i915_power_well_id {
  #define  DPIO_CMNRST			(1 << 0)
#define DPIO_PHY(pipe) ((pipe) >> 1)
-#define DPIO_PHY_IOSF_PORT(phy)		(dev_priv->dpio_phy_iosf_port[phy])
+#define DPIO_PHY_IOSF_PORT(phy)		(i915->dpio_phy_iosf_port[phy])
/*
   * Per pipe/PLL DPIO regs
@@ -3004,9 +3004,9 @@ enum i915_power_well_id {
  #define PM_VEBOX_CS_ERROR_INTERRUPT		(1 << 12) /* hsw+ */
  #define PM_VEBOX_USER_INTERRUPT			(1 << 10) /* hsw+ */
-#define GT_PARITY_ERROR(dev_priv) \
+#define GT_PARITY_ERROR(i915) \
  	(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
-	 (IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+	 (IS_HASWELL(i915) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
/* These are all the "old" interrupts */
  #define ILK_BSD_USER_INTERRUPT				(1 << 5)
@@ -3195,7 +3195,7 @@ enum i915_power_well_id {
  /*
   * GPIO regs
   */
-#define GPIO(gpio)		_MMIO(dev_priv->gpio_mmio_base + 0x5010 + \
+#define GPIO(gpio)		_MMIO(i915->gpio_mmio_base + 0x5010 + \
  				      4 * (gpio))
# define GPIO_CLOCK_DIR_MASK (1 << 0)
@@ -3213,7 +3213,7 @@ enum i915_power_well_id {
  # define GPIO_DATA_VAL_IN		(1 << 12)
  # define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
-#define GMBUS0			_MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
+#define GMBUS0			_MMIO(i915->gpio_mmio_base + 0x5100) /* clock/port select */
  #define   GMBUS_AKSV_SELECT	(1 << 11)
  #define   GMBUS_RATE_100KHZ	(0 << 8)
  #define   GMBUS_RATE_50KHZ	(1 << 8)
@@ -3240,7 +3240,7 @@ enum i915_power_well_id {
  #define   GMBUS_PIN_12_TC4_ICP	12
#define GMBUS_NUM_PINS 13 /* including 0 */
-#define GMBUS1			_MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
+#define GMBUS1			_MMIO(i915->gpio_mmio_base + 0x5104) /* command/status */
  #define   GMBUS_SW_CLR_INT	(1 << 31)
  #define   GMBUS_SW_RDY		(1 << 30)
  #define   GMBUS_ENT		(1 << 29) /* enable timeout */
@@ -3255,7 +3255,7 @@ enum i915_power_well_id {
  #define   GMBUS_SLAVE_ADDR_SHIFT 1
  #define   GMBUS_SLAVE_READ	(1 << 0)
  #define   GMBUS_SLAVE_WRITE	(0 << 0)
-#define GMBUS2			_MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
+#define GMBUS2			_MMIO(i915->gpio_mmio_base + 0x5108) /* status */
  #define   GMBUS_INUSE		(1 << 15)
  #define   GMBUS_HW_WAIT_PHASE	(1 << 14)
  #define   GMBUS_STALL_TIMEOUT	(1 << 13)
@@ -3263,22 +3263,22 @@ enum i915_power_well_id {
  #define   GMBUS_HW_RDY		(1 << 11)
  #define   GMBUS_SATOER		(1 << 10)
  #define   GMBUS_ACTIVE		(1 << 9)
-#define GMBUS3			_MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
-#define GMBUS4			_MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
+#define GMBUS3			_MMIO(i915->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
+#define GMBUS4			_MMIO(i915->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
  #define   GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
  #define   GMBUS_NAK_EN		(1 << 3)
  #define   GMBUS_IDLE_EN		(1 << 2)
  #define   GMBUS_HW_WAIT_EN	(1 << 1)
  #define   GMBUS_HW_RDY_EN	(1 << 0)
-#define GMBUS5			_MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
+#define GMBUS5			_MMIO(i915->gpio_mmio_base + 0x5120) /* byte index */
  #define   GMBUS_2BYTE_INDEX_EN	(1 << 31)
/*
   * Clock control & power management
   */
-#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
-#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018)
-#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030)
+#define _DPLL_A (DISPLAY_MMIO_BASE(i915) + 0x6014)
+#define _DPLL_B (DISPLAY_MMIO_BASE(i915) + 0x6018)
+#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(i915) + 0x6030)
  #define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
#define VGA0 _MMIO(0x6000)
@@ -3375,9 +3375,9 @@ enum i915_power_well_id {
  #define   SDVO_MULTIPLIER_SHIFT_HIRES		4
  #define   SDVO_MULTIPLIER_SHIFT_VGA		0
-#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c)
-#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020)
-#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c)
+#define _DPLL_A_MD (DISPLAY_MMIO_BASE(i915) + 0x601c)
+#define _DPLL_B_MD (DISPLAY_MMIO_BASE(i915) + 0x6020)
+#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(i915) + 0x603c)
  #define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
/*
@@ -3449,7 +3449,7 @@ enum i915_power_well_id {
  #define  DSTATE_PLL_D3_OFF			(1 << 3)
  #define  DSTATE_GFX_CLOCK_GATING		(1 << 1)
  #define  DSTATE_DOT_CLOCK_GATING		(1 << 0)
-#define DSPCLK_GATE_D	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200)
+#define DSPCLK_GATE_D	_MMIO(DISPLAY_MMIO_BASE(i915) + 0x6200)
  # define DPUNIT_B_CLOCK_GATE_DISABLE		(1 << 30) /* 965 */
  # define VSUNIT_CLOCK_GATE_DISABLE		(1 << 29) /* 965 */
  # define VRHUNIT_CLOCK_GATE_DISABLE		(1 << 28) /* 965 */
@@ -3589,7 +3589,7 @@ enum i915_power_well_id {
  #define _PALETTE_A		0xa000
  #define _PALETTE_B		0xa800
  #define _CHV_PALETTE_C		0xc000
-#define PALETTE(pipe, i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
+#define PALETTE(pipe, i)	_MMIO(DISPLAY_MMIO_BASE(i915) + \
  				      _PICK((pipe), _PALETTE_A,		\
  					    _PALETTE_B, _CHV_PALETTE_C) + \
  				      (i) * 4)
@@ -3936,8 +3936,8 @@ enum i915_power_well_id {
  #define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)
  #define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
  #define INTERVAL_0_833_US(us)	(((us) * 6) / 5)
-#define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \
-				(IS_GEN9_LP(dev_priv) ? \
+#define GT_INTERVAL_FROM_US(i915, us) (INTEL_GEN(i915) >= 9 ? \
+				(IS_GEN9_LP(i915) ? \
  				INTERVAL_0_833_US(us) : \
  				INTERVAL_1_33_US(us)) : \
  				INTERVAL_1_28_US(us))
@@ -3945,8 +3945,8 @@ enum i915_power_well_id {
  #define INTERVAL_1_28_TO_US(interval)  (((interval) << 7) / 100)
  #define INTERVAL_1_33_TO_US(interval)  (((interval) << 2) / 3)
  #define INTERVAL_0_833_TO_US(interval) (((interval) * 5)  / 6)
-#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \
-                           (IS_GEN9_LP(dev_priv) ? \
+#define GT_PM_INTERVAL_TO_US(i915, interval) (INTEL_GEN(i915) >= 9 ? \
+                           (IS_GEN9_LP(i915) ? \
                             INTERVAL_0_833_TO_US(interval) : \
                             INTERVAL_1_33_TO_US(interval)) : \
                             INTERVAL_1_28_TO_US(interval))
@@ -4219,7 +4219,7 @@ enum {
  /* HSW+ eDP PSR registers */
  #define HSW_EDP_PSR_BASE	0x64800
  #define BDW_EDP_PSR_BASE	0x6f800
-#define EDP_PSR_CTL				_MMIO(dev_priv->psr_mmio_base + 0)
+#define EDP_PSR_CTL				_MMIO(i915->psr_mmio_base + 0)
  #define   EDP_PSR_ENABLE			(1 << 31)
  #define   BDW_PSR_SINGLE_FRAME			(1 << 30)
  #define   EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK	(1 << 29) /* SW can't modify */
@@ -4256,16 +4256,16 @@ enum {
  #define   EDP_PSR_TRANSCODER_A_SHIFT		8
  #define   EDP_PSR_TRANSCODER_EDP_SHIFT		0
-#define EDP_PSR_AUX_CTL				_MMIO(dev_priv->psr_mmio_base + 0x10)
+#define EDP_PSR_AUX_CTL				_MMIO(i915->psr_mmio_base + 0x10)
  #define   EDP_PSR_AUX_CTL_TIME_OUT_MASK		(3 << 26)
  #define   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK	(0x1f << 20)
  #define   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK	(0xf << 16)
  #define   EDP_PSR_AUX_CTL_ERROR_INTERRUPT	(1 << 11)
  #define   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK	(0x7ff)
-#define EDP_PSR_AUX_DATA(i)			_MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
+#define EDP_PSR_AUX_DATA(i)			_MMIO(i915->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
-#define EDP_PSR_STATUS				_MMIO(dev_priv->psr_mmio_base + 0x40)
+#define EDP_PSR_STATUS				_MMIO(i915->psr_mmio_base + 0x40)
  #define   EDP_PSR_STATUS_STATE_MASK		(7 << 29)
  #define   EDP_PSR_STATUS_STATE_SHIFT		29
  #define   EDP_PSR_STATUS_STATE_IDLE		(0 << 29)
@@ -4290,10 +4290,10 @@ enum {
  #define   EDP_PSR_STATUS_SENDING_TP1		(1 << 4)
  #define   EDP_PSR_STATUS_IDLE_MASK		0xf
-#define EDP_PSR_PERF_CNT		_MMIO(dev_priv->psr_mmio_base + 0x44)
+#define EDP_PSR_PERF_CNT		_MMIO(i915->psr_mmio_base + 0x44)
  #define   EDP_PSR_PERF_CNT_MASK		0xffffff
-#define EDP_PSR_DEBUG				_MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
+#define EDP_PSR_DEBUG				_MMIO(i915->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
  #define   EDP_PSR_DEBUG_MASK_MAX_SLEEP         (1 << 28)
  #define   EDP_PSR_DEBUG_MASK_LPSP              (1 << 27)
  #define   EDP_PSR_DEBUG_MASK_MEMUP             (1 << 26)
@@ -4405,7 +4405,7 @@ enum {
/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
+#define PORT_HOTPLUG_EN		_MMIO(DISPLAY_MMIO_BASE(i915) + 0x61110)
  #define   PORTB_HOTPLUG_INT_EN			(1 << 29)
  #define   PORTC_HOTPLUG_INT_EN			(1 << 28)
  #define   PORTD_HOTPLUG_INT_EN			(1 << 27)
@@ -4435,7 +4435,7 @@ enum {
  #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
  #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
-#define PORT_HOTPLUG_STAT	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
+#define PORT_HOTPLUG_STAT	_MMIO(DISPLAY_MMIO_BASE(i915) + 0x61114)
  /*
   * HDMI/DP bits are g4x+
   *
@@ -4517,7 +4517,7 @@ enum {
#define PORT_DFT_I9XX _MMIO(0x61150)
  #define   DC_BALANCE_RESET			(1 << 25)
-#define PORT_DFT2_G4X		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
+#define PORT_DFT2_G4X		_MMIO(DISPLAY_MMIO_BASE(i915) + 0x61154)
  #define   DC_BALANCE_RESET_VLV			(1 << 31)
  #define   PIPE_SCRAMBLE_RESET_MASK		((1 << 14) | (0x3 << 0))
  #define   PIPE_C_SCRAMBLE_RESET			(1 << 14) /* chv */
@@ -4719,7 +4719,7 @@ enum {
  #define VLV_PPS_BASE			(VLV_DISPLAY_BASE + PPS_BASE)
  #define PCH_PPS_BASE			0xC7200
-#define _MMIO_PPS(pps_idx, reg)		_MMIO(dev_priv->pps_mmio_base -	\
+#define _MMIO_PPS(pps_idx, reg)		_MMIO(i915->pps_mmio_base -	\
  					      PPS_BASE + (reg) +	\
  					      (pps_idx) * 0x100)
@@ -4792,7 +4792,7 @@ enum {
  #define  PANEL_POWER_CYCLE_DELAY_MASK	REG_GENMASK(4, 0)
/* Panel fitting */
-#define PFIT_CONTROL	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
+#define PFIT_CONTROL	_MMIO(DISPLAY_MMIO_BASE(i915) + 0x61230)
  #define   PFIT_ENABLE		(1 << 31)
  #define   PFIT_PIPE_MASK	(3 << 29)
  #define   PFIT_PIPE_SHIFT	29
@@ -4810,7 +4810,7 @@ enum {
  #define   PFIT_SCALING_PROGRAMMED (1 << 26)
  #define   PFIT_SCALING_PILLAR	(2 << 26)
  #define   PFIT_SCALING_LETTER	(3 << 26)
-#define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
+#define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(i915) + 0x61234)
  /* Pre-965 */
  #define		PFIT_VERT_SCALE_SHIFT		20
  #define		PFIT_VERT_SCALE_MASK		0xfff00000
@@ -4822,25 +4822,25 @@ enum {
  #define		PFIT_HORIZ_SCALE_SHIFT_965	0
  #define		PFIT_HORIZ_SCALE_MASK_965	0x00001fff
-#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
+#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(i915) + 0x61238)
-#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(i915) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(i915) + 0x61350)
  #define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
  					 _VLV_BLC_PWM_CTL2_B)
-#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(i915) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(i915) + 0x61354)
  #define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
  					_VLV_BLC_PWM_CTL_B)
-#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(i915) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(i915) + 0x61360)
  #define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
  					 _VLV_BLC_HIST_CTL_B)
/* Backlight control */
-#define BLC_PWM_CTL2	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2	_MMIO(DISPLAY_MMIO_BASE(i915) + 0x61250) /* 965+ only */
  #define   BLM_PWM_ENABLE		(1 << 31)
  #define   BLM_COMBINATION_MODE		(1 << 30) /* gen4 only */
  #define   BLM_PIPE_SELECT		(1 << 29)
@@ -4863,7 +4863,7 @@ enum {
  #define   BLM_PHASE_IN_COUNT_MASK	(0xff << 8)
  #define   BLM_PHASE_IN_INCR_SHIFT	(0)
  #define   BLM_PHASE_IN_INCR_MASK	(0xff << 0)
-#define BLC_PWM_CTL	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define BLC_PWM_CTL	_MMIO(DISPLAY_MMIO_BASE(i915) + 0x61254)
  /*
   * This is the most significant 15 bits of the number of backlight cycles in a
   * complete cycle of the modulated backlight control.
@@ -4885,7 +4885,7 @@ enum {
  #define   BACKLIGHT_DUTY_CYCLE_MASK_PNV		(0xfffe)
  #define   BLM_POLARITY_PNV			(1 << 0) /* pnv only */
-#define BLC_HIST_CTL	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define BLC_HIST_CTL	_MMIO(DISPLAY_MMIO_BASE(i915) + 0x61260)
  #define  BLM_HISTOGRAM_ENABLE			(1 << 31)
/* New registers for PCH-split platforms. Safe where new bits show up, the
@@ -5510,47 +5510,47 @@ enum {
   * is 20 bytes in each direction, hence the 5 fixed
   * data registers
   */
-#define _DPA_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
-#define _DPA_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
-#define _DPA_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
-#define _DPA_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
-#define _DPA_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
-#define _DPA_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
-
-#define _DPB_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
-#define _DPB_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
-#define _DPB_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
-#define _DPB_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
-#define _DPB_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
-#define _DPB_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
-
-#define _DPC_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
-#define _DPC_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
-#define _DPC_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
-#define _DPC_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
-#define _DPC_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
-#define _DPC_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
-
-#define _DPD_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
-#define _DPD_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
-#define _DPD_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
-#define _DPD_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
-#define _DPD_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
-#define _DPD_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
-
-#define _DPE_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
-#define _DPE_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
-#define _DPE_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
-#define _DPE_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
-#define _DPE_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
-#define _DPE_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
-
-#define _DPF_AUX_CH_CTL		(DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
-#define _DPF_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
-#define _DPF_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
-#define _DPF_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
-#define _DPF_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
-#define _DPF_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
+#define _DPA_AUX_CH_CTL		(DISPLAY_MMIO_BASE(i915) + 0x64010)
+#define _DPA_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(i915) + 0x64014)
+#define _DPA_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(i915) + 0x64018)
+#define _DPA_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(i915) + 0x6401c)
+#define _DPA_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(i915) + 0x64020)
+#define _DPA_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(i915) + 0x64024)
+
+#define _DPB_AUX_CH_CTL		(DISPLAY_MMIO_BASE(i915) + 0x64110)
+#define _DPB_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(i915) + 0x64114)
+#define _DPB_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(i915) + 0x64118)
+#define _DPB_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(i915) + 0x6411c)
+#define _DPB_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(i915) + 0x64120)
+#define _DPB_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(i915) + 0x64124)
+
+#define _DPC_AUX_CH_CTL		(DISPLAY_MMIO_BASE(i915) + 0x64210)
+#define _DPC_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(i915) + 0x64214)
+#define _DPC_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(i915) + 0x64218)
+#define _DPC_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(i915) + 0x6421c)
+#define _DPC_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(i915) + 0x64220)
+#define _DPC_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(i915) + 0x64224)
+
+#define _DPD_AUX_CH_CTL		(DISPLAY_MMIO_BASE(i915) + 0x64310)
+#define _DPD_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(i915) + 0x64314)
+#define _DPD_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(i915) + 0x64318)
+#define _DPD_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(i915) + 0x6431c)
+#define _DPD_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(i915) + 0x64320)
+#define _DPD_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(i915) + 0x64324)
+
+#define _DPE_AUX_CH_CTL		(DISPLAY_MMIO_BASE(i915) + 0x64410)
+#define _DPE_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(i915) + 0x64414)
+#define _DPE_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(i915) + 0x64418)
+#define _DPE_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(i915) + 0x6441c)
+#define _DPE_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(i915) + 0x64420)
+#define _DPE_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(i915) + 0x64424)
+
+#define _DPF_AUX_CH_CTL		(DISPLAY_MMIO_BASE(i915) + 0x64510)
+#define _DPF_AUX_CH_DATA1	(DISPLAY_MMIO_BASE(i915) + 0x64514)
+#define _DPF_AUX_CH_DATA2	(DISPLAY_MMIO_BASE(i915) + 0x64518)
+#define _DPF_AUX_CH_DATA3	(DISPLAY_MMIO_BASE(i915) + 0x6451c)
+#define _DPF_AUX_CH_DATA4	(DISPLAY_MMIO_BASE(i915) + 0x64520)
+#define _DPF_AUX_CH_DATA5	(DISPLAY_MMIO_BASE(i915) + 0x64524)
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
  #define DP_AUX_CH_DATA(aux_ch, i)	_MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
@@ -5843,7 +5843,7 @@ enum {
  #define   DPINVGTT_STATUS_MASK			0xff
  #define   DPINVGTT_STATUS_MASK_CHV		0xfff
-#define DSPARB			_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
+#define DSPARB			_MMIO(DISPLAY_MMIO_BASE(i915) + 0x70030)
  #define   DSPARB_CSTART_MASK	(0x7f << 7)
  #define   DSPARB_CSTART_SHIFT	7
  #define   DSPARB_BSTART_MASK	(0x7f)
@@ -5878,7 +5878,7 @@ enum {
  #define   DSPARB_SPRITEF_MASK_VLV	(0xff << 8)
/* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
+#define DSPFW1		_MMIO(DISPLAY_MMIO_BASE(i915) + 0x70034)
  #define   DSPFW_SR_SHIFT		23
  #define   DSPFW_SR_MASK			(0x1ff << 23)
  #define   DSPFW_CURSORB_SHIFT		16
@@ -5889,7 +5889,7 @@ enum {
  #define   DSPFW_PLANEA_SHIFT		0
  #define   DSPFW_PLANEA_MASK		(0x7f << 0)
  #define   DSPFW_PLANEA_MASK_VLV		(0xff << 0) /* vlv/chv */
-#define DSPFW2		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
+#define DSPFW2		_MMIO(DISPLAY_MMIO_BASE(i915) + 0x70038)
  #define   DSPFW_FBC_SR_EN		(1 << 31)	  /* g4x */
  #define   DSPFW_FBC_SR_SHIFT		28
  #define   DSPFW_FBC_SR_MASK		(0x7 << 28) /* g4x */
@@ -5905,7 +5905,7 @@ enum {
  #define   DSPFW_SPRITEA_SHIFT		0
  #define   DSPFW_SPRITEA_MASK		(0x7f << 0) /* g4x */
  #define   DSPFW_SPRITEA_MASK_VLV	(0xff << 0) /* vlv/chv */
-#define DSPFW3		_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
+#define DSPFW3		_MMIO(DISPLAY_MMIO_BASE(i915) + 0x7003c)
  #define   DSPFW_HPLL_SR_EN		(1 << 31)
  #define   PINEVIEW_SELF_REFRESH_EN	(1 << 30)
  #define   DSPFW_CURSOR_SR_SHIFT		24
@@ -6322,35 +6322,35 @@ enum {
   * [10:1f] all
   * [30:32] all
   */
-#define SWF0(i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
-#define SWF1(i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
-#define SWF3(i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
+#define SWF0(i)	_MMIO(DISPLAY_MMIO_BASE(i915) + 0x70410 + (i) * 4)
+#define SWF1(i)	_MMIO(DISPLAY_MMIO_BASE(i915) + 0x71410 + (i) * 4)
+#define SWF3(i)	_MMIO(DISPLAY_MMIO_BASE(i915) + 0x72414 + (i) * 4)
  #define SWF_ILK(i)	_MMIO(0x4F000 + (i) * 4)
/* Pipe B */
-#define _PIPEBDSL		(DISPLAY_MMIO_BASE(dev_priv) + 0x71000)
-#define _PIPEBCONF		(DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
-#define _PIPEBSTAT		(DISPLAY_MMIO_BASE(dev_priv) + 0x71024)
+#define _PIPEBDSL		(DISPLAY_MMIO_BASE(i915) + 0x71000)
+#define _PIPEBCONF		(DISPLAY_MMIO_BASE(i915) + 0x71008)
+#define _PIPEBSTAT		(DISPLAY_MMIO_BASE(i915) + 0x71024)
  #define _PIPEBFRAMEHIGH		0x71040
  #define _PIPEBFRAMEPIXEL	0x71044
-#define _PIPEB_FRMCOUNT_G4X	(DISPLAY_MMIO_BASE(dev_priv) + 0x71040)
-#define _PIPEB_FLIPCOUNT_G4X	(DISPLAY_MMIO_BASE(dev_priv) + 0x71044)
+#define _PIPEB_FRMCOUNT_G4X	(DISPLAY_MMIO_BASE(i915) + 0x71040)
+#define _PIPEB_FLIPCOUNT_G4X	(DISPLAY_MMIO_BASE(i915) + 0x71044)
/* Display B control */
-#define _DSPBCNTR		(DISPLAY_MMIO_BASE(dev_priv) + 0x71180)
+#define _DSPBCNTR		(DISPLAY_MMIO_BASE(i915) + 0x71180)
  #define   DISPPLANE_ALPHA_TRANS_ENABLE		(1 << 15)
  #define   DISPPLANE_ALPHA_TRANS_DISABLE		0
  #define   DISPPLANE_SPRITE_ABOVE_DISPLAY	0
  #define   DISPPLANE_SPRITE_ABOVE_OVERLAY	(1)
-#define _DSPBADDR		(DISPLAY_MMIO_BASE(dev_priv) + 0x71184)
-#define _DSPBSTRIDE		(DISPLAY_MMIO_BASE(dev_priv) + 0x71188)
-#define _DSPBPOS		(DISPLAY_MMIO_BASE(dev_priv) + 0x7118C)
-#define _DSPBSIZE		(DISPLAY_MMIO_BASE(dev_priv) + 0x71190)
-#define _DSPBSURF		(DISPLAY_MMIO_BASE(dev_priv) + 0x7119C)
-#define _DSPBTILEOFF		(DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
-#define _DSPBOFFSET		(DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
-#define _DSPBSURFLIVE		(DISPLAY_MMIO_BASE(dev_priv) + 0x711AC)
+#define _DSPBADDR		(DISPLAY_MMIO_BASE(i915) + 0x71184)
+#define _DSPBSTRIDE		(DISPLAY_MMIO_BASE(i915) + 0x71188)
+#define _DSPBPOS		(DISPLAY_MMIO_BASE(i915) + 0x7118C)
+#define _DSPBSIZE		(DISPLAY_MMIO_BASE(i915) + 0x71190)
+#define _DSPBSURF		(DISPLAY_MMIO_BASE(i915) + 0x7119C)
+#define _DSPBTILEOFF		(DISPLAY_MMIO_BASE(i915) + 0x711A4)
+#define _DSPBOFFSET		(DISPLAY_MMIO_BASE(i915) + 0x711A4)
+#define _DSPBSURFLIVE		(DISPLAY_MMIO_BASE(i915) + 0x711AC)
/* ICL DSI 0 and 1 */
  #define _PIPEDSI0CONF		0x7b008
@@ -8946,7 +8946,7 @@ enum {
  #define   GEN9_ENABLE_GPGPU_PREEMPTION	(1 << 2)
/* Audio */
-#define G4X_AUD_VID_DID			_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
+#define G4X_AUD_VID_DID			_MMIO(DISPLAY_MMIO_BASE(i915) + 0x62020)
  #define   INTEL_AUDIO_DEVCL		0x808629FB
  #define   INTEL_AUDIO_DEVBLC		0x80862801
  #define   INTEL_AUDIO_DEVCTG		0x80862802
@@ -10469,8 +10469,8 @@ enum skl_power_gate {
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY		(dev_priv->mipi_mmio_base + 0xb000)
-#define _MIPIC_DEVICE_READY		(dev_priv->mipi_mmio_base + 0xb800)
+#define _MIPIA_DEVICE_READY		(i915->mipi_mmio_base + 0xb000)
+#define _MIPIC_DEVICE_READY		(i915->mipi_mmio_base + 0xb800)
  #define MIPI_DEVICE_READY(port)		_MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
  #define  BUS_POSSESSION					(1 << 3) /* set to give bus to receiver */
  #define  ULPS_STATE_MASK				(3 << 1)
@@ -10479,11 +10479,11 @@ enum skl_power_gate {
  #define  ULPS_STATE_NORMAL_OPERATION			(0 << 1)
  #define  DEVICE_READY					(1 << 0)
-#define _MIPIA_INTR_STAT		(dev_priv->mipi_mmio_base + 0xb004)
-#define _MIPIC_INTR_STAT		(dev_priv->mipi_mmio_base + 0xb804)
+#define _MIPIA_INTR_STAT		(i915->mipi_mmio_base + 0xb004)
+#define _MIPIC_INTR_STAT		(i915->mipi_mmio_base + 0xb804)
  #define MIPI_INTR_STAT(port)		_MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
-#define _MIPIA_INTR_EN			(dev_priv->mipi_mmio_base + 0xb008)
-#define _MIPIC_INTR_EN			(dev_priv->mipi_mmio_base + 0xb808)
+#define _MIPIA_INTR_EN			(i915->mipi_mmio_base + 0xb008)
+#define _MIPIC_INTR_EN			(i915->mipi_mmio_base + 0xb808)
  #define MIPI_INTR_EN(port)		_MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
  #define  TEARING_EFFECT					(1 << 31)
  #define  SPL_PKT_SENT_INTERRUPT				(1 << 30)
@@ -10518,8 +10518,8 @@ enum skl_power_gate {
  #define  RXSOT_SYNC_ERROR				(1 << 1)
  #define  RXSOT_ERROR					(1 << 0)
-#define _MIPIA_DSI_FUNC_PRG		(dev_priv->mipi_mmio_base + 0xb00c)
-#define _MIPIC_DSI_FUNC_PRG		(dev_priv->mipi_mmio_base + 0xb80c)
+#define _MIPIA_DSI_FUNC_PRG		(i915->mipi_mmio_base + 0xb00c)
+#define _MIPIC_DSI_FUNC_PRG		(i915->mipi_mmio_base + 0xb80c)
  #define MIPI_DSI_FUNC_PRG(port)		_MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
  #define  CMD_MODE_DATA_WIDTH_MASK			(7 << 13)
  #define  CMD_MODE_NOT_SUPPORTED				(0 << 13)
@@ -10541,78 +10541,78 @@ enum skl_power_gate {
  #define  DATA_LANES_PRG_REG_SHIFT			0
  #define  DATA_LANES_PRG_REG_MASK			(7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb010)
-#define _MIPIC_HS_TX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb810)
+#define _MIPIA_HS_TX_TIMEOUT		(i915->mipi_mmio_base + 0xb010)
+#define _MIPIC_HS_TX_TIMEOUT		(i915->mipi_mmio_base + 0xb810)
  #define MIPI_HS_TX_TIMEOUT(port)	_MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
  #define  HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK		0xffffff
-#define _MIPIA_LP_RX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb014)
-#define _MIPIC_LP_RX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb814)
+#define _MIPIA_LP_RX_TIMEOUT		(i915->mipi_mmio_base + 0xb014)
+#define _MIPIC_LP_RX_TIMEOUT		(i915->mipi_mmio_base + 0xb814)
  #define MIPI_LP_RX_TIMEOUT(port)	_MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
  #define  LOW_POWER_RX_TIMEOUT_COUNTER_MASK		0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT	(dev_priv->mipi_mmio_base + 0xb018)
-#define _MIPIC_TURN_AROUND_TIMEOUT	(dev_priv->mipi_mmio_base + 0xb818)
+#define _MIPIA_TURN_AROUND_TIMEOUT	(i915->mipi_mmio_base + 0xb018)
+#define _MIPIC_TURN_AROUND_TIMEOUT	(i915->mipi_mmio_base + 0xb818)
  #define MIPI_TURN_AROUND_TIMEOUT(port)	_MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
  #define  TURN_AROUND_TIMEOUT_MASK			0x3f
-#define _MIPIA_DEVICE_RESET_TIMER	(dev_priv->mipi_mmio_base + 0xb01c)
-#define _MIPIC_DEVICE_RESET_TIMER	(dev_priv->mipi_mmio_base + 0xb81c)
+#define _MIPIA_DEVICE_RESET_TIMER	(i915->mipi_mmio_base + 0xb01c)
+#define _MIPIC_DEVICE_RESET_TIMER	(i915->mipi_mmio_base + 0xb81c)
  #define MIPI_DEVICE_RESET_TIMER(port)	_MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
  #define  DEVICE_RESET_TIMER_MASK			0xffff
-#define _MIPIA_DPI_RESOLUTION		(dev_priv->mipi_mmio_base + 0xb020)
-#define _MIPIC_DPI_RESOLUTION		(dev_priv->mipi_mmio_base + 0xb820)
+#define _MIPIA_DPI_RESOLUTION		(i915->mipi_mmio_base + 0xb020)
+#define _MIPIC_DPI_RESOLUTION		(i915->mipi_mmio_base + 0xb820)
  #define MIPI_DPI_RESOLUTION(port)	_MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
  #define  VERTICAL_ADDRESS_SHIFT				16
  #define  VERTICAL_ADDRESS_MASK				(0xffff << 16)
  #define  HORIZONTAL_ADDRESS_SHIFT			0
  #define  HORIZONTAL_ADDRESS_MASK			0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE	(dev_priv->mipi_mmio_base + 0xb024)
-#define _MIPIC_DBI_FIFO_THROTTLE	(dev_priv->mipi_mmio_base + 0xb824)
+#define _MIPIA_DBI_FIFO_THROTTLE	(i915->mipi_mmio_base + 0xb024)
+#define _MIPIC_DBI_FIFO_THROTTLE	(i915->mipi_mmio_base + 0xb824)
  #define MIPI_DBI_FIFO_THROTTLE(port)	_MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
  #define  DBI_FIFO_EMPTY_HALF				(0 << 0)
  #define  DBI_FIFO_EMPTY_QUARTER				(1 << 0)
  #define  DBI_FIFO_EMPTY_7_LOCATIONS			(2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb028)
-#define _MIPIC_HSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb828)
+#define _MIPIA_HSYNC_PADDING_COUNT	(i915->mipi_mmio_base + 0xb028)
+#define _MIPIC_HSYNC_PADDING_COUNT	(i915->mipi_mmio_base + 0xb828)
  #define MIPI_HSYNC_PADDING_COUNT(port)	_MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
-#define _MIPIA_HBP_COUNT		(dev_priv->mipi_mmio_base + 0xb02c)
-#define _MIPIC_HBP_COUNT		(dev_priv->mipi_mmio_base + 0xb82c)
+#define _MIPIA_HBP_COUNT		(i915->mipi_mmio_base + 0xb02c)
+#define _MIPIC_HBP_COUNT		(i915->mipi_mmio_base + 0xb82c)
  #define MIPI_HBP_COUNT(port)		_MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
-#define _MIPIA_HFP_COUNT		(dev_priv->mipi_mmio_base + 0xb030)
-#define _MIPIC_HFP_COUNT		(dev_priv->mipi_mmio_base + 0xb830)
+#define _MIPIA_HFP_COUNT		(i915->mipi_mmio_base + 0xb030)
+#define _MIPIC_HFP_COUNT		(i915->mipi_mmio_base + 0xb830)
  #define MIPI_HFP_COUNT(port)		_MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
-#define _MIPIA_HACTIVE_AREA_COUNT	(dev_priv->mipi_mmio_base + 0xb034)
-#define _MIPIC_HACTIVE_AREA_COUNT	(dev_priv->mipi_mmio_base + 0xb834)
+#define _MIPIA_HACTIVE_AREA_COUNT	(i915->mipi_mmio_base + 0xb034)
+#define _MIPIC_HACTIVE_AREA_COUNT	(i915->mipi_mmio_base + 0xb834)
  #define MIPI_HACTIVE_AREA_COUNT(port)	_MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
-#define _MIPIA_VSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb038)
-#define _MIPIC_VSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb838)
+#define _MIPIA_VSYNC_PADDING_COUNT	(i915->mipi_mmio_base + 0xb038)
+#define _MIPIC_VSYNC_PADDING_COUNT	(i915->mipi_mmio_base + 0xb838)
  #define MIPI_VSYNC_PADDING_COUNT(port)	_MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
-#define _MIPIA_VBP_COUNT		(dev_priv->mipi_mmio_base + 0xb03c)
-#define _MIPIC_VBP_COUNT		(dev_priv->mipi_mmio_base + 0xb83c)
+#define _MIPIA_VBP_COUNT		(i915->mipi_mmio_base + 0xb03c)
+#define _MIPIC_VBP_COUNT		(i915->mipi_mmio_base + 0xb83c)
  #define MIPI_VBP_COUNT(port)		_MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
-#define _MIPIA_VFP_COUNT		(dev_priv->mipi_mmio_base + 0xb040)
-#define _MIPIC_VFP_COUNT		(dev_priv->mipi_mmio_base + 0xb840)
+#define _MIPIA_VFP_COUNT		(i915->mipi_mmio_base + 0xb040)
+#define _MIPIC_VFP_COUNT		(i915->mipi_mmio_base + 0xb840)
  #define MIPI_VFP_COUNT(port)		_MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT	(dev_priv->mipi_mmio_base + 0xb044)
-#define _MIPIC_HIGH_LOW_SWITCH_COUNT	(dev_priv->mipi_mmio_base + 0xb844)
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT	(i915->mipi_mmio_base + 0xb044)
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT	(i915->mipi_mmio_base + 0xb844)
  #define MIPI_HIGH_LOW_SWITCH_COUNT(port)	_MMIO_MIPI(port,	_MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
-#define _MIPIA_DPI_CONTROL		(dev_priv->mipi_mmio_base + 0xb048)
-#define _MIPIC_DPI_CONTROL		(dev_priv->mipi_mmio_base + 0xb848)
+#define _MIPIA_DPI_CONTROL		(i915->mipi_mmio_base + 0xb048)
+#define _MIPIC_DPI_CONTROL		(i915->mipi_mmio_base + 0xb848)
  #define MIPI_DPI_CONTROL(port)		_MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
  #define  DPI_LP_MODE					(1 << 6)
  #define  BACKLIGHT_OFF					(1 << 5)
@@ -10622,27 +10622,27 @@ enum skl_power_gate {
  #define  TURN_ON					(1 << 1)
  #define  SHUTDOWN					(1 << 0)
-#define _MIPIA_DPI_DATA			(dev_priv->mipi_mmio_base + 0xb04c)
-#define _MIPIC_DPI_DATA			(dev_priv->mipi_mmio_base + 0xb84c)
+#define _MIPIA_DPI_DATA			(i915->mipi_mmio_base + 0xb04c)
+#define _MIPIC_DPI_DATA			(i915->mipi_mmio_base + 0xb84c)
  #define MIPI_DPI_DATA(port)		_MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
  #define  COMMAND_BYTE_SHIFT				0
  #define  COMMAND_BYTE_MASK				(0x3f << 0)
-#define _MIPIA_INIT_COUNT		(dev_priv->mipi_mmio_base + 0xb050)
-#define _MIPIC_INIT_COUNT		(dev_priv->mipi_mmio_base + 0xb850)
+#define _MIPIA_INIT_COUNT		(i915->mipi_mmio_base + 0xb050)
+#define _MIPIC_INIT_COUNT		(i915->mipi_mmio_base + 0xb850)
  #define MIPI_INIT_COUNT(port)		_MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
  #define  MASTER_INIT_TIMER_SHIFT			0
  #define  MASTER_INIT_TIMER_MASK				(0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE	(dev_priv->mipi_mmio_base + 0xb054)
-#define _MIPIC_MAX_RETURN_PKT_SIZE	(dev_priv->mipi_mmio_base + 0xb854)
+#define _MIPIA_MAX_RETURN_PKT_SIZE	(i915->mipi_mmio_base + 0xb054)
+#define _MIPIC_MAX_RETURN_PKT_SIZE	(i915->mipi_mmio_base + 0xb854)
  #define MIPI_MAX_RETURN_PKT_SIZE(port)	_MMIO_MIPI(port, \
  			_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
  #define  MAX_RETURN_PKT_SIZE_SHIFT			0
  #define  MAX_RETURN_PKT_SIZE_MASK			(0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT	(dev_priv->mipi_mmio_base + 0xb058)
-#define _MIPIC_VIDEO_MODE_FORMAT	(dev_priv->mipi_mmio_base + 0xb858)
+#define _MIPIA_VIDEO_MODE_FORMAT	(i915->mipi_mmio_base + 0xb058)
+#define _MIPIC_VIDEO_MODE_FORMAT	(i915->mipi_mmio_base + 0xb858)
  #define MIPI_VIDEO_MODE_FORMAT(port)	_MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
  #define  RANDOM_DPI_DISPLAY_RESOLUTION			(1 << 4)
  #define  DISABLE_VIDEO_BTA				(1 << 3)
@@ -10651,8 +10651,8 @@ enum skl_power_gate {
  #define  VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS		(2 << 0)
  #define  VIDEO_MODE_BURST				(3 << 0)
-#define _MIPIA_EOT_DISABLE		(dev_priv->mipi_mmio_base + 0xb05c)
-#define _MIPIC_EOT_DISABLE		(dev_priv->mipi_mmio_base + 0xb85c)
+#define _MIPIA_EOT_DISABLE		(i915->mipi_mmio_base + 0xb05c)
+#define _MIPIC_EOT_DISABLE		(i915->mipi_mmio_base + 0xb85c)
  #define MIPI_EOT_DISABLE(port)		_MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
  #define  BXT_DEFEATURE_DPI_FIFO_CTR			(1 << 9)
  #define  BXT_DPHY_DEFEATURE_EN				(1 << 8)
@@ -10665,35 +10665,35 @@ enum skl_power_gate {
  #define  CLOCKSTOP					(1 << 1)
  #define  EOT_DISABLE					(1 << 0)
-#define _MIPIA_LP_BYTECLK		(dev_priv->mipi_mmio_base + 0xb060)
-#define _MIPIC_LP_BYTECLK		(dev_priv->mipi_mmio_base + 0xb860)
+#define _MIPIA_LP_BYTECLK		(i915->mipi_mmio_base + 0xb060)
+#define _MIPIC_LP_BYTECLK		(i915->mipi_mmio_base + 0xb860)
  #define MIPI_LP_BYTECLK(port)		_MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
  #define  LP_BYTECLK_SHIFT				0
  #define  LP_BYTECLK_MASK				(0xffff << 0)
-#define _MIPIA_TLPX_TIME_COUNT		(dev_priv->mipi_mmio_base + 0xb0a4)
-#define _MIPIC_TLPX_TIME_COUNT		(dev_priv->mipi_mmio_base + 0xb8a4)
+#define _MIPIA_TLPX_TIME_COUNT		(i915->mipi_mmio_base + 0xb0a4)
+#define _MIPIC_TLPX_TIME_COUNT		(i915->mipi_mmio_base + 0xb8a4)
  #define MIPI_TLPX_TIME_COUNT(port)	 _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
-#define _MIPIA_CLK_LANE_TIMING		(dev_priv->mipi_mmio_base + 0xb098)
-#define _MIPIC_CLK_LANE_TIMING		(dev_priv->mipi_mmio_base + 0xb898)
+#define _MIPIA_CLK_LANE_TIMING		(i915->mipi_mmio_base + 0xb098)
+#define _MIPIC_CLK_LANE_TIMING		(i915->mipi_mmio_base + 0xb898)
  #define MIPI_CLK_LANE_TIMING(port)	 _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb064)
-#define _MIPIC_LP_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb864)
+#define _MIPIA_LP_GEN_DATA		(i915->mipi_mmio_base + 0xb064)
+#define _MIPIC_LP_GEN_DATA		(i915->mipi_mmio_base + 0xb864)
  #define MIPI_LP_GEN_DATA(port)		_MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb068)
-#define _MIPIC_HS_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb868)
+#define _MIPIA_HS_GEN_DATA		(i915->mipi_mmio_base + 0xb068)
+#define _MIPIC_HS_GEN_DATA		(i915->mipi_mmio_base + 0xb868)
  #define MIPI_HS_GEN_DATA(port)		_MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
-#define _MIPIA_LP_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb06c)
-#define _MIPIC_LP_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb86c)
+#define _MIPIA_LP_GEN_CTRL		(i915->mipi_mmio_base + 0xb06c)
+#define _MIPIC_LP_GEN_CTRL		(i915->mipi_mmio_base + 0xb86c)
  #define MIPI_LP_GEN_CTRL(port)		_MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb070)
-#define _MIPIC_HS_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb870)
+#define _MIPIA_HS_GEN_CTRL		(i915->mipi_mmio_base + 0xb070)
+#define _MIPIC_HS_GEN_CTRL		(i915->mipi_mmio_base + 0xb870)
  #define MIPI_HS_GEN_CTRL(port)		_MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
  #define  LONG_PACKET_WORD_COUNT_SHIFT			8
  #define  LONG_PACKET_WORD_COUNT_MASK			(0xffff << 8)
@@ -10705,8 +10705,8 @@ enum skl_power_gate {
  #define  DATA_TYPE_MASK					(0x3f << 0)
  /* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT		(dev_priv->mipi_mmio_base + 0xb074)
-#define _MIPIC_GEN_FIFO_STAT		(dev_priv->mipi_mmio_base + 0xb874)
+#define _MIPIA_GEN_FIFO_STAT		(i915->mipi_mmio_base + 0xb074)
+#define _MIPIC_GEN_FIFO_STAT		(i915->mipi_mmio_base + 0xb874)
  #define MIPI_GEN_FIFO_STAT(port)	_MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
  #define  DPI_FIFO_EMPTY					(1 << 28)
  #define  DBI_FIFO_EMPTY					(1 << 27)
@@ -10723,15 +10723,15 @@ enum skl_power_gate {
  #define  HS_DATA_FIFO_HALF_EMPTY			(1 << 1)
  #define  HS_DATA_FIFO_FULL				(1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE		(dev_priv->mipi_mmio_base + 0xb078)
-#define _MIPIC_HS_LS_DBI_ENABLE		(dev_priv->mipi_mmio_base + 0xb878)
+#define _MIPIA_HS_LS_DBI_ENABLE		(i915->mipi_mmio_base + 0xb078)
+#define _MIPIC_HS_LS_DBI_ENABLE		(i915->mipi_mmio_base + 0xb878)
  #define MIPI_HS_LP_DBI_ENABLE(port)	_MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
  #define  DBI_HS_LP_MODE_MASK				(1 << 0)
  #define  DBI_LP_MODE					(1 << 0)
  #define  DBI_HS_MODE					(0 << 0)
-#define _MIPIA_DPHY_PARAM		(dev_priv->mipi_mmio_base + 0xb080)
-#define _MIPIC_DPHY_PARAM		(dev_priv->mipi_mmio_base + 0xb880)
+#define _MIPIA_DPHY_PARAM		(i915->mipi_mmio_base + 0xb080)
+#define _MIPIC_DPHY_PARAM		(i915->mipi_mmio_base + 0xb880)
  #define MIPI_DPHY_PARAM(port)		_MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
  #define  EXIT_ZERO_COUNT_SHIFT				24
  #define  EXIT_ZERO_COUNT_MASK				(0x3f << 24)
@@ -10978,34 +10978,34 @@ enum skl_power_gate {
  #define  TA_TIMEOUT_VALUE(x)		((x) << 0)
/* bits 31:0 */
-#define _MIPIA_DBI_BW_CTRL		(dev_priv->mipi_mmio_base + 0xb084)
-#define _MIPIC_DBI_BW_CTRL		(dev_priv->mipi_mmio_base + 0xb884)
+#define _MIPIA_DBI_BW_CTRL		(i915->mipi_mmio_base + 0xb084)
+#define _MIPIC_DBI_BW_CTRL		(i915->mipi_mmio_base + 0xb884)
  #define MIPI_DBI_BW_CTRL(port)		_MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT		(dev_priv->mipi_mmio_base + 0xb088)
-#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT		(dev_priv->mipi_mmio_base + 0xb888)
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT		(i915->mipi_mmio_base + 0xb088)
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT		(i915->mipi_mmio_base + 0xb888)
  #define MIPI_CLK_LANE_SWITCH_TIME_CNT(port)	_MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
  #define  LP_HS_SSW_CNT_SHIFT				16
  #define  LP_HS_SSW_CNT_MASK				(0xffff << 16)
  #define  HS_LP_PWR_SW_CNT_SHIFT				0
  #define  HS_LP_PWR_SW_CNT_MASK				(0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL		(dev_priv->mipi_mmio_base + 0xb08c)
-#define _MIPIC_STOP_STATE_STALL		(dev_priv->mipi_mmio_base + 0xb88c)
+#define _MIPIA_STOP_STATE_STALL		(i915->mipi_mmio_base + 0xb08c)
+#define _MIPIC_STOP_STATE_STALL		(i915->mipi_mmio_base + 0xb88c)
  #define MIPI_STOP_STATE_STALL(port)	_MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
  #define  STOP_STATE_STALL_COUNTER_SHIFT			0
  #define  STOP_STATE_STALL_COUNTER_MASK			(0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1		(dev_priv->mipi_mmio_base + 0xb090)
-#define _MIPIC_INTR_STAT_REG_1		(dev_priv->mipi_mmio_base + 0xb890)
+#define _MIPIA_INTR_STAT_REG_1		(i915->mipi_mmio_base + 0xb090)
+#define _MIPIC_INTR_STAT_REG_1		(i915->mipi_mmio_base + 0xb890)
  #define MIPI_INTR_STAT_REG_1(port)	_MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1		(dev_priv->mipi_mmio_base + 0xb094)
-#define _MIPIC_INTR_EN_REG_1		(dev_priv->mipi_mmio_base + 0xb894)
+#define _MIPIA_INTR_EN_REG_1		(i915->mipi_mmio_base + 0xb094)
+#define _MIPIC_INTR_EN_REG_1		(i915->mipi_mmio_base + 0xb894)
  #define MIPI_INTR_EN_REG_1(port)	_MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
  #define  RX_CONTENTION_DETECTED				(1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL		(dev_priv->mipi_mmio_base + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL		(i915->mipi_mmio_base + 0xb100)
  #define  DBI_TYPEC_ENABLE				(1 << 31)
  #define  DBI_TYPEC_WIP					(1 << 30)
  #define  DBI_TYPEC_OPTION_SHIFT				28
@@ -11019,8 +11019,8 @@ enum skl_power_gate {
/* MIPI adapter registers */
-#define _MIPIA_CTRL			(dev_priv->mipi_mmio_base + 0xb104)
-#define _MIPIC_CTRL			(dev_priv->mipi_mmio_base + 0xb904)
+#define _MIPIA_CTRL			(i915->mipi_mmio_base + 0xb104)
+#define _MIPIC_CTRL			(i915->mipi_mmio_base + 0xb904)
  #define MIPI_CTRL(port)			_MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
  #define  ESCAPE_CLOCK_DIVIDER_SHIFT			5 /* A only */
  #define  ESCAPE_CLOCK_DIVIDER_MASK			(3 << 5)
@@ -11052,21 +11052,21 @@ enum skl_power_gate {
  #define  GLK_MIPIIO_PORT_POWERED			(1 << 1) /* RO */
  #define  GLK_MIPIIO_ENABLE				(1 << 0)
-#define _MIPIA_DATA_ADDRESS		(dev_priv->mipi_mmio_base + 0xb108)
-#define _MIPIC_DATA_ADDRESS		(dev_priv->mipi_mmio_base + 0xb908)
+#define _MIPIA_DATA_ADDRESS		(i915->mipi_mmio_base + 0xb108)
+#define _MIPIC_DATA_ADDRESS		(i915->mipi_mmio_base + 0xb908)
  #define MIPI_DATA_ADDRESS(port)		_MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
  #define  DATA_MEM_ADDRESS_SHIFT				5
  #define  DATA_MEM_ADDRESS_MASK				(0x7ffffff << 5)
  #define  DATA_VALID					(1 << 0)
-#define _MIPIA_DATA_LENGTH		(dev_priv->mipi_mmio_base + 0xb10c)
-#define _MIPIC_DATA_LENGTH		(dev_priv->mipi_mmio_base + 0xb90c)
+#define _MIPIA_DATA_LENGTH		(i915->mipi_mmio_base + 0xb10c)
+#define _MIPIC_DATA_LENGTH		(i915->mipi_mmio_base + 0xb90c)
  #define MIPI_DATA_LENGTH(port)		_MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
  #define  DATA_LENGTH_SHIFT				0
  #define  DATA_LENGTH_MASK				(0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS		(dev_priv->mipi_mmio_base + 0xb110)
-#define _MIPIC_COMMAND_ADDRESS		(dev_priv->mipi_mmio_base + 0xb910)
+#define _MIPIA_COMMAND_ADDRESS		(i915->mipi_mmio_base + 0xb110)
+#define _MIPIC_COMMAND_ADDRESS		(i915->mipi_mmio_base + 0xb910)
  #define MIPI_COMMAND_ADDRESS(port)	_MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
  #define  COMMAND_MEM_ADDRESS_SHIFT			5
  #define  COMMAND_MEM_ADDRESS_MASK			(0x7ffffff << 5)
@@ -11074,18 +11074,18 @@ enum skl_power_gate {
  #define  MEMORY_WRITE_DATA_FROM_PIPE_RENDERING		(1 << 1)
  #define  COMMAND_VALID					(1 << 0)
-#define _MIPIA_COMMAND_LENGTH		(dev_priv->mipi_mmio_base + 0xb114)
-#define _MIPIC_COMMAND_LENGTH		(dev_priv->mipi_mmio_base + 0xb914)
+#define _MIPIA_COMMAND_LENGTH		(i915->mipi_mmio_base + 0xb114)
+#define _MIPIC_COMMAND_LENGTH		(i915->mipi_mmio_base + 0xb914)
  #define MIPI_COMMAND_LENGTH(port)	_MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
  #define  COMMAND_LENGTH_SHIFT(n)			(8 * (n)) /* n: 0...3 */
  #define  COMMAND_LENGTH_MASK(n)				(0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0	(dev_priv->mipi_mmio_base + 0xb118)
-#define _MIPIC_READ_DATA_RETURN0	(dev_priv->mipi_mmio_base + 0xb918)
+#define _MIPIA_READ_DATA_RETURN0	(i915->mipi_mmio_base + 0xb118)
+#define _MIPIC_READ_DATA_RETURN0	(i915->mipi_mmio_base + 0xb918)
  #define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID		(dev_priv->mipi_mmio_base + 0xb138)
-#define _MIPIC_READ_DATA_VALID		(dev_priv->mipi_mmio_base + 0xb938)
+#define _MIPIA_READ_DATA_VALID		(i915->mipi_mmio_base + 0xb138)
+#define _MIPIC_READ_DATA_VALID		(i915->mipi_mmio_base + 0xb938)
  #define MIPI_READ_DATA_VALID(port)	_MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
  #define  READ_DATA_VALID(n)				(1 << (n))
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 581201bcb81a..5b3ad7c9240e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -31,121 +31,121 @@
  #include "intel_fbc.h"
  #include "intel_gmbus.h"
-static void i915_save_display(struct drm_i915_private *dev_priv)
+static void i915_save_display(struct drm_i915_private *i915)
  {
  	/* Display arbitration control */
-	if (INTEL_GEN(dev_priv) <= 4)
-		dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+	if (INTEL_GEN(i915) <= 4)
+		i915->regfile.saveDSPARB = I915_READ(DSPARB);
/* save FBC interval */
-	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
-		dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+	if (HAS_FBC(i915) && INTEL_GEN(i915) <= 4 && !IS_G4X(i915))
+		i915->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
  }
-static void i915_restore_display(struct drm_i915_private *dev_priv)
+static void i915_restore_display(struct drm_i915_private *i915)
  {
  	/* Display arbitration */
-	if (INTEL_GEN(dev_priv) <= 4)
-		I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+	if (INTEL_GEN(i915) <= 4)
+		I915_WRITE(DSPARB, i915->regfile.saveDSPARB);
/* only restore FBC info on the platform that supports FBC*/
-	intel_fbc_global_disable(dev_priv);
+	intel_fbc_global_disable(i915);
/* restore FBC interval */
-	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
-		I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
+	if (HAS_FBC(i915) && INTEL_GEN(i915) <= 4 && !IS_G4X(i915))
+		I915_WRITE(FBC_CONTROL, i915->regfile.saveFBC_CONTROL);
-	i915_redisable_vga(dev_priv);
+	i915_redisable_vga(i915);
  }
-int i915_save_state(struct drm_i915_private *dev_priv)
+int i915_save_state(struct drm_i915_private *i915)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	int i;
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
-	i915_save_display(dev_priv);
+	i915_save_display(i915);
-	if (IS_GEN(dev_priv, 4))
+	if (IS_GEN(i915, 4))
  		pci_read_config_word(pdev, GCDGMBUS,
-				     &dev_priv->regfile.saveGCDGMBUS);
+				     &i915->regfile.saveGCDGMBUS);
/* Cache mode state */
-	if (INTEL_GEN(dev_priv) < 7)
-		dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+	if (INTEL_GEN(i915) < 7)
+		i915->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
-	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+	i915->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
-	if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
+	if (IS_GEN(i915, 2) && IS_MOBILE(i915)) {
  		for (i = 0; i < 7; i++) {
-			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
-			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+			i915->regfile.saveSWF0[i] = I915_READ(SWF0(i));
+			i915->regfile.saveSWF1[i] = I915_READ(SWF1(i));
  		}
  		for (i = 0; i < 3; i++)
-			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
-	} else if (IS_GEN(dev_priv, 2)) {
+			i915->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+	} else if (IS_GEN(i915, 2)) {
  		for (i = 0; i < 7; i++)
-			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
-	} else if (HAS_GMCH(dev_priv)) {
+			i915->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+	} else if (HAS_GMCH(i915)) {
  		for (i = 0; i < 16; i++) {
-			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
-			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+			i915->regfile.saveSWF0[i] = I915_READ(SWF0(i));
+			i915->regfile.saveSWF1[i] = I915_READ(SWF1(i));
  		}
  		for (i = 0; i < 3; i++)
-			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+			i915->regfile.saveSWF3[i] = I915_READ(SWF3(i));
  	}
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
return 0;
  }
-int i915_restore_state(struct drm_i915_private *dev_priv)
+int i915_restore_state(struct drm_i915_private *i915)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	int i;
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
-	if (IS_GEN(dev_priv, 4))
+	if (IS_GEN(i915, 4))
  		pci_write_config_word(pdev, GCDGMBUS,
-				      dev_priv->regfile.saveGCDGMBUS);
-	i915_restore_display(dev_priv);
+				      i915->regfile.saveGCDGMBUS);
+	i915_restore_display(i915);
/* Cache mode state */
-	if (INTEL_GEN(dev_priv) < 7)
-		I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
+	if (INTEL_GEN(i915) < 7)
+		I915_WRITE(CACHE_MODE_0, i915->regfile.saveCACHE_MODE_0 |
  			   0xffff0000);
/* Memory arbitration state */
-	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
+	I915_WRITE(MI_ARB_STATE, i915->regfile.saveMI_ARB_STATE | 0xffff0000);
/* Scratch space */
-	if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
+	if (IS_GEN(i915, 2) && IS_MOBILE(i915)) {
  		for (i = 0; i < 7; i++) {
-			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
-			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+			I915_WRITE(SWF0(i), i915->regfile.saveSWF0[i]);
+			I915_WRITE(SWF1(i), i915->regfile.saveSWF1[i]);
  		}
  		for (i = 0; i < 3; i++)
-			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
-	} else if (IS_GEN(dev_priv, 2)) {
+			I915_WRITE(SWF3(i), i915->regfile.saveSWF3[i]);
+	} else if (IS_GEN(i915, 2)) {
  		for (i = 0; i < 7; i++)
-			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
-	} else if (HAS_GMCH(dev_priv)) {
+			I915_WRITE(SWF1(i), i915->regfile.saveSWF1[i]);
+	} else if (HAS_GMCH(i915)) {
  		for (i = 0; i < 16; i++) {
-			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
-			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+			I915_WRITE(SWF0(i), i915->regfile.saveSWF0[i]);
+			I915_WRITE(SWF1(i), i915->regfile.saveSWF1[i]);
  		}
  		for (i = 0; i < 3; i++)
-			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+			I915_WRITE(SWF3(i), i915->regfile.saveSWF3[i]);
  	}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
- intel_gmbus_reset(dev_priv);
+	intel_gmbus_reset(i915);
return 0;
  }
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 3ef07b987d40..9c89be3ec4c1 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -42,14 +42,14 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
  }
#ifdef CONFIG_PM
-static u32 calc_residency(struct drm_i915_private *dev_priv,
+static u32 calc_residency(struct drm_i915_private *i915,
  			  i915_reg_t reg)
  {
  	intel_wakeref_t wakeref;
  	u64 res = 0;
- with_intel_runtime_pm(dev_priv, wakeref)
-		res = intel_rc6_residency_us(dev_priv, reg);
+	with_intel_runtime_pm(i915, wakeref)
+		res = intel_rc6_residency_us(i915, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
  }
@@ -57,15 +57,15 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
  static ssize_t
  show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
  	unsigned int mask;
mask = 0;
-	if (HAS_RC6(dev_priv))
+	if (HAS_RC6(i915))
  		mask |= BIT(0);
-	if (HAS_RC6p(dev_priv))
+	if (HAS_RC6p(i915))
  		mask |= BIT(1);
-	if (HAS_RC6pp(dev_priv))
+	if (HAS_RC6pp(i915))
  		mask |= BIT(2);
return snprintf(buf, PAGE_SIZE, "%x\n", mask);
@@ -74,32 +74,32 @@ show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
  static ssize_t
  show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	u32 rc6_residency = calc_residency(i915, GEN6_GT_GFX_RC6);
  	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
  }
static ssize_t
  show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	u32 rc6p_residency = calc_residency(i915, GEN6_GT_GFX_RC6p);
  	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
  }
static ssize_t
  show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	u32 rc6pp_residency = calc_residency(i915, GEN6_GT_GFX_RC6pp);
  	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
  }
static ssize_t
  show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	u32 rc6_residency = calc_residency(i915, VLV_GT_MEDIA_RC6);
  	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
  }
@@ -142,9 +142,9 @@ static const struct attribute_group media_rc6_attr_group = {
  };
  #endif
-static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
  {
-	if (!HAS_L3_DPF(dev_priv))
+	if (!HAS_L3_DPF(i915))
  		return -EPERM;
if (offset % 4 != 0)
@@ -162,14 +162,14 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
  	     loff_t offset, size_t count)
  {
  	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct drm_device *dev = &i915->drm;
  	int slice = (int)(uintptr_t)attr->private;
  	int ret;
	count = round_down(count, 4);
-	ret = l3_access_valid(dev_priv, offset);
+	ret = l3_access_valid(i915, offset);
  	if (ret)
  		return ret;
@@ -179,9 +179,9 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
  	if (ret)
  		return ret;
- if (dev_priv->l3_parity.remap_info[slice])
+	if (i915->l3_parity.remap_info[slice])
  		memcpy(buf,
-		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
+		       i915->l3_parity.remap_info[slice] + (offset/4),
  		       count);
  	else
  		memset(buf, 0, count);
@@ -197,14 +197,14 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
  	      loff_t offset, size_t count)
  {
  	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct drm_device *dev = &i915->drm;
  	struct i915_gem_context *ctx;
  	int slice = (int)(uintptr_t)attr->private;
  	u32 **remap_info;
  	int ret;
- ret = l3_access_valid(dev_priv, offset);
+	ret = l3_access_valid(i915, offset);
  	if (ret)
  		return ret;
@@ -212,7 +212,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
  	if (ret)
  		return ret;
- remap_info = &dev_priv->l3_parity.remap_info[slice];
+	remap_info = &i915->l3_parity.remap_info[slice];
  	if (!*remap_info) {
  		*remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
  		if (!*remap_info) {
@@ -228,7 +228,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
  	memcpy(*remap_info + (offset/4), buf, count);
/* NB: We defer the remapping until we switch to the context */
-	list_for_each_entry(ctx, &dev_priv->contexts.list, link)
+	list_for_each_entry(ctx, &i915->contexts.list, link)
  		ctx->remap_slice |= (1<<slice);
ret = count;
@@ -260,52 +260,52 @@ static const struct bin_attribute dpf_attrs_1 = {
  static ssize_t gt_act_freq_mhz_show(struct device *kdev,
  				    struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
  	intel_wakeref_t wakeref;
  	u32 freq;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		vlv_punit_get(dev_priv);
-		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-		vlv_punit_put(dev_priv);
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		vlv_punit_get(i915);
+		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+		vlv_punit_put(i915);
freq = (freq >> 8) & 0xff;
  	} else {
-		freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
+		freq = intel_get_cagf(i915, I915_READ(GEN6_RPSTAT1));
  	}
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
- return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
+	return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(i915, freq));
  }
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
  				    struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.cur_freq));
+			intel_gpu_freq(i915,
+				       i915->gt_pm.rps.cur_freq));
  }
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.boost_freq));
+			intel_gpu_freq(i915,
+				       i915->gt_pm.rps.boost_freq));
  }
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
  				       struct device_attribute *attr,
  				       const char *buf, size_t count)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	bool boost = false;
  	ssize_t ret;
  	u32 val;
@@ -315,7 +315,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
  		return ret;
/* Validate against (static) hardware limits */
-	val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(i915, val);
  	if (val < rps->min_freq || val > rps->max_freq)
  		return -EINVAL;
@@ -334,28 +334,28 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
  static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
  				     struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.efficient_freq));
+			intel_gpu_freq(i915,
+				       i915->gt_pm.rps.efficient_freq));
  }
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.max_freq_softlimit));
+			intel_gpu_freq(i915,
+				       i915->gt_pm.rps.max_freq_softlimit));
  }
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
  				     struct device_attribute *attr,
  				     const char *buf, size_t count)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	intel_wakeref_t wakeref;
  	u32 val;
  	ssize_t ret;
@@ -364,10 +364,10 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
  	if (ret)
  		return ret;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
  	mutex_lock(&rps->lock);
- val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(i915, val);
  	if (val < rps->min_freq ||
  	    val > rps->max_freq ||
  	    val < rps->min_freq_softlimit) {
@@ -377,7 +377,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (val > rps->rp0_freq)
  		DRM_DEBUG("User requested overclocking to %d\n",
-			  intel_gpu_freq(dev_priv, val));
+			  intel_gpu_freq(i915, val));
	rps->max_freq_softlimit = val;
@@ -388,30 +388,30 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
  	/* We still need *_set_rps to process the new max_delay and
  	 * update the interrupt limits and PMINTRMSK even though
  	 * frequency request may be unchanged. */
-	ret = intel_set_rps(dev_priv, val);
+	ret = intel_set_rps(i915, val);
unlock:
  	mutex_unlock(&rps->lock);
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return ret ?: count;
  }
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.min_freq_softlimit));
+			intel_gpu_freq(i915,
+				       i915->gt_pm.rps.min_freq_softlimit));
  }
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
  				     struct device_attribute *attr,
  				     const char *buf, size_t count)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	intel_wakeref_t wakeref;
  	u32 val;
  	ssize_t ret;
@@ -420,10 +420,10 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
  	if (ret)
  		return ret;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
  	mutex_lock(&rps->lock);
- val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(i915, val);
  	if (val < rps->min_freq ||
  	    val > rps->max_freq ||
  	    val > rps->max_freq_softlimit) {
@@ -440,11 +440,11 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
  	/* We still need *_set_rps to process the new min_delay and
  	 * update the interrupt limits and PMINTRMSK even though
  	 * frequency request may be unchanged. */
-	ret = intel_set_rps(dev_priv, val);
+	ret = intel_set_rps(i915, val);
unlock:
  	mutex_unlock(&rps->lock);
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
return ret ?: count;
  }
@@ -465,16 +465,16 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
  /* For now we have a static number of RP states */
  static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
  {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->rp0_freq);
+		val = intel_gpu_freq(i915, rps->rp0_freq);
  	else if (attr == &dev_attr_gt_RP1_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->rp1_freq);
+		val = intel_gpu_freq(i915, rps->rp1_freq);
  	else if (attr == &dev_attr_gt_RPn_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->min_freq);
+		val = intel_gpu_freq(i915, rps->min_freq);
  	else
  		BUG();
@@ -540,10 +540,10 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
  				 loff_t off, size_t count)
  {
  	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
DRM_DEBUG_DRIVER("Resetting error state\n");
-	i915_reset_error_state(dev_priv);
+	i915_reset_error_state(i915);
return count;
  }
@@ -571,37 +571,37 @@ static void i915_setup_error_capture(struct device *kdev) {}
  static void i915_teardown_error_capture(struct device *kdev) {}
  #endif
-void i915_setup_sysfs(struct drm_i915_private *dev_priv)
+void i915_setup_sysfs(struct drm_i915_private *i915)
  {
-	struct device *kdev = dev_priv->drm.primary->kdev;
+	struct device *kdev = i915->drm.primary->kdev;
  	int ret;
#ifdef CONFIG_PM
-	if (HAS_RC6(dev_priv)) {
+	if (HAS_RC6(i915)) {
  		ret = sysfs_merge_group(&kdev->kobj,
  					&rc6_attr_group);
  		if (ret)
  			DRM_ERROR("RC6 residency sysfs setup failed\n");
  	}
-	if (HAS_RC6p(dev_priv)) {
+	if (HAS_RC6p(i915)) {
  		ret = sysfs_merge_group(&kdev->kobj,
  					&rc6p_attr_group);
  		if (ret)
  			DRM_ERROR("RC6p residency sysfs setup failed\n");
  	}
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		ret = sysfs_merge_group(&kdev->kobj,
  					&media_rc6_attr_group);
  		if (ret)
  			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
  	}
  #endif
-	if (HAS_L3_DPF(dev_priv)) {
+	if (HAS_L3_DPF(i915)) {
  		ret = device_create_bin_file(kdev, &dpf_attrs);
  		if (ret)
  			DRM_ERROR("l3 parity sysfs setup failed\n");
- if (NUM_L3_SLICES(dev_priv) > 1) {
+		if (NUM_L3_SLICES(i915) > 1) {
  			ret = device_create_bin_file(kdev,
  						     &dpf_attrs_1);
  			if (ret)
@@ -610,9 +610,9 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
  	}
ret = 0;
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
-	else if (INTEL_GEN(dev_priv) >= 6)
+	else if (INTEL_GEN(i915) >= 6)
  		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
  	if (ret)
  		DRM_ERROR("RPS sysfs setup failed\n");
@@ -620,13 +620,13 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
  	i915_setup_error_capture(kdev);
  }
-void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
+void i915_teardown_sysfs(struct drm_i915_private *i915)
  {
-	struct device *kdev = dev_priv->drm.primary->kdev;
+	struct device *kdev = i915->drm.primary->kdev;
	i915_teardown_error_capture(kdev);
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		sysfs_remove_files(&kdev->kobj, vlv_attrs);
  	else
  		sysfs_remove_files(&kdev->kobj, gen6_attrs);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 5c8cfaa70d72..9548b9fe4199 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -21,8 +21,8 @@
  /* watermark/fifo updates */
TRACE_EVENT(intel_pipe_enable,
-	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
-	    TP_ARGS(dev_priv, pipe),
+	    TP_PROTO(struct drm_i915_private *i915, enum pipe pipe),
+	    TP_ARGS(i915, pipe),
TP_STRUCT__entry(
  			     __array(u32, frame, 3)
@@ -32,11 +32,11 @@ TRACE_EVENT(intel_pipe_enable,
TP_fast_assign(
  			   enum pipe _pipe;
-			   for_each_pipe(dev_priv, _pipe) {
+			   for_each_pipe(i915, _pipe) {
  				   __entry->frame[_pipe] =
-					   dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
+					   i915->drm.driver->get_vblank_counter(&i915->drm, _pipe);
  				   __entry->scanline[_pipe] =
-					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(i915, _pipe));
  			   }
  			   __entry->pipe = pipe;
  			   ),
@@ -49,8 +49,8 @@ TRACE_EVENT(intel_pipe_enable,
  );
TRACE_EVENT(intel_pipe_disable,
-	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
-	    TP_ARGS(dev_priv, pipe),
+	    TP_PROTO(struct drm_i915_private *i915, enum pipe pipe),
+	    TP_ARGS(i915, pipe),
TP_STRUCT__entry(
  			     __array(u32, frame, 3)
@@ -60,11 +60,11 @@ TRACE_EVENT(intel_pipe_disable,
TP_fast_assign(
  			   enum pipe _pipe;
-			   for_each_pipe(dev_priv, _pipe) {
+			   for_each_pipe(i915, _pipe) {
  				   __entry->frame[_pipe] =
-					   dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
+					   i915->drm.driver->get_vblank_counter(&i915->drm, _pipe);
  				   __entry->scanline[_pipe] =
-					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(i915, _pipe));
  			   }
  			   __entry->pipe = pipe;
  			   ),
@@ -102,8 +102,8 @@ TRACE_EVENT(intel_pipe_crc,
  );
TRACE_EVENT(intel_cpu_fifo_underrun,
-	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
-	    TP_ARGS(dev_priv, pipe),
+	    TP_PROTO(struct drm_i915_private *i915, enum pipe pipe),
+	    TP_ARGS(i915, pipe),
TP_STRUCT__entry(
  			     __field(enum pipe, pipe)
@@ -113,8 +113,8 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
TP_fast_assign(
  			   __entry->pipe = pipe;
-			   __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
-			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+			   __entry->frame = i915->drm.driver->get_vblank_counter(&i915->drm, pipe);
+			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(i915, pipe));
  			   ),
TP_printk("pipe %c, frame=%u, scanline=%u",
@@ -123,8 +123,8 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
  );
TRACE_EVENT(intel_pch_fifo_underrun,
-	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder),
-	    TP_ARGS(dev_priv, pch_transcoder),
+	    TP_PROTO(struct drm_i915_private *i915, enum pipe pch_transcoder),
+	    TP_ARGS(i915, pch_transcoder),
TP_STRUCT__entry(
  			     __field(enum pipe, pipe)
@@ -135,8 +135,8 @@ TRACE_EVENT(intel_pch_fifo_underrun,
  	    TP_fast_assign(
  			   enum pipe pipe = pch_transcoder;
  			   __entry->pipe = pipe;
-			   __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
-			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+			   __entry->frame = i915->drm.driver->get_vblank_counter(&i915->drm, pipe);
+			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(i915, pipe));
  			   ),
TP_printk("pch transcoder %c, frame=%u, scanline=%u",
@@ -145,8 +145,8 @@ TRACE_EVENT(intel_pch_fifo_underrun,
  );
TRACE_EVENT(intel_memory_cxsr,
-	    TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new),
-	    TP_ARGS(dev_priv, old, new),
+	    TP_PROTO(struct drm_i915_private *i915, bool old, bool new),
+	    TP_ARGS(i915, old, new),
TP_STRUCT__entry(
  			     __array(u32, frame, 3)
@@ -157,11 +157,11 @@ TRACE_EVENT(intel_memory_cxsr,
TP_fast_assign(
  			   enum pipe pipe;
-			   for_each_pipe(dev_priv, pipe) {
+			   for_each_pipe(i915, pipe) {
  				   __entry->frame[pipe] =
-					   dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
+					   i915->drm.driver->get_vblank_counter(&i915->drm, pipe);
  				   __entry->scanline[pipe] =
-					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(i915, pipe));
  			   }
  			   __entry->old = old;
  			   __entry->new = new;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 94d3992b599d..96874a4819a9 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -53,14 +53,14 @@
/**
   * i915_check_vgpu - detect virtual GPU
- * @dev_priv: i915 device private
+ * @i915: i915 device private
   *
   * This function is called at the initialization stage, to detect whether
   * running on a vGPU.
   */
-void i915_check_vgpu(struct drm_i915_private *dev_priv)
+void i915_check_vgpu(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
  	u64 magic;
  	u16 version_major;
@@ -76,15 +76,15 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
  		return;
  	}
- dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
+	i915->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
- dev_priv->vgpu.active = true;
+	i915->vgpu.active = true;
  	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
  }
-bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915)
  {
-	return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
+	return i915->vgpu.caps & VGT_CAPS_FULL_PPGTT;
  }
struct _balloon_info_ {
@@ -112,22 +112,22 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
/**
   * intel_vgt_deballoon - deballoon reserved graphics address trunks
- * @dev_priv: i915 device private data
+ * @i915: i915 device private data
   *
   * This function is called to deallocate the ballooned-out graphic memory, when
   * driver is unloaded or when ballooning fails.
   */
-void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
+void intel_vgt_deballoon(struct drm_i915_private *i915)
  {
  	int i;
- if (!intel_vgpu_active(dev_priv))
+	if (!intel_vgpu_active(i915))
  		return;
	DRM_DEBUG("VGT deballoon.\n");
	for (i = 0; i < 4; i++)
-		vgt_deballoon_space(&dev_priv->ggtt, &bl_info.space[i]);
+		vgt_deballoon_space(&i915->ggtt, &bl_info.space[i]);
  }
static int vgt_balloon_space(struct i915_ggtt *ggtt,
@@ -153,7 +153,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
/**
   * intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev_priv: i915 device private data
+ * @i915: i915 device private data
   *
   * This function is called at the initialization stage, to balloon out the
   * graphic address space allocated to other vGPUs, by marking these spaces as
@@ -195,16 +195,16 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
   * Returns:
   * zero on success, non-zero if configuration invalid or ballooning failed
   */
-int intel_vgt_balloon(struct drm_i915_private *dev_priv)
+int intel_vgt_balloon(struct drm_i915_private *i915)
  {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
  	unsigned long unmappable_base, unmappable_size, unmappable_end;
  	int ret;
- if (!intel_vgpu_active(dev_priv))
+	if (!intel_vgpu_active(i915))
  		return 0;
mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index ebe1b7bced98..0fa0bfb88bed 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -26,23 +26,23 @@
#include "i915_pvinfo.h"
-void i915_check_vgpu(struct drm_i915_private *dev_priv);
+void i915_check_vgpu(struct drm_i915_private *i915);
-bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915);
static inline bool
-intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915)
  {
-	return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+	return i915->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
  }
static inline bool
-intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+intel_vgpu_has_huge_gtt(struct drm_i915_private *i915)
  {
-	return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+	return i915->vgpu.caps & VGT_CAPS_HUGE_GTT;
  }
-int intel_vgt_balloon(struct drm_i915_private *dev_priv);
-void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
+int intel_vgt_balloon(struct drm_i915_private *i915);
+void intel_vgt_deballoon(struct drm_i915_private *i915);
#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index cb341e4acf99..e3f4d7006a1b 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -555,7 +555,7 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
  static int
  i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
  {
-	struct drm_i915_private *dev_priv = vma->vm->i915;
+	struct drm_i915_private *i915 = vma->vm->i915;
  	unsigned int cache_level;
  	u64 start, end;
  	int ret;
@@ -581,7 +581,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
end = vma->vm->total;
  	if (flags & PIN_MAPPABLE)
-		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+		end = min_t(u64, end, i915->ggtt.mappable_end);
  	if (flags & PIN_ZONE_4G)
  		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
  	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 74448e6bf749..d0192e093f6f 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -35,32 +35,32 @@
  #include "intel_dsi.h"
  #include "intel_panel.h"
-static inline int header_credits_available(struct drm_i915_private *dev_priv,
+static inline int header_credits_available(struct drm_i915_private *i915,
  					   enum transcoder dsi_trans)
  {
  	return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
  		>> FREE_HEADER_CREDIT_SHIFT;
  }
-static inline int payload_credits_available(struct drm_i915_private *dev_priv,
+static inline int payload_credits_available(struct drm_i915_private *i915,
  					    enum transcoder dsi_trans)
  {
  	return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
  		>> FREE_PLOAD_CREDIT_SHIFT;
  }
-static void wait_for_header_credits(struct drm_i915_private *dev_priv,
+static void wait_for_header_credits(struct drm_i915_private *i915,
  				    enum transcoder dsi_trans)
  {
-	if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
+	if (wait_for_us(header_credits_available(i915, dsi_trans) >=
  			MAX_HEADER_CREDIT, 100))
  		DRM_ERROR("DSI header credits not released\n");
  }
-static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
+static void wait_for_payload_credits(struct drm_i915_private *i915,
  				     enum transcoder dsi_trans)
  {
-	if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
+	if (wait_for_us(payload_credits_available(i915, dsi_trans) >=
  			MAX_PLOAD_CREDIT, 100))
  		DRM_ERROR("DSI payload credits not released\n");
  }
@@ -75,7 +75,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port)
static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	struct mipi_dsi_device *dsi;
  	enum port port;
@@ -85,8 +85,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
  	/* wait for header/payload credits to be released */
  	for_each_dsi_port(port, intel_dsi->ports) {
  		dsi_trans = dsi_port_to_transcoder(port);
-		wait_for_header_credits(dev_priv, dsi_trans);
-		wait_for_payload_credits(dev_priv, dsi_trans);
+		wait_for_header_credits(i915, dsi_trans);
+		wait_for_payload_credits(i915, dsi_trans);
  	}
/* send nop DCS command */
@@ -102,7 +102,7 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
  	/* wait for header credits to be released */
  	for_each_dsi_port(port, intel_dsi->ports) {
  		dsi_trans = dsi_port_to_transcoder(port);
-		wait_for_header_credits(dev_priv, dsi_trans);
+		wait_for_header_credits(i915, dsi_trans);
  	}
/* wait for LP TX in progress bit to be cleared */
@@ -118,7 +118,7 @@ static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
  			       u32 len)
  {
  	struct intel_dsi *intel_dsi = host->intel_dsi;
-	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
  	enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
  	int free_credits;
  	int i, j;
@@ -126,7 +126,7 @@ static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
  	for (i = 0; i < len; i += 4) {
  		u32 tmp = 0;
- free_credits = payload_credits_available(dev_priv, dsi_trans);
+		free_credits = payload_credits_available(i915, dsi_trans);
  		if (free_credits < 1) {
  			DRM_ERROR("Payload credit not available\n");
  			return false;
@@ -145,13 +145,13 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
  			    struct mipi_dsi_packet pkt, bool enable_lpdt)
  {
  	struct intel_dsi *intel_dsi = host->intel_dsi;
-	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
  	enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
  	u32 tmp;
  	int free_credits;
/* check if header credit available */
-	free_credits = header_credits_available(dev_priv, dsi_trans);
+	free_credits = header_credits_available(i915, dsi_trans);
  	if (free_credits < 1) {
  		DRM_ERROR("send pkt header failed, not enough hdr credits\n");
  		return -1;
@@ -200,7 +200,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 tmp;
@@ -266,7 +266,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
  static void configure_dual_link_mode(struct intel_encoder *encoder,
  				     const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	u32 dss_ctl1;
@@ -304,7 +304,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -329,7 +329,7 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
  	}
  }
-static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+static void get_dsi_io_power_domains(struct drm_i915_private *i915,
  				     struct intel_dsi *intel_dsi)
  {
  	enum port port;
@@ -337,7 +337,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
  	for_each_dsi_port(port, intel_dsi->ports) {
  		WARN_ON(intel_dsi->io_wakeref[port]);
  		intel_dsi->io_wakeref[port] =
-			intel_display_power_get(dev_priv,
+			intel_display_power_get(i915,
  						port == PORT_A ?
  						POWER_DOMAIN_PORT_DDI_A_IO :
  						POWER_DOMAIN_PORT_DDI_B_IO);
@@ -346,7 +346,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 tmp;
@@ -357,23 +357,23 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
  		I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
  	}
- get_dsi_io_power_domains(dev_priv, intel_dsi);
+	get_dsi_io_power_domains(i915, intel_dsi);
  }
static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
for_each_dsi_port(port, intel_dsi->ports)
-		intel_combo_phy_power_up_lanes(dev_priv, port, true,
+		intel_combo_phy_power_up_lanes(i915, port, true,
  					       intel_dsi->lane_count, false);
  }
static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 tmp;
@@ -409,7 +409,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	u32 tmp;
  	enum port port;
@@ -461,7 +461,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	u32 tmp;
  	enum port port;
@@ -480,7 +480,7 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	u32 tmp;
  	enum port port;
@@ -535,48 +535,48 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	u32 tmp;
  	enum port port;
- mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&i915->dpll_lock);
  	tmp = I915_READ(DPCLKA_CFGCR0_ICL);
  	for_each_dsi_port(port, intel_dsi->ports) {
  		tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
  	}
I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&i915->dpll_lock);
  }
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	u32 tmp;
  	enum port port;
- mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&i915->dpll_lock);
  	tmp = I915_READ(DPCLKA_CFGCR0_ICL);
  	for_each_dsi_port(port, intel_dsi->ports) {
  		tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
  	}
I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&i915->dpll_lock);
  }
static void gen11_dsi_map_pll(struct intel_encoder *encoder,
  			      const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
  	enum port port;
  	u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&i915->dpll_lock);
val = I915_READ(DPCLKA_CFGCR0_ICL);
  	for_each_dsi_port(port, intel_dsi->ports) {
@@ -592,14 +592,14 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
	POSTING_READ(DPCLKA_CFGCR0_ICL);
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&i915->dpll_lock);
  }
static void
  gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
  	enum pipe pipe = intel_crtc->pipe;
@@ -736,7 +736,7 @@ static void
  gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
  				 const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	const struct drm_display_mode *adjusted_mode =
  					&pipe_config->base.adjusted_mode;
@@ -849,7 +849,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	enum transcoder dsi_trans;
@@ -862,7 +862,7 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
  		I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    PIPECONF(dsi_trans),
  					    I965_PIPECONF_ACTIVE,
  					    I965_PIPECONF_ACTIVE, 10))
@@ -872,7 +872,7 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	enum transcoder dsi_trans;
@@ -950,7 +950,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	struct mipi_dsi_device *dsi;
  	enum port port;
@@ -1026,7 +1026,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	enum transcoder dsi_trans;
@@ -1041,7 +1041,7 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
  		I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    PIPECONF(dsi_trans),
  					    I965_PIPECONF_ACTIVE, 0, 50))
  			DRM_ERROR("DSI trancoder not disabled\n");
@@ -1062,7 +1062,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	enum transcoder dsi_trans;
@@ -1103,7 +1103,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
static void gen11_dsi_disable_port(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	u32 tmp;
  	enum port port;
@@ -1125,7 +1125,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 tmp;
@@ -1134,7 +1134,7 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
  		intel_wakeref_t wakeref;
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
-		intel_display_power_put(dev_priv,
+		intel_display_power_put(i915,
  					port == PORT_A ?
  					POWER_DOMAIN_PORT_DDI_A_IO :
  					POWER_DOMAIN_PORT_DDI_B_IO,
@@ -1205,13 +1205,13 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
  static void gen11_dsi_get_config(struct intel_encoder *encoder,
  				 struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
  	pipe_config->port_clock =
-		cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
+		cnl_calc_wrpll_link(i915, &pipe_config->dpll_hw_state);
pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
  	if (intel_dsi->dual_link)
@@ -1263,7 +1263,7 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
  static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
  				   enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum transcoder dsi_trans;
  	intel_wakeref_t wakeref;
@@ -1271,7 +1271,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
  	bool ret = false;
  	u32 tmp;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     encoder->power_domain);
  	if (!wakeref)
  		return false;
@@ -1298,7 +1298,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
  		ret = tmp & PIPECONF_ENABLE;
  	}
  out:
-	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+	intel_display_power_put(i915, encoder->power_domain, wakeref);
  	return ret;
  }
@@ -1391,8 +1391,8 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
  static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
  {
  	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct mipi_config *mipi_config = i915->vbt.dsi.config;
  	u32 tlpx_ns;
  	u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
  	u32 ths_prepare_ns, tclk_trail_ns;
@@ -1487,9 +1487,9 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
  	intel_dsi_log_params(intel_dsi);
  }
-void icl_dsi_init(struct drm_i915_private *dev_priv)
+void icl_dsi_init(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct intel_dsi *intel_dsi;
  	struct intel_encoder *encoder;
  	struct intel_connector *intel_connector;
@@ -1497,7 +1497,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
  	struct drm_display_mode *fixed_mode;
  	enum port port;
- if (!intel_bios_is_dsi_present(dev_priv, &port))
+	if (!intel_bios_is_dsi_present(i915, &port))
  		return;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
@@ -1556,13 +1556,13 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
  	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
  	intel_panel_setup_backlight(connector, INVALID_PIPE);
- if (dev_priv->vbt.dsi.config->dual_link)
+	if (i915->vbt.dsi.config->dual_link)
  		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
  	else
  		intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
-	intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+	intel_dsi->dcs_backlight_ports = i915->vbt.dsi.bl_ports;
+	intel_dsi->dcs_cabc_ports = i915->vbt.dsi.cabc_ports;
for_each_dsi_port(port, intel_dsi->ports) {
  		struct intel_dsi_host *host;
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 6b985e895a97..45782e6ff0f4 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -54,13 +54,13 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
  						u64 *val)
  {
  	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_digital_connector_state *intel_conn_state =
  		to_intel_digital_connector_state(state);
- if (property == dev_priv->force_audio_property)
+	if (property == i915->force_audio_property)
  		*val = intel_conn_state->force_audio;
-	else if (property == dev_priv->broadcast_rgb_property)
+	else if (property == i915->broadcast_rgb_property)
  		*val = intel_conn_state->broadcast_rgb;
  	else {
  		DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
@@ -86,16 +86,16 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
  						u64 val)
  {
  	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_digital_connector_state *intel_conn_state =
  		to_intel_digital_connector_state(state);
- if (property == dev_priv->force_audio_property) {
+	if (property == i915->force_audio_property) {
  		intel_conn_state->force_audio = val;
  		return 0;
  	}
- if (property == dev_priv->broadcast_rgb_property) {
+	if (property == i915->broadcast_rgb_property) {
  		intel_conn_state->broadcast_rgb = val;
  		return 0;
  	}
@@ -227,7 +227,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
  				      struct intel_plane_state *plane_state,
  				      int *scaler_id)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_crtc->base.dev);
  	int j;
  	u32 mode;
@@ -251,10 +251,10 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
  	    plane_state->base.fb->format->is_yuv &&
  	    plane_state->base.fb->format->num_planes > 1) {
  		struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-		if (IS_GEN(dev_priv, 9) &&
-		    !IS_GEMINILAKE(dev_priv)) {
+		if (IS_GEN(i915, 9) &&
+		    !IS_GEMINILAKE(i915)) {
  			mode = SKL_PS_SCALER_MODE_NV12;
-		} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
+		} else if (icl_is_hdr_plane(i915, plane->id)) {
  			/*
  			 * On gen11+'s HDR planes we only use the scaler for
  			 * scaling. They have a dedicated chroma upsampler, so
@@ -267,7 +267,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
  			if (plane_state->linked_plane)
  				mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
  		}
-	} else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
+	} else if (INTEL_GEN(i915) > 9 || IS_GEMINILAKE(i915)) {
  		mode = PS_SCALER_MODE_NORMAL;
  	} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
  		/*
@@ -290,7 +290,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
/**
   * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @intel_crtc: intel crtc
   * @crtc_state: incoming crtc_state to validate and setup scalers
   *
@@ -305,7 +305,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
   *         0 - scalers were setup succesfully
   *         error code - otherwise
   */
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+int intel_atomic_setup_scalers(struct drm_i915_private *i915,
  			       struct intel_crtc *intel_crtc,
  			       struct intel_crtc_state *crtc_state)
  {
@@ -369,7 +369,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
  			 */
  			if (!plane) {
  				struct drm_plane_state *state;
-				plane = drm_plane_from_index(&dev_priv->drm, i);
+				plane = drm_plane_from_index(&i915->drm, i);
  				state = drm_atomic_get_plane_state(drm_state, plane);
  				if (IS_ERR(state)) {
  					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
diff --git a/drivers/gpu/drm/i915/intel_atomic.h b/drivers/gpu/drm/i915/intel_atomic.h
index 1c8507da1a69..5482fd04d639 100644
--- a/drivers/gpu/drm/i915/intel_atomic.h
+++ b/drivers/gpu/drm/i915/intel_atomic.h
@@ -42,7 +42,7 @@ struct intel_crtc_state *
  intel_atomic_get_crtc_state(struct drm_atomic_state *state,
  			    struct intel_crtc *crtc);
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+int intel_atomic_setup_scalers(struct drm_i915_private *i915,
  			       struct intel_crtc *intel_crtc,
  			       struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 840daff12246..f463d8452fe9 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -230,7 +230,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
  			       i915_reg_t reg_elda, u32 bits_elda,
  			       i915_reg_t reg_edid)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	const u8 *eld = connector->eld;
  	u32 tmp;
  	int i;
@@ -256,7 +256,7 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
  				    const struct intel_crtc_state *old_crtc_state,
  				    const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 eldv, tmp;
DRM_DEBUG_KMS("Disable audio codec\n");
@@ -277,7 +277,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
  				   const struct intel_crtc_state *crtc_state,
  				   const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct drm_connector *connector = conn_state->connector;
  	const u8 *eld = connector->eld;
  	u32 eldv;
@@ -317,8 +317,8 @@ static void
  hsw_dp_audio_config_update(struct intel_encoder *encoder,
  			   const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct i915_audio_component *acomp = i915->audio_component;
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	enum port port = encoder->port;
  	const struct dp_aud_n_m *nm;
@@ -364,8 +364,8 @@ static void
  hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
  			     const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct i915_audio_component *acomp = i915->audio_component;
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	enum port port = encoder->port;
  	int n, rate;
@@ -416,14 +416,14 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
  				    const struct intel_crtc_state *old_crtc_state,
  				    const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
  	u32 tmp;
DRM_DEBUG_KMS("Disable audio codec on transcoder %s\n",
  		      transcoder_name(cpu_transcoder));
- mutex_lock(&dev_priv->av_mutex);
+	mutex_lock(&i915->av_mutex);
/* Disable timestamps */
  	tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
@@ -441,14 +441,14 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
  	tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
  	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
- mutex_unlock(&dev_priv->av_mutex);
+	mutex_unlock(&i915->av_mutex);
  }
static void hsw_audio_codec_enable(struct intel_encoder *encoder,
  				   const struct intel_crtc_state *crtc_state,
  				   const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct drm_connector *connector = conn_state->connector;
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	const u8 *eld = connector->eld;
@@ -458,7 +458,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
  	DRM_DEBUG_KMS("Enable audio codec on transcoder %s, %u bytes ELD\n",
  		      transcoder_name(cpu_transcoder), drm_eld_size(eld));
- mutex_lock(&dev_priv->av_mutex);
+	mutex_lock(&i915->av_mutex);
/* Enable audio presence detect, invalidate ELD */
  	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
@@ -491,14 +491,14 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
  	/* Enable timestamps */
  	hsw_audio_config_update(encoder, crtc_state);
- mutex_unlock(&dev_priv->av_mutex);
+	mutex_unlock(&i915->av_mutex);
  }
static void ilk_audio_codec_disable(struct intel_encoder *encoder,
  				    const struct intel_crtc_state *old_crtc_state,
  				    const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  	enum pipe pipe = crtc->pipe;
  	enum port port = encoder->port;
@@ -511,10 +511,10 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
  	if (WARN_ON(port == PORT_A))
  		return;
- if (HAS_PCH_IBX(dev_priv)) {
+	if (HAS_PCH_IBX(i915)) {
  		aud_config = IBX_AUD_CFG(pipe);
  		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		aud_config = VLV_AUD_CFG(pipe);
  		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
  	} else {
@@ -544,7 +544,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
  				   const struct intel_crtc_state *crtc_state,
  				   const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct drm_connector *connector = conn_state->connector;
  	enum pipe pipe = crtc->pipe;
@@ -567,13 +567,13 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
  	 * infrastructure is not there yet.
  	 */
- if (HAS_PCH_IBX(dev_priv)) {
+	if (HAS_PCH_IBX(i915)) {
  		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
  		aud_config = IBX_AUD_CFG(pipe);
  		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
  		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
-	} else if (IS_VALLEYVIEW(dev_priv) ||
-		   IS_CHERRYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915) ||
+		   IS_CHERRYVIEW(i915)) {
  		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
  		aud_config = VLV_AUD_CFG(pipe);
  		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
@@ -632,8 +632,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
  			      const struct intel_crtc_state *crtc_state,
  			      const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct i915_audio_component *acomp = i915->audio_component;
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct drm_connector *connector = conn_state->connector;
  	const struct drm_display_mode *adjusted_mode =
@@ -654,17 +654,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
  	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
 
-	if (dev_priv->display.audio_codec_enable)
-		dev_priv->display.audio_codec_enable(encoder,
+	if (i915->display.audio_codec_enable)
+		i915->display.audio_codec_enable(encoder,
  						     crtc_state,
  						     conn_state);
- mutex_lock(&dev_priv->av_mutex);
+	mutex_lock(&i915->av_mutex);
  	encoder->audio_connector = connector;
/* referred in audio callbacks */
-	dev_priv->av_enc_map[pipe] = encoder;
-	mutex_unlock(&dev_priv->av_mutex);
+	i915->av_enc_map[pipe] = encoder;
+	mutex_unlock(&i915->av_mutex);
if (acomp && acomp->base.audio_ops &&
  	    acomp->base.audio_ops->pin_eld_notify) {
@@ -675,7 +675,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
  						 (int) port, (int) pipe);
  	}
- intel_lpe_audio_notify(dev_priv, pipe, port, connector->eld,
+	intel_lpe_audio_notify(i915, pipe, port, connector->eld,
  			       crtc_state->port_clock,
  			       intel_crtc_has_dp_encoder(crtc_state));
  }
@@ -693,21 +693,21 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *old_crtc_state,
  			       const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct i915_audio_component *acomp = i915->audio_component;
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  	enum port port = encoder->port;
  	enum pipe pipe = crtc->pipe;
- if (dev_priv->display.audio_codec_disable)
-		dev_priv->display.audio_codec_disable(encoder,
+	if (i915->display.audio_codec_disable)
+		i915->display.audio_codec_disable(encoder,
  						      old_crtc_state,
  						      old_conn_state);
- mutex_lock(&dev_priv->av_mutex);
+	mutex_lock(&i915->av_mutex);
  	encoder->audio_connector = NULL;
-	dev_priv->av_enc_map[pipe] = NULL;
-	mutex_unlock(&dev_priv->av_mutex);
+	i915->av_enc_map[pipe] = NULL;
+	mutex_unlock(&i915->av_mutex);
if (acomp && acomp->base.audio_ops &&
  	    acomp->base.audio_ops->pin_eld_notify) {
@@ -718,31 +718,31 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
  						 (int) port, (int) pipe);
  	}
- intel_lpe_audio_notify(dev_priv, pipe, port, NULL, 0, false);
+	intel_lpe_audio_notify(i915, pipe, port, NULL, 0, false);
  }
/**
   * intel_init_audio_hooks - Set up chip specific audio hooks
- * @dev_priv: device private
+ * @i915: device private
   */
-void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
+void intel_init_audio_hooks(struct drm_i915_private *i915)
  {
-	if (IS_G4X(dev_priv)) {
-		dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
-		dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
-		dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
-	} else if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) {
-		dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
-		dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
-		dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
-		dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+	if (IS_G4X(i915)) {
+		i915->display.audio_codec_enable = g4x_audio_codec_enable;
+		i915->display.audio_codec_disable = g4x_audio_codec_disable;
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		i915->display.audio_codec_enable = ilk_audio_codec_enable;
+		i915->display.audio_codec_disable = ilk_audio_codec_disable;
+	} else if (IS_HASWELL(i915) || INTEL_GEN(i915) >= 8) {
+		i915->display.audio_codec_enable = hsw_audio_codec_enable;
+		i915->display.audio_codec_disable = hsw_audio_codec_disable;
+	} else if (HAS_PCH_SPLIT(i915)) {
+		i915->display.audio_codec_enable = ilk_audio_codec_enable;
+		i915->display.audio_codec_disable = ilk_audio_codec_disable;
  	}
  }
-static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
+static void glk_force_audio_cdclk(struct drm_i915_private *i915,
  				  bool enable)
  {
  	struct drm_modeset_acquire_ctx ctx;
@@ -750,7 +750,7 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
  	int ret;
drm_modeset_acquire_init(&ctx, 0);
-	state = drm_atomic_state_alloc(&dev_priv->drm);
+	state = drm_atomic_state_alloc(&i915->drm);
  	if (WARN_ON(!state))
  		return;
@@ -762,11 +762,11 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
  		enable ? 2 * 96000 : 0;
/*
-	 * Protects dev_priv->cdclk.force_min_cdclk
+	 * Protects i915->cdclk.force_min_cdclk
  	 * Need to lock this here in case we have no active pipes
  	 * and thus wouldn't lock it during the commit otherwise.
  	 */
-	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+	ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex,
  			       &ctx);
  	if (!ret)
  		ret = drm_atomic_commit(state);
@@ -787,18 +787,18 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
static unsigned long i915_audio_component_get_power(struct device *kdev)
  {
-	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_to_i915(kdev);
  	intel_wakeref_t ret;
/* Catch potential impedance mismatches before they occur! */
  	BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
- ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+	ret = intel_display_power_get(i915, POWER_DOMAIN_AUDIO);
/* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
-	if (dev_priv->audio_power_refcount++ == 0)
-		if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
-			glk_force_audio_cdclk(dev_priv, true);
+	if (i915->audio_power_refcount++ == 0)
+		if (IS_CANNONLAKE(i915) || IS_GEMINILAKE(i915))
+			glk_force_audio_cdclk(i915, true);
return ret;
  }
@@ -806,24 +806,24 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
  static void i915_audio_component_put_power(struct device *kdev,
  					   unsigned long cookie)
  {
-	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_to_i915(kdev);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
-	if (--dev_priv->audio_power_refcount == 0)
-		if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
-			glk_force_audio_cdclk(dev_priv, false);
+	if (--i915->audio_power_refcount == 0)
+		if (IS_CANNONLAKE(i915) || IS_GEMINILAKE(i915))
+			glk_force_audio_cdclk(i915, false);
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
+	intel_display_power_put(i915, POWER_DOMAIN_AUDIO, cookie);
  }
static void i915_audio_component_codec_wake_override(struct device *kdev,
  						     bool enable)
  {
-	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_to_i915(kdev);
  	unsigned long cookie;
  	u32 tmp;
- if (!IS_GEN(dev_priv, 9))
+	if (!IS_GEN(i915, 9))
  		return;
cookie = i915_audio_component_get_power(kdev);
@@ -850,12 +850,12 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
  /* Get CDCLK in kHz  */
  static int i915_audio_component_get_cdclk_freq(struct device *kdev)
  {
-	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
+	if (WARN_ON_ONCE(!HAS_DDI(i915)))
  		return -ENODEV;
- return dev_priv->cdclk.hw.cdclk;
+	return i915->cdclk.hw.cdclk;
  }
/*
@@ -868,17 +868,17 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
   *   will get the right intel_encoder with port matched
   * Non-MST & (pipe < 0): get the right intel_encoder with port matched
   */
-static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
+static struct intel_encoder *get_saved_enc(struct drm_i915_private *i915,
  					       int port, int pipe)
  {
  	struct intel_encoder *encoder;
/* MST */
  	if (pipe >= 0) {
-		if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+		if (WARN_ON(pipe >= ARRAY_SIZE(i915->av_enc_map)))
  			return NULL;
- encoder = dev_priv->av_enc_map[pipe];
+		encoder = i915->av_enc_map[pipe];
  		/*
  		 * when bootup, audio driver may not know it is
  		 * MST or not. So it will poll all the port & pipe
@@ -893,8 +893,8 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
  	if (pipe > 0)
  		return NULL;
- for_each_pipe(dev_priv, pipe) {
-		encoder = dev_priv->av_enc_map[pipe];
+	for_each_pipe(i915, pipe) {
+		encoder = i915->av_enc_map[pipe];
  		if (encoder == NULL)
  			continue;
@@ -911,21 +911,21 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
  static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
  						int pipe, int rate)
  {
-	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
-	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct drm_i915_private *i915 = kdev_to_i915(kdev);
+	struct i915_audio_component *acomp = i915->audio_component;
  	struct intel_encoder *encoder;
  	struct intel_crtc *crtc;
  	unsigned long cookie;
  	int err = 0;
- if (!HAS_DDI(dev_priv))
+	if (!HAS_DDI(i915))
  		return 0;
cookie = i915_audio_component_get_power(kdev);
-	mutex_lock(&dev_priv->av_mutex);
+	mutex_lock(&i915->av_mutex);
/* 1. get the pipe */
-	encoder = get_saved_enc(dev_priv, port, pipe);
+	encoder = get_saved_enc(i915, port, pipe);
  	if (!encoder || !encoder->base.crtc) {
  		DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
  		err = -ENODEV;
@@ -940,7 +940,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
  	hsw_audio_config_update(encoder, crtc->config);
unlock:
-	mutex_unlock(&dev_priv->av_mutex);
+	mutex_unlock(&i915->av_mutex);
  	i915_audio_component_put_power(kdev, cookie);
  	return err;
  }
@@ -949,17 +949,17 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
  					int pipe, bool *enabled,
  					unsigned char *buf, int max_bytes)
  {
-	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_to_i915(kdev);
  	struct intel_encoder *intel_encoder;
  	const u8 *eld;
  	int ret = -EINVAL;
- mutex_lock(&dev_priv->av_mutex);
+	mutex_lock(&i915->av_mutex);
- intel_encoder = get_saved_enc(dev_priv, port, pipe);
+	intel_encoder = get_saved_enc(i915, port, pipe);
  	if (!intel_encoder) {
  		DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
-		mutex_unlock(&dev_priv->av_mutex);
+		mutex_unlock(&i915->av_mutex);
  		return ret;
  	}
@@ -971,7 +971,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
  		memcpy(buf, eld, min(max_bytes, ret));
  	}
- mutex_unlock(&dev_priv->av_mutex);
+	mutex_unlock(&i915->av_mutex);
  	return ret;
  }
@@ -989,7 +989,7 @@ static int i915_audio_component_bind(struct device *i915_kdev,
  				     struct device *hda_kdev, void *data)
  {
  	struct i915_audio_component *acomp = data;
-	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
  	int i;
if (WARN_ON(acomp->base.ops || acomp->base.dev))
@@ -998,14 +998,14 @@ static int i915_audio_component_bind(struct device *i915_kdev,
  	if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
  		return -ENOMEM;
- drm_modeset_lock_all(&dev_priv->drm);
+	drm_modeset_lock_all(&i915->drm);
  	acomp->base.ops = &i915_audio_component_ops;
  	acomp->base.dev = i915_kdev;
  	BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
  	for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
  		acomp->aud_sample_rate[i] = 0;
-	dev_priv->audio_component = acomp;
-	drm_modeset_unlock_all(&dev_priv->drm);
+	i915->audio_component = acomp;
+	drm_modeset_unlock_all(&i915->drm);
return 0;
  }
@@ -1014,13 +1014,13 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
  					struct device *hda_kdev, void *data)
  {
  	struct i915_audio_component *acomp = data;
-	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
- drm_modeset_lock_all(&dev_priv->drm);
+	drm_modeset_lock_all(&i915->drm);
  	acomp->base.ops = NULL;
  	acomp->base.dev = NULL;
-	dev_priv->audio_component = NULL;
-	drm_modeset_unlock_all(&dev_priv->drm);
+	i915->audio_component = NULL;
+	drm_modeset_unlock_all(&i915->drm);
device_link_remove(hda_kdev, i915_kdev);
  }
@@ -1032,7 +1032,7 @@ static const struct component_ops i915_audio_component_bind_ops = {
/**
   * i915_audio_component_init - initialize and register the audio component
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This will register with the component framework a child component which
   * will bind dynamically to the snd_hda_intel driver's corresponding master
@@ -1046,11 +1046,11 @@ static const struct component_ops i915_audio_component_bind_ops = {
   * We ignore any error during registration and continue with reduced
   * functionality (i.e. without HDMI audio).
   */
-static void i915_audio_component_init(struct drm_i915_private *dev_priv)
+static void i915_audio_component_init(struct drm_i915_private *i915)
  {
  	int ret;
- ret = component_add_typed(dev_priv->drm.dev,
+	ret = component_add_typed(i915->drm.dev,
  				  &i915_audio_component_bind_ops,
  				  I915_COMPONENT_AUDIO);
  	if (ret < 0) {
@@ -1059,46 +1059,46 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
  		return;
  	}
- dev_priv->audio_component_registered = true;
+	i915->audio_component_registered = true;
  }
/**
   * i915_audio_component_cleanup - deregister the audio component
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * Deregisters the audio component, breaking any existing binding to the
   * corresponding snd_hda_intel driver's master component.
   */
-static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+static void i915_audio_component_cleanup(struct drm_i915_private *i915)
  {
-	if (!dev_priv->audio_component_registered)
+	if (!i915->audio_component_registered)
  		return;
- component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
-	dev_priv->audio_component_registered = false;
+	component_del(i915->drm.dev, &i915_audio_component_bind_ops);
+	i915->audio_component_registered = false;
  }
/**
   * intel_audio_init() - Initialize the audio driver either using
   * component framework or using lpe audio bridge
- * @dev_priv: the i915 drm device private data
+ * @i915: the i915 drm device private data
   *
   */
-void intel_audio_init(struct drm_i915_private *dev_priv)
+void intel_audio_init(struct drm_i915_private *i915)
  {
-	if (intel_lpe_audio_init(dev_priv) < 0)
-		i915_audio_component_init(dev_priv);
+	if (intel_lpe_audio_init(i915) < 0)
+		i915_audio_component_init(i915);
  }
/**
   * intel_audio_deinit() - deinitialize the audio driver
- * @dev_priv: the i915 drm device private data
+ * @i915: the i915 drm device private data
   *
   */
-void intel_audio_deinit(struct drm_i915_private *dev_priv)
+void intel_audio_deinit(struct drm_i915_private *i915)
  {
-	if ((dev_priv)->lpe_audio.platdev != NULL)
-		intel_lpe_audio_teardown(dev_priv);
+	if ((i915)->lpe_audio.platdev != NULL)
+		intel_lpe_audio_teardown(i915);
  	else
-		i915_audio_component_cleanup(dev_priv);
+		i915_audio_component_cleanup(i915);
  }
diff --git a/drivers/gpu/drm/i915/intel_audio.h b/drivers/gpu/drm/i915/intel_audio.h
index a3657c7a7ba2..c8fde42a9fde 100644
--- a/drivers/gpu/drm/i915/intel_audio.h
+++ b/drivers/gpu/drm/i915/intel_audio.h
@@ -11,14 +11,14 @@ struct drm_i915_private;
  struct intel_crtc_state;
  struct intel_encoder;
-void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
+void intel_init_audio_hooks(struct drm_i915_private *i915);
  void intel_audio_codec_enable(struct intel_encoder *encoder,
  			      const struct intel_crtc_state *crtc_state,
  			      const struct drm_connector_state *conn_state);
  void intel_audio_codec_disable(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *old_crtc_state,
  			       const struct drm_connector_state *old_conn_state);
-void intel_audio_init(struct drm_i915_private *dev_priv);
-void intel_audio_deinit(struct drm_i915_private *dev_priv);
+void intel_audio_init(struct drm_i915_private *i915);
+void intel_audio_deinit(struct drm_i915_private *i915);
#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1c037dfa83f5..7bdac55d08cc 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -202,7 +202,7 @@ get_lvds_fp_timing(const struct bdb_header *bdb,
/* Try to find integrated panel data */
  static void
-parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+parse_lfp_panel_data(struct drm_i915_private *i915,
  		     const struct bdb_header *bdb)
  {
  	const struct bdb_lvds_options *lvds_options;
@@ -219,9 +219,9 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
  	if (!lvds_options)
  		return;
- dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
+	i915->vbt.lvds_dither = lvds_options->pixel_dither;
- ret = intel_opregion_get_panel_type(dev_priv);
+	ret = intel_opregion_get_panel_type(i915);
  	if (ret >= 0) {
  		WARN_ON(ret > 0xf);
  		panel_type = ret;
@@ -236,7 +236,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
  		DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
  	}
- dev_priv->vbt.panel_type = panel_type;
+	i915->vbt.panel_type = panel_type;
drrs_mode = (lvds_options->dps_panel_type_bits
  				>> (panel_type * 2)) & MODE_MASK;
@@ -247,15 +247,15 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
  	 */
  	switch (drrs_mode) {
  	case 0:
-		dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
+		i915->vbt.drrs_type = STATIC_DRRS_SUPPORT;
  		DRM_DEBUG_KMS("DRRS supported mode is static\n");
  		break;
  	case 2:
-		dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
+		i915->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
  		DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
  		break;
  	default:
-		dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+		i915->vbt.drrs_type = DRRS_NOT_SUPPORTED;
  		DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
  		break;
  	}
@@ -278,7 +278,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
  	fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
 
-	dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+	i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
  	drm_mode_debug_printmodeline(panel_fixed_mode);
@@ -290,20 +290,20 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
  		/* check the resolution, just to be sure */
  		if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
  		    fp_timing->y_res == panel_fixed_mode->vdisplay) {
-			dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+			i915->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
  			DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
-				      dev_priv->vbt.bios_lvds_val);
+				      i915->vbt.bios_lvds_val);
  		}
  	}
  }
static void
-parse_lfp_backlight(struct drm_i915_private *dev_priv,
+parse_lfp_backlight(struct drm_i915_private *i915,
  		    const struct bdb_header *bdb)
  {
  	const struct bdb_lfp_backlight_data *backlight_data;
  	const struct lfp_backlight_data_entry *entry;
-	int panel_type = dev_priv->vbt.panel_type;
+	int panel_type = i915->vbt.panel_type;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
  	if (!backlight_data)
@@ -317,38 +317,38 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
  	entry = &backlight_data->data[panel_type];
 
-	dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
-	if (!dev_priv->vbt.backlight.present) {
+	i915->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+	if (!i915->vbt.backlight.present) {
  		DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
  			      entry->type);
  		return;
  	}
- dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+	i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
  	if (bdb->version >= 191 &&
  	    get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
  		const struct lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
-		dev_priv->vbt.backlight.type = method->type;
-		dev_priv->vbt.backlight.controller = method->controller;
+		i915->vbt.backlight.type = method->type;
+		i915->vbt.backlight.controller = method->controller;
  	}
- dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
-	dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
-	dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
+	i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+	i915->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+	i915->vbt.backlight.min_brightness = entry->min_brightness;
  	DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
  		      "active %s, min brightness %u, level %u, controller %u\n",
-		      dev_priv->vbt.backlight.pwm_freq_hz,
-		      dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
-		      dev_priv->vbt.backlight.min_brightness,
+		      i915->vbt.backlight.pwm_freq_hz,
+		      i915->vbt.backlight.active_low_pwm ? "low" : "high",
+		      i915->vbt.backlight.min_brightness,
  		      backlight_data->level[panel_type],
-		      dev_priv->vbt.backlight.controller);
+		      i915->vbt.backlight.controller);
  }
/* Try to find sdvo panel data */
  static void
-parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
+parse_sdvo_panel_data(struct drm_i915_private *i915,
  		      const struct bdb_header *bdb)
  {
  	const struct bdb_sdvo_panel_dtds *dtds;
@@ -381,16 +381,16 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
  	fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
 
-	dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
+	i915->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
  	drm_mode_debug_printmodeline(panel_fixed_mode);
  }
-static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
+static int intel_bios_ssc_frequency(struct drm_i915_private *i915,
  				    bool alternate)
  {
-	switch (INTEL_GEN(dev_priv)) {
+	switch (INTEL_GEN(i915)) {
  	case 2:
  		return alternate ? 66667 : 48000;
  	case 3:
@@ -402,7 +402,7 @@ static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
  }
static void
-parse_general_features(struct drm_i915_private *dev_priv,
+parse_general_features(struct drm_i915_private *i915,
  		       const struct bdb_header *bdb)
  {
  	const struct bdb_general_features *general;
@@ -411,30 +411,30 @@ parse_general_features(struct drm_i915_private *dev_priv,
  	if (!general)
  		return;
- dev_priv->vbt.int_tv_support = general->int_tv_support;
+	i915->vbt.int_tv_support = general->int_tv_support;
  	/* int_crt_support can't be trusted on earlier platforms */
  	if (bdb->version >= 155 &&
-	    (HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
-		dev_priv->vbt.int_crt_support = general->int_crt_support;
-	dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
-	dev_priv->vbt.lvds_ssc_freq =
-		intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
-	dev_priv->vbt.display_clock_mode = general->display_clock_mode;
-	dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+	    (HAS_DDI(i915) || IS_VALLEYVIEW(i915)))
+		i915->vbt.int_crt_support = general->int_crt_support;
+	i915->vbt.lvds_use_ssc = general->enable_ssc;
+	i915->vbt.lvds_ssc_freq =
+		intel_bios_ssc_frequency(i915, general->ssc_freq);
+	i915->vbt.display_clock_mode = general->display_clock_mode;
+	i915->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
  	if (bdb->version >= 181) {
-		dev_priv->vbt.orientation = general->rotate_180 ?
+		i915->vbt.orientation = general->rotate_180 ?
  			DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
  			DRM_MODE_PANEL_ORIENTATION_NORMAL;
  	} else {
-		dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+		i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
  	}
  	DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
-		      dev_priv->vbt.int_tv_support,
-		      dev_priv->vbt.int_crt_support,
-		      dev_priv->vbt.lvds_use_ssc,
-		      dev_priv->vbt.lvds_ssc_freq,
-		      dev_priv->vbt.display_clock_mode,
-		      dev_priv->vbt.fdi_rx_polarity_inverted);
+		      i915->vbt.int_tv_support,
+		      i915->vbt.int_crt_support,
+		      i915->vbt.lvds_use_ssc,
+		      i915->vbt.lvds_ssc_freq,
+		      i915->vbt.display_clock_mode,
+		      i915->vbt.fdi_rx_polarity_inverted);
  }
static const struct child_device_config *
@@ -444,7 +444,7 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i)
  }
static void
-parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
+parse_sdvo_device_mapping(struct drm_i915_private *i915, u8 bdb_version)
  {
  	struct sdvo_device_mapping *mapping;
  	const struct child_device_config *child;
@@ -454,13 +454,13 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
  	 * Only parse SDVO mappings on gens that could have SDVO. This isn't
  	 * accurate and doesn't have to be, as long as it's not too strict.
  	 */
-	if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
+	if (!IS_GEN_RANGE(i915, 3, 7)) {
  		DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
  		return;
  	}
- for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	for (i = 0, count = 0; i < i915->vbt.child_dev_num; i++) {
+		child = i915->vbt.child_dev + i;
if (child->slave_addr != SLAVE_ADDR1 &&
  		    child->slave_addr != SLAVE_ADDR2) {
@@ -481,7 +481,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
  			      child->slave_addr,
  			      (child->dvo_port == DEVICE_PORT_DVOB) ?
  			      "SDVOB" : "SDVOC");
-		mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
+		mapping = &i915->vbt.sdvo_mappings[child->dvo_port - 1];
  		if (!mapping->initialized) {
  			mapping->dvo_port = child->dvo_port;
  			mapping->slave_addr = child->slave_addr;
@@ -515,7 +515,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
  }
static void
-parse_driver_features(struct drm_i915_private *dev_priv,
+parse_driver_features(struct drm_i915_private *i915,
  		      const struct bdb_header *bdb)
  {
  	const struct bdb_driver_features *driver;
@@ -524,14 +524,14 @@ parse_driver_features(struct drm_i915_private *dev_priv,
  	if (!driver)
  		return;
- if (INTEL_GEN(dev_priv) >= 5) {
+	if (INTEL_GEN(i915) >= 5) {
  		/*
  		 * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
  		 * to mean "eDP". The VBT spec doesn't agree with that
  		 * interpretation, but real world VBTs seem to.
  		 */
  		if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
-			dev_priv->vbt.int_lvds_support = 0;
+			i915->vbt.int_lvds_support = 0;
  	} else {
  		/*
  		 * FIXME it's not clear which BDB version has the LVDS config
@@ -547,7 +547,7 @@ parse_driver_features(struct drm_i915_private *dev_priv,
  		if (bdb->version >= 134 &&
  		    driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
  		    driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
-			dev_priv->vbt.int_lvds_support = 0;
+			i915->vbt.int_lvds_support = 0;
  	}
DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
@@ -558,17 +558,17 @@ parse_driver_features(struct drm_i915_private *dev_priv,
  	 * driver->drrs_enabled=false
  	 */
  	if (!driver->drrs_enabled)
-		dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
-	dev_priv->vbt.psr.enable = driver->psr_enabled;
+		i915->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+	i915->vbt.psr.enable = driver->psr_enabled;
  }
static void
-parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+parse_edp(struct drm_i915_private *i915, const struct bdb_header *bdb)
  {
  	const struct bdb_edp *edp;
  	const struct edp_power_seq *edp_pps;
  	const struct edp_fast_link_params *edp_link_params;
-	int panel_type = dev_priv->vbt.panel_type;
+	int panel_type = i915->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
  	if (!edp)
@@ -576,13 +576,13 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
  	case EDP_18BPP:
-		dev_priv->vbt.edp.bpp = 18;
+		i915->vbt.edp.bpp = 18;
  		break;
  	case EDP_24BPP:
-		dev_priv->vbt.edp.bpp = 24;
+		i915->vbt.edp.bpp = 24;
  		break;
  	case EDP_30BPP:
-		dev_priv->vbt.edp.bpp = 30;
+		i915->vbt.edp.bpp = 30;
  		break;
  	}
@@ -590,14 +590,14 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
  	edp_pps = &edp->power_seqs[panel_type];
  	edp_link_params = &edp->fast_link_params[panel_type];
- dev_priv->vbt.edp.pps = *edp_pps;
+	i915->vbt.edp.pps = *edp_pps;
switch (edp_link_params->rate) {
  	case EDP_RATE_1_62:
-		dev_priv->vbt.edp.rate = DP_LINK_BW_1_62;
+		i915->vbt.edp.rate = DP_LINK_BW_1_62;
  		break;
  	case EDP_RATE_2_7:
-		dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
+		i915->vbt.edp.rate = DP_LINK_BW_2_7;
  		break;
  	default:
  		DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
@@ -607,13 +607,13 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->lanes) {
  	case EDP_LANE_1:
-		dev_priv->vbt.edp.lanes = 1;
+		i915->vbt.edp.lanes = 1;
  		break;
  	case EDP_LANE_2:
-		dev_priv->vbt.edp.lanes = 2;
+		i915->vbt.edp.lanes = 2;
  		break;
  	case EDP_LANE_4:
-		dev_priv->vbt.edp.lanes = 4;
+		i915->vbt.edp.lanes = 4;
  		break;
  	default:
  		DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
@@ -623,16 +623,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->preemphasis) {
  	case EDP_PREEMPHASIS_NONE:
-		dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+		i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
  		break;
  	case EDP_PREEMPHASIS_3_5dB:
-		dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+		i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
  		break;
  	case EDP_PREEMPHASIS_6dB:
-		dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+		i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
  		break;
  	case EDP_PREEMPHASIS_9_5dB:
-		dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+		i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
  		break;
  	default:
  		DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
@@ -642,16 +642,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->vswing) {
  	case EDP_VSWING_0_4V:
-		dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+		i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
  		break;
  	case EDP_VSWING_0_6V:
-		dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+		i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
  		break;
  	case EDP_VSWING_0_8V:
-		dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+		i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  		break;
  	case EDP_VSWING_1_2V:
-		dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+		i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
  		break;
  	default:
  		DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
@@ -664,21 +664,21 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
/* Don't read from VBT if module parameter has valid value*/
  		if (i915_modparams.edp_vswing) {
-			dev_priv->vbt.edp.low_vswing =
+			i915->vbt.edp.low_vswing =
  				i915_modparams.edp_vswing == 1;
  		} else {
  			vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
-			dev_priv->vbt.edp.low_vswing = vswing == 0;
+			i915->vbt.edp.low_vswing = vswing == 0;
  		}
  	}
  }
static void
-parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+parse_psr(struct drm_i915_private *i915, const struct bdb_header *bdb)
  {
  	const struct bdb_psr *psr;
  	const struct psr_table *psr_table;
-	int panel_type = dev_priv->vbt.panel_type;
+	int panel_type = i915->vbt.panel_type;
psr = find_section(bdb, BDB_PSR);
  	if (!psr) {
@@ -688,25 +688,25 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
  	psr_table = &psr->psr_table[panel_type];
 
-	dev_priv->vbt.psr.full_link = psr_table->full_link;
-	dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+	i915->vbt.psr.full_link = psr_table->full_link;
+	i915->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
/* Allowed VBT values goes from 0 to 15 */
-	dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+	i915->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
  		psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
switch (psr_table->lines_to_wait) {
  	case 0:
-		dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
+		i915->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
  		break;
  	case 1:
-		dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
+		i915->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
  		break;
  	case 2:
-		dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
+		i915->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
  		break;
  	case 3:
-		dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
+		i915->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
  		break;
  	default:
  		DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
@@ -719,48 +719,48 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
  	 * Old decimal value is wake up time in multiples of 100 us.
  	 */
  	if (bdb->version >= 205 &&
-	    (IS_GEN9_BC(dev_priv) || IS_GEMINILAKE(dev_priv) ||
-	     INTEL_GEN(dev_priv) >= 10)) {
+	    (IS_GEN9_BC(i915) || IS_GEMINILAKE(i915) ||
+	     INTEL_GEN(i915) >= 10)) {
  		switch (psr_table->tp1_wakeup_time) {
  		case 0:
-			dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
+			i915->vbt.psr.tp1_wakeup_time_us = 500;
  			break;
  		case 1:
-			dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
+			i915->vbt.psr.tp1_wakeup_time_us = 100;
  			break;
  		case 3:
-			dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
+			i915->vbt.psr.tp1_wakeup_time_us = 0;
  			break;
  		default:
  			DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
  					psr_table->tp1_wakeup_time);
  			/* fallthrough */
  		case 2:
-			dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
+			i915->vbt.psr.tp1_wakeup_time_us = 2500;
  			break;
  		}
switch (psr_table->tp2_tp3_wakeup_time) {
  		case 0:
-			dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+			i915->vbt.psr.tp2_tp3_wakeup_time_us = 500;
  			break;
  		case 1:
-			dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+			i915->vbt.psr.tp2_tp3_wakeup_time_us = 100;
  			break;
  		case 3:
-			dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+			i915->vbt.psr.tp2_tp3_wakeup_time_us = 0;
  			break;
  		default:
  			DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
  					psr_table->tp2_tp3_wakeup_time);
  			/* fallthrough */
  		case 2:
-			dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+			i915->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
  		break;
  		}
  	} else {
-		dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
-		dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+		i915->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+		i915->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
  	}
if (bdb->version >= 226) {
@@ -782,74 +782,74 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
  			wakeup_time = 2500;
  			break;
  		}
-		dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+		i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
  	} else {
  		/* Reusing PSR1 wakeup time for PSR2 in older VBTs */
-		dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = dev_priv->vbt.psr.tp2_tp3_wakeup_time_us;
+		i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = i915->vbt.psr.tp2_tp3_wakeup_time_us;
  	}
  }
-static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
+static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
  				      u16 version, enum port port)
  {
-	if (!dev_priv->vbt.dsi.config->dual_link || version < 197) {
-		dev_priv->vbt.dsi.bl_ports = BIT(port);
-		if (dev_priv->vbt.dsi.config->cabc_supported)
-			dev_priv->vbt.dsi.cabc_ports = BIT(port);
+	if (!i915->vbt.dsi.config->dual_link || version < 197) {
+		i915->vbt.dsi.bl_ports = BIT(port);
+		if (i915->vbt.dsi.config->cabc_supported)
+			i915->vbt.dsi.cabc_ports = BIT(port);
return;
  	}
- switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+	switch (i915->vbt.dsi.config->dl_dcs_backlight_ports) {
  	case DL_DCS_PORT_A:
-		dev_priv->vbt.dsi.bl_ports = BIT(PORT_A);
+		i915->vbt.dsi.bl_ports = BIT(PORT_A);
  		break;
  	case DL_DCS_PORT_C:
-		dev_priv->vbt.dsi.bl_ports = BIT(PORT_C);
+		i915->vbt.dsi.bl_ports = BIT(PORT_C);
  		break;
  	default:
  	case DL_DCS_PORT_A_AND_C:
-		dev_priv->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+		i915->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
  		break;
  	}
- if (!dev_priv->vbt.dsi.config->cabc_supported)
+	if (!i915->vbt.dsi.config->cabc_supported)
  		return;
- switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+	switch (i915->vbt.dsi.config->dl_dcs_cabc_ports) {
  	case DL_DCS_PORT_A:
-		dev_priv->vbt.dsi.cabc_ports = BIT(PORT_A);
+		i915->vbt.dsi.cabc_ports = BIT(PORT_A);
  		break;
  	case DL_DCS_PORT_C:
-		dev_priv->vbt.dsi.cabc_ports = BIT(PORT_C);
+		i915->vbt.dsi.cabc_ports = BIT(PORT_C);
  		break;
  	default:
  	case DL_DCS_PORT_A_AND_C:
-		dev_priv->vbt.dsi.cabc_ports =
+		i915->vbt.dsi.cabc_ports =
  					BIT(PORT_A) | BIT(PORT_C);
  		break;
  	}
  }
static void
-parse_mipi_config(struct drm_i915_private *dev_priv,
+parse_mipi_config(struct drm_i915_private *i915,
  		  const struct bdb_header *bdb)
  {
  	const struct bdb_mipi_config *start;
  	const struct mipi_config *config;
  	const struct mipi_pps_data *pps;
-	int panel_type = dev_priv->vbt.panel_type;
+	int panel_type = i915->vbt.panel_type;
  	enum port port;
/* parse MIPI blocks only if LFP type is MIPI */
-	if (!intel_bios_is_dsi_present(dev_priv, &port))
+	if (!intel_bios_is_dsi_present(i915, &port))
  		return;
/* Initialize this to undefined indicating no generic MIPI support */
-	dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
+	i915->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
/* Block #40 is already parsed and panel_fixed_mode is
-	 * stored in dev_priv->lfp_lvds_vbt_mode
+	 * stored in i915->lfp_lvds_vbt_mode
  	 * resuse this when needed
  	 */
@@ -873,17 +873,17 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
  	pps = &start->pps[panel_type];
/* store as of now full data. Trim when we realise all is not needed */
-	dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
-	if (!dev_priv->vbt.dsi.config)
+	i915->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+	if (!i915->vbt.dsi.config)
  		return;
- dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
-	if (!dev_priv->vbt.dsi.pps) {
-		kfree(dev_priv->vbt.dsi.config);
+	i915->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+	if (!i915->vbt.dsi.pps) {
+		kfree(i915->vbt.dsi.config);
  		return;
  	}
- parse_dsi_backlight_ports(dev_priv, bdb->version, port);
+	parse_dsi_backlight_ports(i915, bdb->version, port);
/* FIXME is the 90 vs. 270 correct? */
  	switch (config->rotation) {
@@ -892,25 +892,25 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
  		 * Most (all?) VBTs claim 0 degrees despite having
  		 * an upside down panel, thus we do not trust this.
  		 */
-		dev_priv->vbt.dsi.orientation =
+		i915->vbt.dsi.orientation =
  			DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
  		break;
  	case ENABLE_ROTATION_90:
-		dev_priv->vbt.dsi.orientation =
+		i915->vbt.dsi.orientation =
  			DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
  		break;
  	case ENABLE_ROTATION_180:
-		dev_priv->vbt.dsi.orientation =
+		i915->vbt.dsi.orientation =
  			DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
  		break;
  	case ENABLE_ROTATION_270:
-		dev_priv->vbt.dsi.orientation =
+		i915->vbt.dsi.orientation =
  			DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
  		break;
  	}
/* We have mandatory mipi config blocks. Initialize as generic panel */
-	dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+	i915->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
  }
/* Find the sequence block and size for the given panel. */
@@ -1073,12 +1073,12 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
   * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
   * skip all delay + gpio operands and stop at the first DSI packet op.
   */
-static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915)
  {
-	const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+	const u8 *data = i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
  	int index, len;
- if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+	if (WARN_ON(!data || i915->vbt.dsi.seq_version != 1))
  		return 0;
/* index = 1 to skip sequence byte */
@@ -1106,54 +1106,54 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
   * these devices we split the init OTP sequence into a deassert sequence and
   * the actual init OTP part.
   */
-static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+static void fixup_mipi_sequences(struct drm_i915_private *i915)
  {
  	u8 *init_otp;
  	int len;
/* Limit this to VLV for now. */
-	if (!IS_VALLEYVIEW(dev_priv))
+	if (!IS_VALLEYVIEW(i915))
  		return;
/* Limit this to v1 vid-mode sequences */
-	if (dev_priv->vbt.dsi.config->is_cmd_mode ||
-	    dev_priv->vbt.dsi.seq_version != 1)
+	if (i915->vbt.dsi.config->is_cmd_mode ||
+	    i915->vbt.dsi.seq_version != 1)
  		return;
/* Only do this if there are otp and assert seqs and no deassert seq */
-	if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
-	    !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
-	    dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+	if (!i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+	    !i915->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+	    i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
  		return;
/* The deassert-sequence ends at the first DSI packet */
-	len = get_init_otp_deassert_fragment_len(dev_priv);
+	len = get_init_otp_deassert_fragment_len(i915);
  	if (!len)
  		return;
DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n"); /* Copy the fragment, update seq byte and terminate it */
-	init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
-	dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
-	if (!dev_priv->vbt.dsi.deassert_seq)
+	init_otp = (u8 *)i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+	i915->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+	if (!i915->vbt.dsi.deassert_seq)
  		return;
-	dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
-	dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+	i915->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+	i915->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
  	/* Use the copy for deassert */
-	dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
-		dev_priv->vbt.dsi.deassert_seq;
+	i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+		i915->vbt.dsi.deassert_seq;
  	/* Replace the last byte of the fragment with init OTP seq byte */
  	init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
  	/* And make MIPI_MIPI_SEQ_INIT_OTP point to it */
-	dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+	i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
  }
static void
-parse_mipi_sequence(struct drm_i915_private *dev_priv,
+parse_mipi_sequence(struct drm_i915_private *i915,
  		    const struct bdb_header *bdb)
  {
-	int panel_type = dev_priv->vbt.panel_type;
+	int panel_type = i915->vbt.panel_type;
  	const struct bdb_mipi_sequence *sequence;
  	const u8 *seq_data;
  	u32 seq_size;
@@ -1161,7 +1161,7 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
  	int index = 0;
/* Only our generic panel driver uses the sequence block. */
-	if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+	if (i915->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
  		return;
sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
@@ -1202,7 +1202,7 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
  		if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
  			DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
- dev_priv->vbt.dsi.sequence[seq_id] = data + index;
+		i915->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
  			index = goto_next_sequence_v3(data, index, seq_size);
@@ -1214,18 +1214,18 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
  		}
  	}
- dev_priv->vbt.dsi.data = data;
-	dev_priv->vbt.dsi.size = seq_size;
-	dev_priv->vbt.dsi.seq_version = sequence->version;
+	i915->vbt.dsi.data = data;
+	i915->vbt.dsi.size = seq_size;
+	i915->vbt.dsi.seq_version = sequence->version;
- fixup_mipi_sequences(dev_priv);
+	fixup_mipi_sequences(i915);
DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
  	return;
err:
  	kfree(data);
-	memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
+	memset(i915->vbt.dsi.sequence, 0, sizeof(i915->vbt.dsi.sequence));
  }
static u8 translate_iboost(u8 val)
@@ -1254,16 +1254,16 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
  	return PORT_NONE;
  }
-static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+static void sanitize_ddc_pin(struct drm_i915_private *i915,
  			     enum port port)
  {
-	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+	struct ddi_vbt_port_info *info = &i915->vbt.ddi_port_info[port];
  	enum port p;
if (!info->alternate_ddc_pin)
  		return;
- p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
+	p = get_port_by_ddc_pin(i915, info->alternate_ddc_pin);
  	if (p != PORT_NONE) {
  		DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
  			      "disabling port %c DVI/HDMI support\n",
@@ -1300,16 +1300,16 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
  	return PORT_NONE;
  }
-static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+static void sanitize_aux_ch(struct drm_i915_private *i915,
  			    enum port port)
  {
-	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+	struct ddi_vbt_port_info *info = &i915->vbt.ddi_port_info[port];
  	enum port p;
if (!info->alternate_aux_channel)
  		return;
- p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
+	p = get_port_by_aux_ch(i915, info->alternate_aux_channel);
  	if (p != PORT_NONE) {
  		DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
  			      "disabling port %c DP support\n",
@@ -1347,15 +1347,15 @@ static const u8 icp_ddc_pin_map[] = {
  	[ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
  };
-static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
+static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
  {
  	const u8 *ddc_pin_map;
  	int n_entries;
- if (HAS_PCH_ICP(dev_priv)) {
+	if (HAS_PCH_ICP(i915)) {
  		ddc_pin_map = icp_ddc_pin_map;
  		n_entries = ARRAY_SIZE(icp_ddc_pin_map);
-	} else if (HAS_PCH_CNP(dev_priv)) {
+	} else if (HAS_PCH_CNP(i915)) {
  		ddc_pin_map = cnp_ddc_pin_map;
  		n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
  	} else {
@@ -1401,7 +1401,7 @@ static enum port dvo_port_to_port(u8 dvo_port)
  	return PORT_NONE;
  }
-static void parse_ddi_port(struct drm_i915_private *dev_priv,
+static void parse_ddi_port(struct drm_i915_private *i915,
  			   const struct child_device_config *child,
  			   u8 bdb_version)
  {
@@ -1413,7 +1413,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
  	if (port == PORT_NONE)
  		return;
- info = &dev_priv->vbt.ddi_port_info[port];
+	info = &i915->vbt.ddi_port_info[port];
if (info->child) {
  		DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
@@ -1447,7 +1447,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n",
  		      port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
-		      HAS_LSPCON(dev_priv) && child->lspcon,
+		      HAS_LSPCON(i915) && child->lspcon,
  		      info->supports_typec_usb, info->supports_tbt);
if (is_edp && is_dvi)
@@ -1469,10 +1469,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
  	if (is_dvi) {
  		u8 ddc_pin;
- ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
-		if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
+		ddc_pin = map_ddc_pin(i915, child->ddc_pin);
+		if (intel_gmbus_is_valid_pin(i915, ddc_pin)) {
  			info->alternate_ddc_pin = ddc_pin;
-			sanitize_ddc_pin(dev_priv, port);
+			sanitize_ddc_pin(i915, port);
  		} else {
  			DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
  				      "sticking to defaults\n",
@@ -1483,7 +1483,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
  	if (is_dp) {
  		info->alternate_aux_channel = child->aux_channel;
- sanitize_aux_ch(dev_priv, port);
+		sanitize_aux_ch(i915, port);
  	}
if (bdb_version >= 158) {
@@ -1553,26 +1553,26 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
  	info->child = child;
  }
-static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
+static void parse_ddi_ports(struct drm_i915_private *i915, u8 bdb_version)
  {
  	const struct child_device_config *child;
  	int i;
- if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+	if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
  		return;
if (bdb_version < 155)
  		return;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	for (i = 0; i < i915->vbt.child_dev_num; i++) {
+		child = i915->vbt.child_dev + i;
- parse_ddi_port(dev_priv, child, bdb_version);
+		parse_ddi_port(i915, child, bdb_version);
  	}
  }
static void
-parse_general_definitions(struct drm_i915_private *dev_priv,
+parse_general_definitions(struct drm_i915_private *i915,
  			  const struct bdb_header *bdb)
  {
  	const struct bdb_general_definitions *defs;
@@ -1597,8 +1597,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
bus_pin = defs->crt_ddc_gmbus_pin;
  	DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
-	if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
-		dev_priv->vbt.crt_ddc_pin = bus_pin;
+	if (intel_gmbus_is_valid_pin(i915, bus_pin))
+		i915->vbt.crt_ddc_pin = bus_pin;
if (bdb->version < 106) {
  		expected_size = 22;
@@ -1645,13 +1645,13 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
  		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
  		return;
  	}
-	dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
-	if (!dev_priv->vbt.child_dev) {
+	i915->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
+	if (!i915->vbt.child_dev) {
  		DRM_DEBUG_KMS("No memory space for child device\n");
  		return;
  	}
- dev_priv->vbt.child_dev_num = count;
+	i915->vbt.child_dev_num = count;
  	count = 0;
  	for (i = 0; i < child_device_num; i++) {
  		child = child_device_ptr(defs, i);
@@ -1663,7 +1663,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
  		 * (child_dev_size) of the child device. Accessing the data must
  		 * depend on VBT version.
  		 */
-		memcpy(dev_priv->vbt.child_dev + count, child,
+		memcpy(i915->vbt.child_dev + count, child,
  		       min_t(size_t, defs->child_dev_size, sizeof(*child)));
  		count++;
  	}
@@ -1671,41 +1671,41 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
/* Common defaults which may be overridden by VBT. */
  static void
-init_vbt_defaults(struct drm_i915_private *dev_priv)
+init_vbt_defaults(struct drm_i915_private *i915)
  {
  	enum port port;
- dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+	i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* Default to having backlight */
-	dev_priv->vbt.backlight.present = true;
+	i915->vbt.backlight.present = true;
/* LFP panel data */
-	dev_priv->vbt.lvds_dither = 1;
+	i915->vbt.lvds_dither = 1;
/* SDVO panel data */
-	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+	i915->vbt.sdvo_lvds_vbt_mode = NULL;
/* general features */
-	dev_priv->vbt.int_tv_support = 1;
-	dev_priv->vbt.int_crt_support = 1;
+	i915->vbt.int_tv_support = 1;
+	i915->vbt.int_crt_support = 1;
/* driver features */
-	dev_priv->vbt.int_lvds_support = 1;
+	i915->vbt.int_lvds_support = 1;
/* Default to using SSC */
-	dev_priv->vbt.lvds_use_ssc = 1;
+	i915->vbt.lvds_use_ssc = 1;
  	/*
  	 * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
  	 * clock for LVDS.
  	 */
-	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
-			!HAS_PCH_SPLIT(dev_priv));
-	DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
+	i915->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
+			!HAS_PCH_SPLIT(i915));
+	DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", i915->vbt.lvds_ssc_freq);
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
  		struct ddi_vbt_port_info *info =
-			&dev_priv->vbt.ddi_port_info[port];
+			&i915->vbt.ddi_port_info[port];
info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
  	}
@@ -1713,19 +1713,19 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* Defaults to initialize only if there is no VBT. */
  static void
-init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
+init_vbt_missing_defaults(struct drm_i915_private *i915)
  {
  	enum port port;
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
  		struct ddi_vbt_port_info *info =
-			&dev_priv->vbt.ddi_port_info[port];
+			&i915->vbt.ddi_port_info[port];
/*
  		 * VBT has the TypeC mode (native,TBT/USB) and we don't want
  		 * to detect it.
  		 */
-		if (intel_port_is_tc(dev_priv, port))
+		if (intel_port_is_tc(i915, port))
  			continue;
info->supports_dvi = (port != PORT_A && port != PORT_E);
@@ -1811,25 +1811,25 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
/**
   * intel_bios_init - find VBT and initialize settings from the BIOS
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
   * was not found in ACPI OpRegion, try to find it in PCI ROM first. Also
   * initialize some defaults if the VBT is not present at all.
   */
-void intel_bios_init(struct drm_i915_private *dev_priv)
+void intel_bios_init(struct drm_i915_private *i915)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
-	const struct vbt_header *vbt = dev_priv->opregion.vbt;
+	struct pci_dev *pdev = i915->drm.pdev;
+	const struct vbt_header *vbt = i915->opregion.vbt;
  	const struct bdb_header *bdb;
  	u8 __iomem *bios = NULL;
- if (!HAS_DISPLAY(dev_priv)) {
+	if (!HAS_DISPLAY(i915)) {
  		DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
  		return;
  	}
- init_vbt_defaults(dev_priv);
+	init_vbt_defaults(i915);
/* If the OpRegion does not have VBT, look in PCI ROM. */
  	if (!vbt) {
@@ -1852,25 +1852,25 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
  		      (int)sizeof(vbt->signature), vbt->signature, bdb->version);
/* Grab useful general definitions */
-	parse_general_features(dev_priv, bdb);
-	parse_general_definitions(dev_priv, bdb);
-	parse_lfp_panel_data(dev_priv, bdb);
-	parse_lfp_backlight(dev_priv, bdb);
-	parse_sdvo_panel_data(dev_priv, bdb);
-	parse_driver_features(dev_priv, bdb);
-	parse_edp(dev_priv, bdb);
-	parse_psr(dev_priv, bdb);
-	parse_mipi_config(dev_priv, bdb);
-	parse_mipi_sequence(dev_priv, bdb);
+	parse_general_features(i915, bdb);
+	parse_general_definitions(i915, bdb);
+	parse_lfp_panel_data(i915, bdb);
+	parse_lfp_backlight(i915, bdb);
+	parse_sdvo_panel_data(i915, bdb);
+	parse_driver_features(i915, bdb);
+	parse_edp(i915, bdb);
+	parse_psr(i915, bdb);
+	parse_mipi_config(i915, bdb);
+	parse_mipi_sequence(i915, bdb);
/* Further processing on pre-parsed data */
-	parse_sdvo_device_mapping(dev_priv, bdb->version);
-	parse_ddi_ports(dev_priv, bdb->version);
+	parse_sdvo_device_mapping(i915, bdb->version);
+	parse_ddi_ports(i915, bdb->version);
out:
  	if (!vbt) {
  		DRM_INFO("Failed to find VBIOS tables (VBT)\n");
-		init_vbt_missing_defaults(dev_priv);
+		init_vbt_missing_defaults(i915);
  	}
if (bios)
@@ -1879,47 +1879,47 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
/**
   * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   */
-void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+void intel_bios_cleanup(struct drm_i915_private *i915)
  {
-	kfree(dev_priv->vbt.child_dev);
-	dev_priv->vbt.child_dev = NULL;
-	dev_priv->vbt.child_dev_num = 0;
-	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
-	kfree(dev_priv->vbt.dsi.data);
-	dev_priv->vbt.dsi.data = NULL;
-	kfree(dev_priv->vbt.dsi.pps);
-	dev_priv->vbt.dsi.pps = NULL;
-	kfree(dev_priv->vbt.dsi.config);
-	dev_priv->vbt.dsi.config = NULL;
-	kfree(dev_priv->vbt.dsi.deassert_seq);
-	dev_priv->vbt.dsi.deassert_seq = NULL;
+	kfree(i915->vbt.child_dev);
+	i915->vbt.child_dev = NULL;
+	i915->vbt.child_dev_num = 0;
+	kfree(i915->vbt.sdvo_lvds_vbt_mode);
+	i915->vbt.sdvo_lvds_vbt_mode = NULL;
+	kfree(i915->vbt.lfp_lvds_vbt_mode);
+	i915->vbt.lfp_lvds_vbt_mode = NULL;
+	kfree(i915->vbt.dsi.data);
+	i915->vbt.dsi.data = NULL;
+	kfree(i915->vbt.dsi.pps);
+	i915->vbt.dsi.pps = NULL;
+	kfree(i915->vbt.dsi.config);
+	i915->vbt.dsi.config = NULL;
+	kfree(i915->vbt.dsi.deassert_seq);
+	i915->vbt.dsi.deassert_seq = NULL;
  }
/**
   * intel_bios_is_tv_present - is integrated TV present in VBT
- * @dev_priv:	i915 device instance
+ * @i915:	i915 device instance
   *
   * Return true if TV is present. If no child devices were parsed from VBT,
   * assume TV is present.
   */
-bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
+bool intel_bios_is_tv_present(struct drm_i915_private *i915)
  {
  	const struct child_device_config *child;
  	int i;
- if (!dev_priv->vbt.int_tv_support)
+	if (!i915->vbt.int_tv_support)
  		return false;
- if (!dev_priv->vbt.child_dev_num)
+	if (!i915->vbt.child_dev_num)
  		return true;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	for (i = 0; i < i915->vbt.child_dev_num; i++) {
+		child = i915->vbt.child_dev + i;
  		/*
  		 * If the device type is not TV, continue.
  		 */
@@ -1943,22 +1943,22 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
/**
   * intel_bios_is_lvds_present - is LVDS present in VBT
- * @dev_priv:	i915 device instance
+ * @i915:	i915 device instance
   * @i2c_pin:	i2c pin for LVDS if present
   *
   * Return true if LVDS is present. If no child devices were parsed from VBT,
   * assume LVDS is present.
   */
-bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
+bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
  {
  	const struct child_device_config *child;
  	int i;
- if (!dev_priv->vbt.child_dev_num)
+	if (!i915->vbt.child_dev_num)
  		return true;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	for (i = 0; i < i915->vbt.child_dev_num; i++) {
+		child = i915->vbt.child_dev + i;
/* If the device type is not LFP, continue.
  		 * We have to check both the new identifiers as well as the
@@ -1968,7 +1968,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
  		    child->device_type != DEVICE_TYPE_LFP)
  			continue;
- if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
+		if (intel_gmbus_is_valid_pin(i915, child->i2c_pin))
  			*i2c_pin = child->i2c_pin;
/* However, we cannot trust the BIOS writers to populate
@@ -1984,7 +1984,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
  		 * additional data.  Trust that if the VBT was written into
  		 * the OpRegion then they have validated the LVDS's existence.
  		 */
-		if (dev_priv->opregion.vbt)
+		if (i915->opregion.vbt)
  			return true;
  	}
@@ -1993,12 +1993,12 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
/**
   * intel_bios_is_port_present - is the specified digital port present
- * @dev_priv:	i915 device instance
+ * @i915:	i915 device instance
   * @port:	port to check
   *
   * Return true if the device in %port is present.
   */
-bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
  {
  	const struct child_device_config *child;
  	static const struct {
@@ -2012,9 +2012,9 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
  	};
  	int i;
- if (HAS_DDI(dev_priv)) {
+	if (HAS_DDI(i915)) {
  		const struct ddi_vbt_port_info *port_info =
-			&dev_priv->vbt.ddi_port_info[port];
+			&i915->vbt.ddi_port_info[port];
return port_info->supports_dp ||
  		       port_info->supports_dvi ||
@@ -2025,11 +2025,11 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
  	if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
  		return false;
- if (!dev_priv->vbt.child_dev_num)
+	if (!i915->vbt.child_dev_num)
  		return false;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	for (i = 0; i < i915->vbt.child_dev_num; i++) {
+		child = i915->vbt.child_dev + i;
if ((child->dvo_port == port_mapping[port].dp ||
  		     child->dvo_port == port_mapping[port].hdmi) &&
@@ -2043,12 +2043,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
/**
   * intel_bios_is_port_edp - is the device in given port eDP
- * @dev_priv:	i915 device instance
+ * @i915:	i915 device instance
   * @port:	port to check
   *
   * Return true if the device in %port is eDP.
   */
-bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port)
  {
  	const struct child_device_config *child;
  	static const short port_mapping[] = {
@@ -2060,14 +2060,14 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
  	};
  	int i;
- if (HAS_DDI(dev_priv))
-		return dev_priv->vbt.ddi_port_info[port].supports_edp;
+	if (HAS_DDI(i915))
+		return i915->vbt.ddi_port_info[port].supports_edp;
- if (!dev_priv->vbt.child_dev_num)
+	if (!i915->vbt.child_dev_num)
  		return false;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	for (i = 0; i < i915->vbt.child_dev_num; i++) {
+		child = i915->vbt.child_dev + i;
if (child->dvo_port == port_mapping[port] &&
  		    (child->device_type & DEVICE_TYPE_eDP_BITS) ==
@@ -2113,14 +2113,14 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
  	return false;
  }
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915,
  				     enum port port)
  {
  	const struct child_device_config *child;
  	int i;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	for (i = 0; i < i915->vbt.child_dev_num; i++) {
+		child = i915->vbt.child_dev + i;
if (child_dev_is_dp_dual_mode(child, port))
  			return true;
@@ -2131,20 +2131,20 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
/**
   * intel_bios_is_dsi_present - is DSI present in VBT
- * @dev_priv:	i915 device instance
+ * @i915:	i915 device instance
   * @port:	port for DSI if present
   *
   * Return true if DSI is present, and return the port in %port.
   */
-bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
+bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
  			       enum port *port)
  {
  	const struct child_device_config *child;
  	u8 dvo_port;
  	int i;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	for (i = 0; i < i915->vbt.child_dev_num; i++) {
+		child = i915->vbt.child_dev + i;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
  			continue;
@@ -2152,8 +2152,8 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
  		dvo_port = child->dvo_port;
if (dvo_port == DVO_PORT_MIPIA ||
-		    (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) ||
-		    (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) {
+		    (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(i915) >= 11) ||
+		    (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(i915) < 11)) {
  			if (port)
  				*port = dvo_port - DVO_PORT_MIPIA;
  			return true;
@@ -2205,11 +2205,11 @@ intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
  	return HAS_LSPCON(i915) && child && child->lspcon;
  }
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
  				   enum port port)
  {
  	const struct ddi_vbt_port_info *info =
-		&dev_priv->vbt.ddi_port_info[port];
+		&i915->vbt.ddi_port_info[port];
  	enum aux_ch aux_ch;
if (!info->alternate_aux_channel) {
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 4e42cfaf61a7..d292bd1790ae 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -226,19 +226,19 @@ struct mipi_pps_data {
  	u16 panel_power_cycle_delay;
  } __packed;
-void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_cleanup(struct drm_i915_private *dev_priv);
+void intel_bios_init(struct drm_i915_private *i915);
+void intel_bios_cleanup(struct drm_i915_private *i915);
  bool intel_bios_is_valid_vbt(const void *buf, size_t size);
-bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
-bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
-bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_tv_present(struct drm_i915_private *i915);
+bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port);
+bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915, enum port port);
+bool intel_bios_is_dsi_present(struct drm_i915_private *i915, enum port *port);
  bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
  				     enum port port);
  bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
  				  enum port port);
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, enum port port);
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_bw.c b/drivers/gpu/drm/i915/intel_bw.c
index 753ac3165061..1aca44a258d3 100644
--- a/drivers/gpu/drm/i915/intel_bw.c
+++ b/drivers/gpu/drm/i915/intel_bw.c
@@ -22,13 +22,13 @@ struct intel_qgv_info {
  	enum intel_dram_type dram_type;
  };
-static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *i915,
  					  struct intel_qgv_info *qi)
  {
  	u32 val = 0;
  	int ret;
- ret = sandybridge_pcode_read(dev_priv,
+	ret = sandybridge_pcode_read(i915,
  				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
  				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
  				     &val, NULL);
@@ -61,14 +61,14 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
  	return 0;
  }
-static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int icl_pcode_read_qgv_point_info(struct drm_i915_private *i915,
  					 struct intel_qgv_point *sp,
  					 int point)
  {
  	u32 val = 0, val2;
  	int ret;
- ret = sandybridge_pcode_read(dev_priv,
+	ret = sandybridge_pcode_read(i915,
  				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
  				     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
  				     &val, &val2);
@@ -87,12 +87,12 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
  	return 0;
  }
-static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+static int icl_get_qgv_points(struct drm_i915_private *i915,
  			      struct intel_qgv_info *qi)
  {
  	int i, ret;
- ret = icl_pcode_read_mem_global_info(dev_priv, qi);
+	ret = icl_pcode_read_mem_global_info(i915, qi);
  	if (ret)
  		return ret;
@@ -102,7 +102,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
  	for (i = 0; i < qi->num_points; i++) {
  		struct intel_qgv_point *sp = &qi->points[i];
- ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
+		ret = icl_pcode_read_qgv_point_info(i915, sp, i);
  		if (ret)
  			return ret;
@@ -142,7 +142,7 @@ static const struct intel_sa_info icl_sa_info = {
  	.displayrtids = 128,
  };
-static int icl_get_bw_info(struct drm_i915_private *dev_priv)
+static int icl_get_bw_info(struct drm_i915_private *i915)
  {
  	struct intel_qgv_info qi = {};
  	const struct intel_sa_info *sa = &icl_sa_info;
@@ -154,7 +154,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
  	int maxdebw;
  	int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi);
+	ret = icl_get_qgv_points(i915, &qi);
  	if (ret) {
  		DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits");
  		return ret;
@@ -170,8 +170,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
  		      icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
  	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
- for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
-		struct intel_bw_info *bi = &dev_priv->max_bw[i];
+	for (i = 0; i < ARRAY_SIZE(i915->max_bw); i++) {
+		struct intel_bw_info *bi = &i915->max_bw[i];
  		int clpchgroup;
  		int j;
@@ -206,18 +206,18 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
  	return 0;
  }
-static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
+static unsigned int icl_max_bw(struct drm_i915_private *i915,
  			       int num_planes, int qgv_point)
  {
  	int i;
/* Did we initialize the bw limits successfully? */
-	if (dev_priv->max_bw[0].num_planes == 0)
+	if (i915->max_bw[0].num_planes == 0)
  		return UINT_MAX;
- for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+	for (i = 0; i < ARRAY_SIZE(i915->max_bw); i++) {
  		const struct intel_bw_info *bi =
-			&dev_priv->max_bw[i];
+			&i915->max_bw[i];
if (num_planes >= bi->num_planes)
  			return bi->deratedbw[qgv_point];
@@ -226,24 +226,24 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
  	return 0;
  }
-void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+void intel_bw_init_hw(struct drm_i915_private *i915)
  {
-	if (IS_GEN(dev_priv, 11))
-		icl_get_bw_info(dev_priv);
+	if (IS_GEN(i915, 11))
+		icl_get_bw_info(i915);
  }
-static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
+static unsigned int intel_max_data_rate(struct drm_i915_private *i915,
  					int num_planes)
  {
-	if (IS_GEN(dev_priv, 11))
+	if (IS_GEN(i915, 11))
  		/*
  		 * FIXME with SAGV disabled maybe we can assume
  		 * point 1 will always be used? Seems to match
  		 * the behaviour observed in the wild.
  		 */
-		return min3(icl_max_bw(dev_priv, num_planes, 0),
-			    icl_max_bw(dev_priv, num_planes, 1),
-			    icl_max_bw(dev_priv, num_planes, 2));
+		return min3(icl_max_bw(i915, num_planes, 0),
+			    icl_max_bw(i915, num_planes, 1),
+			    icl_max_bw(i915, num_planes, 2));
  	else
  		return UINT_MAX;
  }
@@ -293,25 +293,25 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
  		      bw_state->num_active_planes[crtc->pipe]);
  }
-static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_num_active_planes(struct drm_i915_private *i915,
  					       const struct intel_bw_state *bw_state)
  {
  	unsigned int num_active_planes = 0;
  	enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+	for_each_pipe(i915, pipe)
  		num_active_planes += bw_state->num_active_planes[pipe];
return num_active_planes;
  }
-static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_data_rate(struct drm_i915_private *i915,
  				       const struct intel_bw_state *bw_state)
  {
  	unsigned int data_rate = 0;
  	enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+	for_each_pipe(i915, pipe)
  		data_rate += bw_state->data_rate[pipe];
return data_rate;
@@ -319,7 +319,7 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
int intel_bw_atomic_check(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
  	struct intel_bw_state *bw_state = NULL;
  	unsigned int data_rate, max_data_rate;
@@ -328,7 +328,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
  	int i;
/* FIXME earlier gens need some checks too */
-	if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(i915) < 11)
  		return 0;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -366,10 +366,10 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
  	if (!bw_state)
  		return 0;
- data_rate = intel_bw_data_rate(dev_priv, bw_state);
-	num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
+	data_rate = intel_bw_data_rate(i915, bw_state);
+	num_active_planes = intel_bw_num_active_planes(i915, bw_state);
- max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);
+	max_data_rate = intel_max_data_rate(i915, num_active_planes);
	data_rate = DIV_ROUND_UP(data_rate, 1000);
@@ -406,7 +406,7 @@ static const struct drm_private_state_funcs intel_bw_funcs = {
  	.atomic_destroy_state = intel_bw_destroy_state,
  };
-int intel_bw_init(struct drm_i915_private *dev_priv)
+int intel_bw_init(struct drm_i915_private *i915)
  {
  	struct intel_bw_state *state;
@@ -414,7 +414,7 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
  	if (!state)
  		return -ENOMEM;
- drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
+	drm_atomic_private_obj_init(&i915->drm, &i915->bw_obj,
  				    &state->base, &intel_bw_funcs);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_bw.h b/drivers/gpu/drm/i915/intel_bw.h
index e9d9c6d63bc3..396e5029f9a4 100644
--- a/drivers/gpu/drm/i915/intel_bw.h
+++ b/drivers/gpu/drm/i915/intel_bw.h
@@ -27,19 +27,19 @@ struct intel_bw_state {
  static inline struct intel_bw_state *
  intel_atomic_get_bw_state(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	struct drm_private_state *bw_state;
bw_state = drm_atomic_get_private_obj_state(&state->base,
-						    &dev_priv->bw_obj);
+						    &i915->bw_obj);
  	if (IS_ERR(bw_state))
  		return ERR_CAST(bw_state);
return to_intel_bw_state(bw_state);
  }
-void intel_bw_init_hw(struct drm_i915_private *dev_priv);
-int intel_bw_init(struct drm_i915_private *dev_priv);
+void intel_bw_init_hw(struct drm_i915_private *i915);
+int intel_bw_init(struct drm_i915_private *i915);
  int intel_bw_atomic_check(struct intel_atomic_state *state);
  void intel_bw_crtc_update(struct intel_bw_state *bw_state,
  			  const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 8993ab283562..02c617180c4b 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -53,46 +53,46 @@
   * dividers can be programmed correctly.
   */
-static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_133mhz_get_cdclk(struct drm_i915_private *i915,
  				   struct intel_cdclk_state *cdclk_state)
  {
  	cdclk_state->cdclk = 133333;
  }
-static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_200mhz_get_cdclk(struct drm_i915_private *i915,
  				   struct intel_cdclk_state *cdclk_state)
  {
  	cdclk_state->cdclk = 200000;
  }
-static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_266mhz_get_cdclk(struct drm_i915_private *i915,
  				   struct intel_cdclk_state *cdclk_state)
  {
  	cdclk_state->cdclk = 266667;
  }
-static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_333mhz_get_cdclk(struct drm_i915_private *i915,
  				   struct intel_cdclk_state *cdclk_state)
  {
  	cdclk_state->cdclk = 333333;
  }
-static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_400mhz_get_cdclk(struct drm_i915_private *i915,
  				   struct intel_cdclk_state *cdclk_state)
  {
  	cdclk_state->cdclk = 400000;
  }
-static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_450mhz_get_cdclk(struct drm_i915_private *i915,
  				   struct intel_cdclk_state *cdclk_state)
  {
  	cdclk_state->cdclk = 450000;
  }
-static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
+static void i85x_get_cdclk(struct drm_i915_private *i915,
  			   struct intel_cdclk_state *cdclk_state)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	u16 hpllcc = 0;
/*
@@ -131,10 +131,10 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
  	}
  }
-static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
+static void i915gm_get_cdclk(struct drm_i915_private *i915,
  			     struct intel_cdclk_state *cdclk_state)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -155,10 +155,10 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
  	}
  }
-static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
+static void i945gm_get_cdclk(struct drm_i915_private *i915,
  			     struct intel_cdclk_state *cdclk_state)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -179,7 +179,7 @@ static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
  	}
  }
-static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
+static unsigned int intel_hpll_vco(struct drm_i915_private *i915)
  {
  	static const unsigned int blb_vco[8] = {
  		[0] = 3200000,
@@ -223,20 +223,20 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
  	u8 tmp = 0;
/* FIXME other chipsets? */
-	if (IS_GM45(dev_priv))
+	if (IS_GM45(i915))
  		vco_table = ctg_vco;
-	else if (IS_G45(dev_priv))
+	else if (IS_G45(i915))
  		vco_table = elk_vco;
-	else if (IS_I965GM(dev_priv))
+	else if (IS_I965GM(i915))
  		vco_table = cl_vco;
-	else if (IS_PINEVIEW(dev_priv))
+	else if (IS_PINEVIEW(i915))
  		vco_table = pnv_vco;
-	else if (IS_G33(dev_priv))
+	else if (IS_G33(i915))
  		vco_table = blb_vco;
  	else
  		return 0;
- tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
+	tmp = I915_READ(IS_PINEVIEW(i915) || IS_MOBILE(i915) ?
  			HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
@@ -248,10 +248,10 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
  	return vco;
  }
-static void g33_get_cdclk(struct drm_i915_private *dev_priv,
+static void g33_get_cdclk(struct drm_i915_private *i915,
  			  struct intel_cdclk_state *cdclk_state)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	static const u8 div_3200[] = { 12, 10,  8,  7, 5, 16 };
  	static const u8 div_4000[] = { 14, 12, 10,  8, 6, 20 };
  	static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
@@ -260,7 +260,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
  	unsigned int cdclk_sel;
  	u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+	cdclk_state->vco = intel_hpll_vco(i915);
	pci_read_config_word(pdev, GCFGC, &tmp);
@@ -296,10 +296,10 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
  	cdclk_state->cdclk = 190476;
  }
-static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
+static void pnv_get_cdclk(struct drm_i915_private *i915,
  			  struct intel_cdclk_state *cdclk_state)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -329,10 +329,10 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
  	}
  }
-static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
+static void i965gm_get_cdclk(struct drm_i915_private *i915,
  			     struct intel_cdclk_state *cdclk_state)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	static const u8 div_3200[] = { 16, 10,  8 };
  	static const u8 div_4000[] = { 20, 12, 10 };
  	static const u8 div_5333[] = { 24, 16, 14 };
@@ -340,7 +340,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
  	unsigned int cdclk_sel;
  	u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+	cdclk_state->vco = intel_hpll_vco(i915);
	pci_read_config_word(pdev, GCFGC, &tmp);
@@ -373,14 +373,14 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
  	cdclk_state->cdclk = 200000;
  }
-static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
+static void gm45_get_cdclk(struct drm_i915_private *i915,
  			   struct intel_cdclk_state *cdclk_state)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	unsigned int cdclk_sel;
  	u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+	cdclk_state->vco = intel_hpll_vco(i915);
	pci_read_config_word(pdev, GCFGC, &tmp);
@@ -403,7 +403,7 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
  	}
  }
-static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
+static void hsw_get_cdclk(struct drm_i915_private *i915,
  			  struct intel_cdclk_state *cdclk_state)
  {
  	u32 lcpll = I915_READ(LCPLL_CTL);
@@ -415,15 +415,15 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
  		cdclk_state->cdclk = 450000;
  	else if (freq == LCPLL_CLK_FREQ_450)
  		cdclk_state->cdclk = 450000;
-	else if (IS_HSW_ULT(dev_priv))
+	else if (IS_HSW_ULT(i915))
  		cdclk_state->cdclk = 337500;
  	else
  		cdclk_state->cdclk = 540000;
  }
-static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
+static int vlv_calc_cdclk(struct drm_i915_private *i915, int min_cdclk)
  {
-	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ?
+	int freq_320 = (i915->hpll_freq <<  1) % 320000 != 0 ?
  		333333 : 320000;
/*
@@ -431,7 +431,7 @@ static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
  	 * Not sure what's wrong. For now use 200MHz only when all pipes
  	 * are off.
  	 */
-	if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
+	if (IS_VALLEYVIEW(i915) && min_cdclk > freq_320)
  		return 400000;
  	else if (min_cdclk > 266667)
  		return freq_320;
@@ -441,9 +441,9 @@ static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
  		return 200000;
  }
-static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
+static u8 vlv_calc_voltage_level(struct drm_i915_private *i915, int cdclk)
  {
-	if (IS_VALLEYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915)) {
  		if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
  			return 2;
  		else if (cdclk >= 266667)
@@ -456,29 +456,29 @@ static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
  		 * hardware has shown that we just need to write the desired
  		 * CCK divider into the Punit register.
  		 */
-		return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+		return DIV_ROUND_CLOSEST(i915->hpll_freq << 1, cdclk) - 1;
  	}
  }
-static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
+static void vlv_get_cdclk(struct drm_i915_private *i915,
  			  struct intel_cdclk_state *cdclk_state)
  {
  	u32 val;
- vlv_iosf_sb_get(dev_priv,
+	vlv_iosf_sb_get(i915,
  			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
- cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
-	cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+	cdclk_state->vco = vlv_get_hpll_vco(i915);
+	cdclk_state->cdclk = vlv_get_cck_clock(i915, "cdclk",
  					       CCK_DISPLAY_CLOCK_CONTROL,
  					       cdclk_state->vco);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+	val = vlv_punit_read(i915, PUNIT_REG_DSPSSPM);
- vlv_iosf_sb_put(dev_priv,
+	vlv_iosf_sb_put(i915,
  			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
- if (IS_VALLEYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915))
  		cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
  			DSPFREQGUAR_SHIFT;
  	else
@@ -486,18 +486,18 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
  			DSPFREQGUAR_SHIFT_CHV;
  }
-static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
+static void vlv_program_pfi_credits(struct drm_i915_private *i915)
  {
  	unsigned int credits, default_credits;
- if (IS_CHERRYVIEW(dev_priv))
+	if (IS_CHERRYVIEW(i915))
  		default_credits = PFI_CREDIT(12);
  	else
  		default_credits = PFI_CREDIT(8);
- if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+	if (i915->cdclk.hw.cdclk >= i915->czclk_freq) {
  		/* CHV suggested value is 31 or 63 */
-		if (IS_CHERRYVIEW(dev_priv))
+		if (IS_CHERRYVIEW(i915))
  			credits = PFI_CREDIT_63;
  		else
  			credits = PFI_CREDIT(15);
@@ -522,7 +522,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
  	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
  }
-static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
+static void vlv_set_cdclk(struct drm_i915_private *i915,
  			  const struct intel_cdclk_state *cdclk_state,
  			  enum pipe pipe)
  {
@@ -548,18 +548,18 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
  	 * a system suspend.  So grab the PIPE-A domain, which covers
  	 * the HW blocks needed for the following programming.
  	 */
-	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+	wakeref = intel_display_power_get(i915, POWER_DOMAIN_PIPE_A);
- vlv_iosf_sb_get(dev_priv,
+	vlv_iosf_sb_get(i915,
  			BIT(VLV_IOSF_SB_CCK) |
  			BIT(VLV_IOSF_SB_BUNIT) |
  			BIT(VLV_IOSF_SB_PUNIT));
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+	val = vlv_punit_read(i915, PUNIT_REG_DSPSSPM);
  	val &= ~DSPFREQGUAR_MASK;
  	val |= (cmd << DSPFREQGUAR_SHIFT);
-	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
-	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+	vlv_punit_write(i915, PUNIT_REG_DSPSSPM, val);
+	if (wait_for((vlv_punit_read(i915, PUNIT_REG_DSPSSPM) &
  		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
  		     50)) {
  		DRM_ERROR("timed out waiting for CDclk change\n");
@@ -568,23 +568,23 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
  	if (cdclk == 400000) {
  		u32 divider;
- divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
+		divider = DIV_ROUND_CLOSEST(i915->hpll_freq << 1,
  					    cdclk) - 1;
/* adjust cdclk divider */
-		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+		val = vlv_cck_read(i915, CCK_DISPLAY_CLOCK_CONTROL);
  		val &= ~CCK_FREQUENCY_VALUES;
  		val |= divider;
-		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+		vlv_cck_write(i915, CCK_DISPLAY_CLOCK_CONTROL, val);
- if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+		if (wait_for((vlv_cck_read(i915, CCK_DISPLAY_CLOCK_CONTROL) &
  			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
  			     50))
  			DRM_ERROR("timed out waiting for CDclk change\n");
  	}
/* adjust self-refresh exit latency value */
-	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+	val = vlv_bunit_read(i915, BUNIT_REG_BISOC);
  	val &= ~0x7f;
/*
@@ -595,21 +595,21 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
  		val |= 4500 / 250; /* 4.5 usec */
  	else
  		val |= 3000 / 250; /* 3.0 usec */
-	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+	vlv_bunit_write(i915, BUNIT_REG_BISOC, val);
- vlv_iosf_sb_put(dev_priv,
+	vlv_iosf_sb_put(i915,
  			BIT(VLV_IOSF_SB_CCK) |
  			BIT(VLV_IOSF_SB_BUNIT) |
  			BIT(VLV_IOSF_SB_PUNIT));
- intel_update_cdclk(dev_priv);
+	intel_update_cdclk(i915);
- vlv_program_pfi_credits(dev_priv);
+	vlv_program_pfi_credits(i915);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_PIPE_A, wakeref);
  }
-static void chv_set_cdclk(struct drm_i915_private *dev_priv,
+static void chv_set_cdclk(struct drm_i915_private *i915,
  			  const struct intel_cdclk_state *cdclk_state,
  			  enum pipe pipe)
  {
@@ -634,26 +634,26 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
  	 * a system suspend.  So grab the PIPE-A domain, which covers
  	 * the HW blocks needed for the following programming.
  	 */
-	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+	wakeref = intel_display_power_get(i915, POWER_DOMAIN_PIPE_A);
- vlv_punit_get(dev_priv);
-	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+	vlv_punit_get(i915);
+	val = vlv_punit_read(i915, PUNIT_REG_DSPSSPM);
  	val &= ~DSPFREQGUAR_MASK_CHV;
  	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
-	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
-	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+	vlv_punit_write(i915, PUNIT_REG_DSPSSPM, val);
+	if (wait_for((vlv_punit_read(i915, PUNIT_REG_DSPSSPM) &
  		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
  		     50)) {
  		DRM_ERROR("timed out waiting for CDclk change\n");
  	}
- vlv_punit_put(dev_priv);
+	vlv_punit_put(i915);
- intel_update_cdclk(dev_priv);
+	intel_update_cdclk(i915);
- vlv_program_pfi_credits(dev_priv);
+	vlv_program_pfi_credits(i915);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_PIPE_A, wakeref);
  }
static int bdw_calc_cdclk(int min_cdclk)
@@ -683,7 +683,7 @@ static u8 bdw_calc_voltage_level(int cdclk)
  	}
  }
-static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
+static void bdw_get_cdclk(struct drm_i915_private *i915,
  			  struct intel_cdclk_state *cdclk_state)
  {
  	u32 lcpll = I915_READ(LCPLL_CTL);
@@ -710,7 +710,7 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
  		bdw_calc_voltage_level(cdclk_state->cdclk);
  }
-static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
+static void bdw_set_cdclk(struct drm_i915_private *i915,
  			  const struct intel_cdclk_state *cdclk_state,
  			  enum pipe pipe)
  {
@@ -726,7 +726,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
  		 "trying to change cdclk frequency with cdclk not enabled\n"))
  		return;
- ret = sandybridge_pcode_write(dev_priv,
+	ret = sandybridge_pcode_write(i915,
  				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
  	if (ret) {
  		DRM_ERROR("failed to inform pcode about cdclk change\n");
@@ -776,12 +776,12 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
  			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
  		DRM_ERROR("Switching back to LCPLL failed\n");
- sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+	sandybridge_pcode_write(i915, HSW_PCODE_DE_WRITE_FREQ_REQ,
  				cdclk_state->voltage_level);
	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
-	intel_update_cdclk(dev_priv);
+	intel_update_cdclk(i915);
  }
static int skl_calc_cdclk(int min_cdclk, int vco)
@@ -819,7 +819,7 @@ static u8 skl_calc_voltage_level(int cdclk)
  		return 0;
  }
-static void skl_dpll0_update(struct drm_i915_private *dev_priv,
+static void skl_dpll0_update(struct drm_i915_private *i915,
  			     struct intel_cdclk_state *cdclk_state)
  {
  	u32 val;
@@ -859,12 +859,12 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
  	}
  }
-static void skl_get_cdclk(struct drm_i915_private *dev_priv,
+static void skl_get_cdclk(struct drm_i915_private *i915,
  			  struct intel_cdclk_state *cdclk_state)
  {
  	u32 cdctl;
- skl_dpll0_update(dev_priv, cdclk_state);
+	skl_dpll0_update(i915, cdclk_state);
	cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
@@ -926,18 +926,18 @@ static int skl_cdclk_decimal(int cdclk)
  	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
  }
-static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
+static void skl_set_preferred_cdclk_vco(struct drm_i915_private *i915,
  					int vco)
  {
-	bool changed = dev_priv->skl_preferred_vco_freq != vco;
+	bool changed = i915->skl_preferred_vco_freq != vco;
- dev_priv->skl_preferred_vco_freq = vco;
+	i915->skl_preferred_vco_freq = vco;
if (changed)
-		intel_update_max_cdclk(dev_priv);
+		intel_update_max_cdclk(i915);
  }
-static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
+static void skl_dpll0_enable(struct drm_i915_private *i915, int vco)
  {
  	u32 val;
@@ -969,29 +969,29 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
  				    5))
  		DRM_ERROR("DPLL0 not locked\n");
- dev_priv->cdclk.hw.vco = vco;
+	i915->cdclk.hw.vco = vco;
/* We'll want to keep using the current vco from now on. */
-	skl_set_preferred_cdclk_vco(dev_priv, vco);
+	skl_set_preferred_cdclk_vco(i915, vco);
  }
-static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
+static void skl_dpll0_disable(struct drm_i915_private *i915)
  {
  	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
  				    1))
  		DRM_ERROR("Couldn't disable DPLL0\n");
- dev_priv->cdclk.hw.vco = 0;
+	i915->cdclk.hw.vco = 0;
  }
-static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+static void skl_set_cdclk(struct drm_i915_private *i915,
  			  const struct intel_cdclk_state *cdclk_state,
  			  enum pipe pipe)
  {
@@ -1008,9 +1008,9 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
  	 * use the corresponding VCO freq as that always leads to using the
  	 * minimum 308MHz CDCLK.
  	 */
-	WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+	WARN_ON_ONCE(IS_SKYLAKE(i915) && vco == 8640000);
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+	ret = skl_pcode_request(i915, SKL_PCODE_CDCLK_CONTROL,
  				SKL_CDCLK_PREPARE_FOR_CHANGE,
  				SKL_CDCLK_READY_FOR_CHANGE,
  				SKL_CDCLK_READY_FOR_CHANGE, 3);
@@ -1023,7 +1023,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
  	/* Choose frequency for this cdclk */
  	switch (cdclk) {
  	default:
-		WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
+		WARN_ON(cdclk != i915->cdclk.hw.bypass);
  		WARN_ON(vco != 0);
  		/* fall through */
  	case 308571:
@@ -1043,13 +1043,13 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
  		break;
  	}
- if (dev_priv->cdclk.hw.vco != 0 &&
-	    dev_priv->cdclk.hw.vco != vco)
-		skl_dpll0_disable(dev_priv);
+	if (i915->cdclk.hw.vco != 0 &&
+	    i915->cdclk.hw.vco != vco)
+		skl_dpll0_disable(i915);
	cdclk_ctl = I915_READ(CDCLK_CTL);
-	if (dev_priv->cdclk.hw.vco != vco) {
+	if (i915->cdclk.hw.vco != vco) {
  		/* Wa Display #1183: skl,kbl,cfl */
  		cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
  		cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
@@ -1061,8 +1061,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
  	I915_WRITE(CDCLK_CTL, cdclk_ctl);
  	POSTING_READ(CDCLK_CTL);
- if (dev_priv->cdclk.hw.vco != vco)
-		skl_dpll0_enable(dev_priv, vco);
+	if (i915->cdclk.hw.vco != vco)
+		skl_dpll0_enable(i915, vco);
/* Wa Display #1183: skl,kbl,cfl */
  	cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
@@ -1077,13 +1077,13 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
  	POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
-	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+	sandybridge_pcode_write(i915, SKL_PCODE_CDCLK_CONTROL,
  				cdclk_state->voltage_level);
- intel_update_cdclk(dev_priv);
+	intel_update_cdclk(i915);
  }
-static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void skl_sanitize_cdclk(struct drm_i915_private *i915)
  {
  	u32 cdctl, expected;
@@ -1095,12 +1095,12 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
  	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
  		goto sanitize;
- intel_update_cdclk(dev_priv);
-	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+	intel_update_cdclk(i915);
+	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
/* Is PLL enabled and locked ? */
-	if (dev_priv->cdclk.hw.vco == 0 ||
-	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+	if (i915->cdclk.hw.vco == 0 ||
+	    i915->cdclk.hw.cdclk == i915->cdclk.hw.bypass)
  		goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1111,7 +1111,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
  	 */
  	cdctl = I915_READ(CDCLK_CTL);
  	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
-		skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+		skl_cdclk_decimal(i915->cdclk.hw.cdclk);
  	if (cdctl == expected)
  		/* All well; nothing to sanitize */
  		return;
@@ -1120,49 +1120,49 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
  	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
-	dev_priv->cdclk.hw.cdclk = 0;
+	i915->cdclk.hw.cdclk = 0;
  	/* force full PLL disable + enable */
-	dev_priv->cdclk.hw.vco = -1;
+	i915->cdclk.hw.vco = -1;
  }
-static void skl_init_cdclk(struct drm_i915_private *dev_priv)
+static void skl_init_cdclk(struct drm_i915_private *i915)
  {
  	struct intel_cdclk_state cdclk_state;
- skl_sanitize_cdclk(dev_priv);
+	skl_sanitize_cdclk(i915);
- if (dev_priv->cdclk.hw.cdclk != 0 &&
-	    dev_priv->cdclk.hw.vco != 0) {
+	if (i915->cdclk.hw.cdclk != 0 &&
+	    i915->cdclk.hw.vco != 0) {
  		/*
  		 * Use the current vco as our initial
  		 * guess as to what the preferred vco is.
  		 */
-		if (dev_priv->skl_preferred_vco_freq == 0)
-			skl_set_preferred_cdclk_vco(dev_priv,
-						    dev_priv->cdclk.hw.vco);
+		if (i915->skl_preferred_vco_freq == 0)
+			skl_set_preferred_cdclk_vco(i915,
+						    i915->cdclk.hw.vco);
  		return;
  	}
- cdclk_state = dev_priv->cdclk.hw;
+	cdclk_state = i915->cdclk.hw;
- cdclk_state.vco = dev_priv->skl_preferred_vco_freq;
+	cdclk_state.vco = i915->skl_preferred_vco_freq;
  	if (cdclk_state.vco == 0)
  		cdclk_state.vco = 8100000;
  	cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
  	cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	skl_set_cdclk(i915, &cdclk_state, INVALID_PIPE);
  }
-static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void skl_uninit_cdclk(struct drm_i915_private *i915)
  {
-	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+	struct intel_cdclk_state cdclk_state = i915->cdclk.hw;
cdclk_state.cdclk = cdclk_state.bypass;
  	cdclk_state.vco = 0;
  	cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	skl_set_cdclk(i915, &cdclk_state, INVALID_PIPE);
  }
static int bxt_calc_cdclk(int min_cdclk)
@@ -1194,11 +1194,11 @@ static u8 bxt_calc_voltage_level(int cdclk)
  	return DIV_ROUND_UP(cdclk, 25000);
  }
-static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static int bxt_de_pll_vco(struct drm_i915_private *i915, int cdclk)
  {
  	int ratio;
- if (cdclk == dev_priv->cdclk.hw.bypass)
+	if (cdclk == i915->cdclk.hw.bypass)
  		return 0;
switch (cdclk) {
@@ -1216,14 +1216,14 @@ static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
  		break;
  	}
- return dev_priv->cdclk.hw.ref * ratio;
+	return i915->cdclk.hw.ref * ratio;
  }
-static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static int glk_de_pll_vco(struct drm_i915_private *i915, int cdclk)
  {
  	int ratio;
- if (cdclk == dev_priv->cdclk.hw.bypass)
+	if (cdclk == i915->cdclk.hw.bypass)
  		return 0;
switch (cdclk) {
@@ -1237,10 +1237,10 @@ static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
  		break;
  	}
- return dev_priv->cdclk.hw.ref * ratio;
+	return i915->cdclk.hw.ref * ratio;
  }
-static void bxt_de_pll_update(struct drm_i915_private *dev_priv,
+static void bxt_de_pll_update(struct drm_i915_private *i915,
  			      struct intel_cdclk_state *cdclk_state)
  {
  	u32 val;
@@ -1259,13 +1259,13 @@ static void bxt_de_pll_update(struct drm_i915_private *dev_priv,
  	cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
  }
-static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
+static void bxt_get_cdclk(struct drm_i915_private *i915,
  			  struct intel_cdclk_state *cdclk_state)
  {
  	u32 divider;
  	int div;
- bxt_de_pll_update(dev_priv, cdclk_state);
+	bxt_de_pll_update(i915, cdclk_state);
	cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
@@ -1279,7 +1279,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
  		div = 2;
  		break;
  	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
-		WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
+		WARN(IS_GEMINILAKE(i915), "Unsupported divider\n");
  		div = 3;
  		break;
  	case BXT_CDCLK_CD2X_DIV_SEL_2:
@@ -1304,22 +1304,22 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
  		bxt_calc_voltage_level(cdclk_state->cdclk);
  }
-static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+static void bxt_de_pll_disable(struct drm_i915_private *i915)
  {
  	I915_WRITE(BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
  				    1))
  		DRM_ERROR("timeout waiting for DE PLL unlock\n");
- dev_priv->cdclk.hw.vco = 0;
+	i915->cdclk.hw.vco = 0;
  }
-static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+static void bxt_de_pll_enable(struct drm_i915_private *i915, int vco)
  {
-	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+	int ratio = DIV_ROUND_CLOSEST(vco, i915->cdclk.hw.ref);
  	u32 val;
val = I915_READ(BXT_DE_PLL_CTL);
@@ -1330,17 +1330,17 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
  	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    BXT_DE_PLL_ENABLE,
  				    BXT_DE_PLL_LOCK,
  				    BXT_DE_PLL_LOCK,
  				    1))
  		DRM_ERROR("timeout waiting for DE PLL lock\n");
- dev_priv->cdclk.hw.vco = vco;
+	i915->cdclk.hw.vco = vco;
  }
-static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
+static void bxt_set_cdclk(struct drm_i915_private *i915,
  			  const struct intel_cdclk_state *cdclk_state,
  			  enum pipe pipe)
  {
@@ -1352,14 +1352,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
  	/* cdclk = vco / 2 / div{1,1.5,2,4} */
  	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
  	default:
-		WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
+		WARN_ON(cdclk != i915->cdclk.hw.bypass);
  		WARN_ON(vco != 0);
  		/* fall through */
  	case 2:
  		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
  		break;
  	case 3:
-		WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
+		WARN(IS_GEMINILAKE(i915), "Unsupported divider\n");
  		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
  		break;
  	case 4:
@@ -1375,7 +1375,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
  	 * requires us to wait up to 150usec, but that leads to timeouts;
  	 * the 2ms used here is based on experiment.
  	 */
-	ret = sandybridge_pcode_write_timeout(dev_priv,
+	ret = sandybridge_pcode_write_timeout(i915,
  					      HSW_PCODE_DE_WRITE_FREQ_REQ,
  					      0x80000000, 150, 2);
  	if (ret) {
@@ -1384,12 +1384,12 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
  		return;
  	}
- if (dev_priv->cdclk.hw.vco != 0 &&
-	    dev_priv->cdclk.hw.vco != vco)
-		bxt_de_pll_disable(dev_priv);
+	if (i915->cdclk.hw.vco != 0 &&
+	    i915->cdclk.hw.vco != vco)
+		bxt_de_pll_disable(i915);
- if (dev_priv->cdclk.hw.vco != vco)
-		bxt_de_pll_enable(dev_priv, vco);
+	if (i915->cdclk.hw.vco != vco)
+		bxt_de_pll_enable(i915, vco);
val = divider | skl_cdclk_decimal(cdclk);
  	if (pipe == INVALID_PIPE)
@@ -1405,7 +1405,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
  	I915_WRITE(CDCLK_CTL, val);
if (pipe != INVALID_PIPE)
-		intel_wait_for_vblank(dev_priv, pipe);
+		intel_wait_for_vblank(i915, pipe);
/*
  	 * The timeout isn't specified, the 2ms used here is based on
@@ -1413,7 +1413,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
  	 * FIXME: Waiting for the request completion could be delayed until
  	 * the next PCODE request based on BSpec.
  	 */
-	ret = sandybridge_pcode_write_timeout(dev_priv,
+	ret = sandybridge_pcode_write_timeout(i915,
  					      HSW_PCODE_DE_WRITE_FREQ_REQ,
  					      cdclk_state->voltage_level, 150, 2);
  	if (ret) {
@@ -1422,18 +1422,18 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
  		return;
  	}
- intel_update_cdclk(dev_priv);
+	intel_update_cdclk(i915);
  }
-static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *i915)
  {
  	u32 cdctl, expected;
- intel_update_cdclk(dev_priv);
-	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+	intel_update_cdclk(i915);
+	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
- if (dev_priv->cdclk.hw.vco == 0 ||
-	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+	if (i915->cdclk.hw.vco == 0 ||
+	    i915->cdclk.hw.cdclk == i915->cdclk.hw.bypass)
  		goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1451,12 +1451,12 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
  	cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
-		skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+		skl_cdclk_decimal(i915->cdclk.hw.cdclk);
  	/*
  	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
  	 * enable otherwise.
  	 */
-	if (dev_priv->cdclk.hw.cdclk >= 500000)
+	if (i915->cdclk.hw.cdclk >= 500000)
  		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (cdctl == expected)
@@ -1467,50 +1467,50 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
  	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
-	dev_priv->cdclk.hw.cdclk = 0;
+	i915->cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
-	dev_priv->cdclk.hw.vco = -1;
+	i915->cdclk.hw.vco = -1;
  }
-static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_init_cdclk(struct drm_i915_private *i915)
  {
  	struct intel_cdclk_state cdclk_state;
- bxt_sanitize_cdclk(dev_priv);
+	bxt_sanitize_cdclk(i915);
- if (dev_priv->cdclk.hw.cdclk != 0 &&
-	    dev_priv->cdclk.hw.vco != 0)
+	if (i915->cdclk.hw.cdclk != 0 &&
+	    i915->cdclk.hw.vco != 0)
  		return;
- cdclk_state = dev_priv->cdclk.hw;
+	cdclk_state = i915->cdclk.hw;
/*
  	 * FIXME:
  	 * - The initial CDCLK needs to be read from VBT.
  	 *   Need to make this change after VBT has changes for BXT.
  	 */
-	if (IS_GEMINILAKE(dev_priv)) {
+	if (IS_GEMINILAKE(i915)) {
  		cdclk_state.cdclk = glk_calc_cdclk(0);
-		cdclk_state.vco = glk_de_pll_vco(dev_priv, cdclk_state.cdclk);
+		cdclk_state.vco = glk_de_pll_vco(i915, cdclk_state.cdclk);
  	} else {
  		cdclk_state.cdclk = bxt_calc_cdclk(0);
-		cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
+		cdclk_state.vco = bxt_de_pll_vco(i915, cdclk_state.cdclk);
  	}
  	cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	bxt_set_cdclk(i915, &cdclk_state, INVALID_PIPE);
  }
-static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_uninit_cdclk(struct drm_i915_private *i915)
  {
-	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+	struct intel_cdclk_state cdclk_state = i915->cdclk.hw;
cdclk_state.cdclk = cdclk_state.bypass;
  	cdclk_state.vco = 0;
  	cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	bxt_set_cdclk(i915, &cdclk_state, INVALID_PIPE);
  }
static int cnl_calc_cdclk(int min_cdclk)
@@ -1533,7 +1533,7 @@ static u8 cnl_calc_voltage_level(int cdclk)
  		return 0;
  }
-static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
+static void cnl_cdclk_pll_update(struct drm_i915_private *i915,
  				 struct intel_cdclk_state *cdclk_state)
  {
  	u32 val;
@@ -1555,13 +1555,13 @@ static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
  	cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref;
  }
-static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
+static void cnl_get_cdclk(struct drm_i915_private *i915,
  			 struct intel_cdclk_state *cdclk_state)
  {
  	u32 divider;
  	int div;
- cnl_cdclk_pll_update(dev_priv, cdclk_state);
+	cnl_cdclk_pll_update(i915, cdclk_state);
	cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
@@ -1593,7 +1593,7 @@ static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
  		cnl_calc_voltage_level(cdclk_state->cdclk);
  }
-static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
+static void cnl_cdclk_pll_disable(struct drm_i915_private *i915)
  {
  	u32 val;
@@ -1605,12 +1605,12 @@ static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
  	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
  		DRM_ERROR("timeout waiting for CDCLK PLL unlock\n");
- dev_priv->cdclk.hw.vco = 0;
+	i915->cdclk.hw.vco = 0;
  }
-static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
+static void cnl_cdclk_pll_enable(struct drm_i915_private *i915, int vco)
  {
-	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+	int ratio = DIV_ROUND_CLOSEST(vco, i915->cdclk.hw.ref);
  	u32 val;
val = CNL_CDCLK_PLL_RATIO(ratio);
@@ -1623,10 +1623,10 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
  	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
  		DRM_ERROR("timeout waiting for CDCLK PLL lock\n");
- dev_priv->cdclk.hw.vco = vco;
+	i915->cdclk.hw.vco = vco;
  }
-static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
+static void cnl_set_cdclk(struct drm_i915_private *i915,
  			  const struct intel_cdclk_state *cdclk_state,
  			  enum pipe pipe)
  {
@@ -1635,7 +1635,7 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
  	u32 val, divider;
  	int ret;
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+	ret = skl_pcode_request(i915, SKL_PCODE_CDCLK_CONTROL,
  				SKL_CDCLK_PREPARE_FOR_CHANGE,
  				SKL_CDCLK_READY_FOR_CHANGE,
  				SKL_CDCLK_READY_FOR_CHANGE, 3);
@@ -1648,7 +1648,7 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
  	/* cdclk = vco / 2 / div{1,2} */
  	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
  	default:
-		WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
+		WARN_ON(cdclk != i915->cdclk.hw.bypass);
  		WARN_ON(vco != 0);
  		/* fall through */
  	case 2:
@@ -1659,12 +1659,12 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
  		break;
  	}
- if (dev_priv->cdclk.hw.vco != 0 &&
-	    dev_priv->cdclk.hw.vco != vco)
-		cnl_cdclk_pll_disable(dev_priv);
+	if (i915->cdclk.hw.vco != 0 &&
+	    i915->cdclk.hw.vco != vco)
+		cnl_cdclk_pll_disable(i915);
- if (dev_priv->cdclk.hw.vco != vco)
-		cnl_cdclk_pll_enable(dev_priv, vco);
+	if (i915->cdclk.hw.vco != vco)
+		cnl_cdclk_pll_enable(i915, vco);
val = divider | skl_cdclk_decimal(cdclk);
  	if (pipe == INVALID_PIPE)
@@ -1674,26 +1674,26 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
  	I915_WRITE(CDCLK_CTL, val);
if (pipe != INVALID_PIPE)
-		intel_wait_for_vblank(dev_priv, pipe);
+		intel_wait_for_vblank(i915, pipe);
/* inform PCU of the change */
-	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+	sandybridge_pcode_write(i915, SKL_PCODE_CDCLK_CONTROL,
  				cdclk_state->voltage_level);
- intel_update_cdclk(dev_priv);
+	intel_update_cdclk(i915);
/*
  	 * Can't read out the voltage level :(
  	 * Let's just assume everything is as expected.
  	 */
-	dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+	i915->cdclk.hw.voltage_level = cdclk_state->voltage_level;
  }
-static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static int cnl_cdclk_pll_vco(struct drm_i915_private *i915, int cdclk)
  {
  	int ratio;
- if (cdclk == dev_priv->cdclk.hw.bypass)
+	if (cdclk == i915->cdclk.hw.bypass)
  		return 0;
switch (cdclk) {
@@ -1702,25 +1702,25 @@ static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
  		/* fall through */
  	case 168000:
  	case 336000:
-		ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
+		ratio = i915->cdclk.hw.ref == 19200 ? 35 : 28;
  		break;
  	case 528000:
-		ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44;
+		ratio = i915->cdclk.hw.ref == 19200 ? 55 : 44;
  		break;
  	}
- return dev_priv->cdclk.hw.ref * ratio;
+	return i915->cdclk.hw.ref * ratio;
  }
-static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_sanitize_cdclk(struct drm_i915_private *i915)
  {
  	u32 cdctl, expected;
- intel_update_cdclk(dev_priv);
-	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+	intel_update_cdclk(i915);
+	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
- if (dev_priv->cdclk.hw.vco == 0 ||
-	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+	if (i915->cdclk.hw.vco == 0 ||
+	    i915->cdclk.hw.cdclk == i915->cdclk.hw.bypass)
  		goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1738,7 +1738,7 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
  	cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
-		   skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+		   skl_cdclk_decimal(i915->cdclk.hw.cdclk);
if (cdctl == expected)
  		/* All well; nothing to sanitize */
@@ -1748,10 +1748,10 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
  	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
-	dev_priv->cdclk.hw.cdclk = 0;
+	i915->cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
-	dev_priv->cdclk.hw.vco = -1;
+	i915->cdclk.hw.vco = -1;
  }
static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
@@ -1781,11 +1781,11 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
  		return ranges[0];
  }
-static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static int icl_calc_cdclk_pll_vco(struct drm_i915_private *i915, int cdclk)
  {
  	int ratio;
- if (cdclk == dev_priv->cdclk.hw.bypass)
+	if (cdclk == i915->cdclk.hw.bypass)
  		return 0;
switch (cdclk) {
@@ -1795,21 +1795,21 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
  	case 307200:
  	case 556800:
  	case 652800:
-		WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
-			dev_priv->cdclk.hw.ref != 38400);
+		WARN_ON(i915->cdclk.hw.ref != 19200 &&
+			i915->cdclk.hw.ref != 38400);
  		break;
  	case 312000:
  	case 552000:
  	case 648000:
-		WARN_ON(dev_priv->cdclk.hw.ref != 24000);
+		WARN_ON(i915->cdclk.hw.ref != 24000);
  	}
- ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
+	ratio = cdclk / (i915->cdclk.hw.ref / 2);
- return dev_priv->cdclk.hw.ref * ratio;
+	return i915->cdclk.hw.ref * ratio;
  }
-static void icl_set_cdclk(struct drm_i915_private *dev_priv,
+static void icl_set_cdclk(struct drm_i915_private *i915,
  			  const struct intel_cdclk_state *cdclk_state,
  			  enum pipe pipe)
  {
@@ -1817,7 +1817,7 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
  	unsigned int vco = cdclk_state->vco;
  	int ret;
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+	ret = skl_pcode_request(i915, SKL_PCODE_CDCLK_CONTROL,
  				SKL_CDCLK_PREPARE_FOR_CHANGE,
  				SKL_CDCLK_READY_FOR_CHANGE,
  				SKL_CDCLK_READY_FOR_CHANGE, 3);
@@ -1827,12 +1827,12 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
  		return;
  	}
- if (dev_priv->cdclk.hw.vco != 0 &&
-	    dev_priv->cdclk.hw.vco != vco)
-		cnl_cdclk_pll_disable(dev_priv);
+	if (i915->cdclk.hw.vco != 0 &&
+	    i915->cdclk.hw.vco != vco)
+		cnl_cdclk_pll_disable(i915);
- if (dev_priv->cdclk.hw.vco != vco)
-		cnl_cdclk_pll_enable(dev_priv, vco);
+	if (i915->cdclk.hw.vco != vco)
+		cnl_cdclk_pll_enable(i915, vco);
/*
  	 * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
@@ -1842,16 +1842,16 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
  	I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
  			      skl_cdclk_decimal(cdclk));
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+	sandybridge_pcode_write(i915, SKL_PCODE_CDCLK_CONTROL,
  				cdclk_state->voltage_level);
- intel_update_cdclk(dev_priv);
+	intel_update_cdclk(i915);
/*
  	 * Can't read out the voltage level :(
  	 * Let's just assume everything is as expected.
  	 */
-	dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+	i915->cdclk.hw.voltage_level = cdclk_state->voltage_level;
  }
static u8 icl_calc_voltage_level(int cdclk)
@@ -1864,7 +1864,7 @@ static u8 icl_calc_voltage_level(int cdclk)
  		return 0;
  }
-static void icl_get_cdclk(struct drm_i915_private *dev_priv,
+static void icl_get_cdclk(struct drm_i915_private *i915,
  			  struct intel_cdclk_state *cdclk_state)
  {
  	u32 val;
@@ -1915,17 +1915,17 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
  		icl_calc_voltage_level(cdclk_state->cdclk);
  }
-static void icl_init_cdclk(struct drm_i915_private *dev_priv)
+static void icl_init_cdclk(struct drm_i915_private *i915)
  {
  	struct intel_cdclk_state sanitized_state;
  	u32 val;
- /* This sets dev_priv->cdclk.hw. */
-	intel_update_cdclk(dev_priv);
-	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+	/* This sets i915->cdclk.hw. */
+	intel_update_cdclk(i915);
+	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
/* This means CDCLK disabled. */
-	if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+	if (i915->cdclk.hw.cdclk == i915->cdclk.hw.bypass)
  		goto sanitize;
val = I915_READ(CDCLK_CTL);
@@ -1934,7 +1934,7 @@ static void icl_init_cdclk(struct drm_i915_private *dev_priv)
  		goto sanitize;
if ((val & CDCLK_FREQ_DECIMAL_MASK) !=
-	    skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk))
+	    skl_cdclk_decimal(i915->cdclk.hw.cdclk))
  		goto sanitize;
return;
@@ -1942,62 +1942,62 @@ static void icl_init_cdclk(struct drm_i915_private *dev_priv)
  sanitize:
  	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
- sanitized_state.ref = dev_priv->cdclk.hw.ref;
+	sanitized_state.ref = i915->cdclk.hw.ref;
  	sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
-	sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
+	sanitized_state.vco = icl_calc_cdclk_pll_vco(i915,
  						     sanitized_state.cdclk);
  	sanitized_state.voltage_level =
  				icl_calc_voltage_level(sanitized_state.cdclk);
- icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
+	icl_set_cdclk(i915, &sanitized_state, INVALID_PIPE);
  }
-static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void icl_uninit_cdclk(struct drm_i915_private *i915)
  {
-	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+	struct intel_cdclk_state cdclk_state = i915->cdclk.hw;
cdclk_state.cdclk = cdclk_state.bypass;
  	cdclk_state.vco = 0;
  	cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
- icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	icl_set_cdclk(i915, &cdclk_state, INVALID_PIPE);
  }
-static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_init_cdclk(struct drm_i915_private *i915)
  {
  	struct intel_cdclk_state cdclk_state;
- cnl_sanitize_cdclk(dev_priv);
+	cnl_sanitize_cdclk(i915);
- if (dev_priv->cdclk.hw.cdclk != 0 &&
-	    dev_priv->cdclk.hw.vco != 0)
+	if (i915->cdclk.hw.cdclk != 0 &&
+	    i915->cdclk.hw.vco != 0)
  		return;
- cdclk_state = dev_priv->cdclk.hw;
+	cdclk_state = i915->cdclk.hw;
cdclk_state.cdclk = cnl_calc_cdclk(0);
-	cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
+	cdclk_state.vco = cnl_cdclk_pll_vco(i915, cdclk_state.cdclk);
  	cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	cnl_set_cdclk(i915, &cdclk_state, INVALID_PIPE);
  }
-static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_uninit_cdclk(struct drm_i915_private *i915)
  {
-	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+	struct intel_cdclk_state cdclk_state = i915->cdclk.hw;
cdclk_state.cdclk = cdclk_state.bypass;
  	cdclk_state.vco = 0;
  	cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	cnl_set_cdclk(i915, &cdclk_state, INVALID_PIPE);
  }
/**
   * intel_cdclk_init - Initialize CDCLK
   * @i915: i915 device
   *
- * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
+ * Initialize CDCLK. This consists mainly of initializing i915->cdclk.hw and
   * sanitizing the state of the hardware if needed. This is generally done only
   * during the display core initialization sequence, after which the DMC will
   * take care of turning CDCLK off/on as needed.
@@ -2051,19 +2051,19 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
/**
   * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
- * @dev_priv: Not a CDCLK state, it's the drm_i915_private!
+ * @i915: Not a CDCLK state, it's the drm_i915_private!
   * @a: first CDCLK state
   * @b: second CDCLK state
   *
   * Returns:
   * True if the CDCLK states require just a cd2x divider update, false if not.
   */
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *i915,
  				   const struct intel_cdclk_state *a,
  				   const struct intel_cdclk_state *b)
  {
  	/* Older hw doesn't have the capability */
-	if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
+	if (INTEL_GEN(i915) < 10 && !IS_GEN9_LP(i915))
  		return false;
return a->cdclk != b->cdclk &&
@@ -2100,10 +2100,10 @@ bool intel_cdclk_changed(const struct intel_cdclk_state *a,
   */
  void intel_cdclk_swap_state(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
- swap(state->cdclk.logical, dev_priv->cdclk.logical);
-	swap(state->cdclk.actual, dev_priv->cdclk.actual);
+	swap(state->cdclk.logical, i915->cdclk.logical);
+	swap(state->cdclk.actual, i915->cdclk.actual);
  }
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
@@ -2117,37 +2117,37 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
/**
   * intel_set_cdclk - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @cdclk_state: new CDCLK state
   * @pipe: pipe with which to synchronize the update
   *
   * Program the hardware based on the passed in CDCLK state,
   * if necessary.
   */
-static void intel_set_cdclk(struct drm_i915_private *dev_priv,
+static void intel_set_cdclk(struct drm_i915_private *i915,
  			    const struct intel_cdclk_state *cdclk_state,
  			    enum pipe pipe)
  {
-	if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
+	if (!intel_cdclk_changed(&i915->cdclk.hw, cdclk_state))
  		return;
- if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
+	if (WARN_ON_ONCE(!i915->display.set_cdclk))
  		return;
intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
-	dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
+	i915->display.set_cdclk(i915, cdclk_state, pipe);
- if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
+	if (WARN(intel_cdclk_changed(&i915->cdclk.hw, cdclk_state),
  		 "cdclk state doesn't match!\n")) {
-		intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]");
+		intel_dump_cdclk_state(&i915->cdclk.hw, "[hw state]");
  		intel_dump_cdclk_state(cdclk_state, "[sw state]");
  	}
  }
/**
   * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @old_state: old CDCLK state
   * @new_state: new CDCLK state
   * @pipe: pipe with which to synchronize the update
@@ -2156,18 +2156,18 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
   * in CDCLK state, if necessary.
   */
  void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *i915,
  				 const struct intel_cdclk_state *old_state,
  				 const struct intel_cdclk_state *new_state,
  				 enum pipe pipe)
  {
  	if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
-		intel_set_cdclk(dev_priv, new_state, pipe);
+		intel_set_cdclk(i915, new_state, pipe);
  }
/**
   * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @old_state: old CDCLK state
   * @new_state: new CDCLK state
   * @pipe: pipe with which to synchronize the update
@@ -2176,24 +2176,24 @@ intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
   * in CDCLK state, if necessary.
   */
  void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+intel_set_cdclk_post_plane_update(struct drm_i915_private *i915,
  				  const struct intel_cdclk_state *old_state,
  				  const struct intel_cdclk_state *new_state,
  				  enum pipe pipe)
  {
  	if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
-		intel_set_cdclk(dev_priv, new_state, pipe);
+		intel_set_cdclk(i915, new_state, pipe);
  }
-static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
+static int intel_pixel_rate_to_cdclk(struct drm_i915_private *i915,
  				     int pixel_rate)
  {
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		return DIV_ROUND_UP(pixel_rate, 2);
-	else if (IS_GEN(dev_priv, 9) ||
-		 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+	else if (IS_GEN(i915, 9) ||
+		 IS_BROADWELL(i915) || IS_HASWELL(i915))
  		return pixel_rate;
-	else if (IS_CHERRYVIEW(dev_priv))
+	else if (IS_CHERRYVIEW(i915))
  		return DIV_ROUND_UP(pixel_rate * 100, 95);
  	else
  		return DIV_ROUND_UP(pixel_rate * 100, 90);
@@ -2201,17 +2201,17 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(crtc_state->base.crtc->dev);
  	int min_cdclk;
if (!crtc_state->base.enable)
  		return 0;
- min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
+	min_cdclk = intel_pixel_rate_to_cdclk(i915, crtc_state->pixel_rate);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-	if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
+	if (IS_BROADWELL(i915) && hsw_crtc_state_ips_capable(crtc_state))
  		min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
/* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
@@ -2223,10 +2223,10 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
  	    crtc_state->has_audio &&
  	    crtc_state->port_clock >= 540000 &&
  	    crtc_state->lane_count == 4) {
-		if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
+		if (IS_CANNONLAKE(i915) || IS_GEMINILAKE(i915)) {
  			/* Display WA #1145: glk,cnl */
  			min_cdclk = max(316800, min_cdclk);
-		} else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
+		} else if (IS_GEN(i915, 9) || IS_BROADWELL(i915)) {
  			/* Display WA #1144: skl,bxt */
  			min_cdclk = max(432000, min_cdclk);
  		}
@@ -2236,7 +2236,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
  	 * According to BSpec, "The CD clock frequency must be at least twice
  	 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
  	 */
-	if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+	if (crtc_state->has_audio && INTEL_GEN(i915) >= 9)
  		min_cdclk = max(2 * 96000, min_cdclk);
/*
@@ -2244,7 +2244,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
  	 * than 320000KHz.
  	 */
  	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
-	    IS_VALLEYVIEW(dev_priv))
+	    IS_VALLEYVIEW(i915))
  		min_cdclk = max(320000, min_cdclk);
/*
@@ -2253,12 +2253,12 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
  	 * correct for DSI PLL and DE PLL.
  	 */
  	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
-	    IS_GEMINILAKE(dev_priv))
+	    IS_GEMINILAKE(i915))
  		min_cdclk = max(158400, min_cdclk);
- if (min_cdclk > dev_priv->max_cdclk_freq) {
+	if (min_cdclk > i915->max_cdclk_freq) {
  		DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
-			      min_cdclk, dev_priv->max_cdclk_freq);
+			      min_cdclk, i915->max_cdclk_freq);
  		return -EINVAL;
  	}
@@ -2267,13 +2267,13 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
static int intel_compute_min_cdclk(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	struct intel_crtc *crtc;
  	struct intel_crtc_state *crtc_state;
  	int min_cdclk, i;
  	enum pipe pipe;
- memcpy(state->min_cdclk, dev_priv->min_cdclk,
+	memcpy(state->min_cdclk, i915->min_cdclk,
  	       sizeof(state->min_cdclk));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
@@ -2285,7 +2285,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
  	}
min_cdclk = state->cdclk.force_min_cdclk;
-	for_each_pipe(dev_priv, pipe)
+	for_each_pipe(i915, pipe)
  		min_cdclk = max(state->min_cdclk[pipe], min_cdclk);
return min_cdclk;
@@ -2302,14 +2302,14 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
   */
  static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	struct intel_crtc *crtc;
  	struct intel_crtc_state *crtc_state;
  	u8 min_voltage_level;
  	int i;
  	enum pipe pipe;
- memcpy(state->min_voltage_level, dev_priv->min_voltage_level,
+	memcpy(state->min_voltage_level, i915->min_voltage_level,
  	       sizeof(state->min_voltage_level));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
@@ -2321,7 +2321,7 @@ static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
  	}
min_voltage_level = 0;
-	for_each_pipe(dev_priv, pipe)
+	for_each_pipe(i915, pipe)
  		min_voltage_level = max(state->min_voltage_level[pipe],
  					min_voltage_level);
@@ -2330,25 +2330,25 @@ static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	int min_cdclk, cdclk;
min_cdclk = intel_compute_min_cdclk(state);
  	if (min_cdclk < 0)
  		return min_cdclk;
- cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
+	cdclk = vlv_calc_cdclk(i915, min_cdclk);
state->cdclk.logical.cdclk = cdclk;
  	state->cdclk.logical.voltage_level =
-		vlv_calc_voltage_level(dev_priv, cdclk);
+		vlv_calc_voltage_level(i915, cdclk);
if (!state->active_crtcs) {
-		cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+		cdclk = vlv_calc_cdclk(i915, state->cdclk.force_min_cdclk);
state->cdclk.actual.cdclk = cdclk;
  		state->cdclk.actual.voltage_level =
-			vlv_calc_voltage_level(dev_priv, cdclk);
+			vlv_calc_voltage_level(i915, cdclk);
  	} else {
  		state->cdclk.actual = state->cdclk.logical;
  	}
@@ -2389,14 +2389,14 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
static int skl_dpll0_vco(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	struct intel_crtc *crtc;
  	struct intel_crtc_state *crtc_state;
  	int vco, i;
vco = state->cdclk.logical.vco;
  	if (!vco)
-		vco = dev_priv->skl_preferred_vco_freq;
+		vco = i915->skl_preferred_vco_freq;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
  		if (!crtc_state->base.enable)
@@ -2460,19 +2460,19 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
  	if (min_cdclk < 0)
  		return min_cdclk;
- if (IS_GEMINILAKE(dev_priv)) {
+	if (IS_GEMINILAKE(i915)) {
  		cdclk = glk_calc_cdclk(min_cdclk);
-		vco = glk_de_pll_vco(dev_priv, cdclk);
+		vco = glk_de_pll_vco(i915, cdclk);
  	} else {
  		cdclk = bxt_calc_cdclk(min_cdclk);
-		vco = bxt_de_pll_vco(dev_priv, cdclk);
+		vco = bxt_de_pll_vco(i915, cdclk);
  	}
state->cdclk.logical.vco = vco;
@@ -2481,12 +2481,12 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
  		bxt_calc_voltage_level(cdclk);
if (!state->active_crtcs) {
-		if (IS_GEMINILAKE(dev_priv)) {
+		if (IS_GEMINILAKE(i915)) {
  			cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk);
-			vco = glk_de_pll_vco(dev_priv, cdclk);
+			vco = glk_de_pll_vco(i915, cdclk);
  		} else {
  			cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk);
-			vco = bxt_de_pll_vco(dev_priv, cdclk);
+			vco = bxt_de_pll_vco(i915, cdclk);
  		}
state->cdclk.actual.vco = vco;
@@ -2502,7 +2502,7 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2510,7 +2510,7 @@ static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
  		return min_cdclk;
cdclk = cnl_calc_cdclk(min_cdclk);
-	vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+	vco = cnl_cdclk_pll_vco(i915, cdclk);
state->cdclk.logical.vco = vco;
  	state->cdclk.logical.cdclk = cdclk;
@@ -2520,7 +2520,7 @@ static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
if (!state->active_crtcs) {
  		cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk);
-		vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+		vco = cnl_cdclk_pll_vco(i915, cdclk);
state->cdclk.actual.vco = vco;
  		state->cdclk.actual.cdclk = cdclk;
@@ -2535,7 +2535,7 @@ static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	unsigned int ref = state->cdclk.logical.ref;
  	int min_cdclk, cdclk, vco;
@@ -2544,7 +2544,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
  		return min_cdclk;
cdclk = icl_calc_cdclk(min_cdclk, ref);
-	vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+	vco = icl_calc_cdclk_pll_vco(i915, cdclk);
state->cdclk.logical.vco = vco;
  	state->cdclk.logical.cdclk = cdclk;
@@ -2554,7 +2554,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
if (!state->active_crtcs) {
  		cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref);
-		vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+		vco = icl_calc_cdclk_pll_vco(i915, cdclk);
state->cdclk.actual.vco = vco;
  		state->cdclk.actual.cdclk = cdclk;
@@ -2567,18 +2567,18 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
  	return 0;
  }
-static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
+static int intel_compute_max_dotclk(struct drm_i915_private *i915)
  {
-	int max_cdclk_freq = dev_priv->max_cdclk_freq;
+	int max_cdclk_freq = i915->max_cdclk_freq;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		return 2 * max_cdclk_freq;
-	else if (IS_GEN(dev_priv, 9) ||
-		 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+	else if (IS_GEN(i915, 9) ||
+		 IS_BROADWELL(i915) || IS_HASWELL(i915))
  		return max_cdclk_freq;
-	else if (IS_CHERRYVIEW(dev_priv))
+	else if (IS_CHERRYVIEW(i915))
  		return max_cdclk_freq*95/100;
-	else if (INTEL_GEN(dev_priv) < 4)
+	else if (INTEL_GEN(i915) < 4)
  		return 2*max_cdclk_freq*90/100;
  	else
  		return max_cdclk_freq*90/100;
@@ -2586,26 +2586,26 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
/**
   * intel_update_max_cdclk - Determine the maximum support CDCLK frequency
- * @dev_priv: i915 device
+ * @i915: i915 device
   *
   * Determine the maximum CDCLK frequency the platform supports, and also
   * derive the maximum dot clock frequency the maximum CDCLK frequency
   * allows.
   */
-void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
+void intel_update_max_cdclk(struct drm_i915_private *i915)
  {
-	if (INTEL_GEN(dev_priv) >= 11) {
-		if (dev_priv->cdclk.hw.ref == 24000)
-			dev_priv->max_cdclk_freq = 648000;
+	if (INTEL_GEN(i915) >= 11) {
+		if (i915->cdclk.hw.ref == 24000)
+			i915->max_cdclk_freq = 648000;
  		else
-			dev_priv->max_cdclk_freq = 652800;
-	} else if (IS_CANNONLAKE(dev_priv)) {
-		dev_priv->max_cdclk_freq = 528000;
-	} else if (IS_GEN9_BC(dev_priv)) {
+			i915->max_cdclk_freq = 652800;
+	} else if (IS_CANNONLAKE(i915)) {
+		i915->max_cdclk_freq = 528000;
+	} else if (IS_GEN9_BC(i915)) {
  		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
  		int max_cdclk, vco;
- vco = dev_priv->skl_preferred_vco_freq;
+		vco = i915->skl_preferred_vco_freq;
  		WARN_ON(vco != 8100000 && vco != 8640000);
/*
@@ -2622,12 +2622,12 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
  		else
  			max_cdclk = 308571;
- dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		dev_priv->max_cdclk_freq = 316800;
-	} else if (IS_BROXTON(dev_priv)) {
-		dev_priv->max_cdclk_freq = 624000;
-	} else if (IS_BROADWELL(dev_priv))  {
+		i915->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
+	} else if (IS_GEMINILAKE(i915)) {
+		i915->max_cdclk_freq = 316800;
+	} else if (IS_BROXTON(i915)) {
+		i915->max_cdclk_freq = 624000;
+	} else if (IS_BROADWELL(i915))  {
  		/*
  		 * FIXME with extra cooling we can allow
  		 * 540 MHz for ULX and 675 Mhz for ULT.
@@ -2635,40 +2635,40 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
  		 * available? PCI ID, VTB, something else?
  		 */
  		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
-			dev_priv->max_cdclk_freq = 450000;
-		else if (IS_BDW_ULX(dev_priv))
-			dev_priv->max_cdclk_freq = 450000;
-		else if (IS_BDW_ULT(dev_priv))
-			dev_priv->max_cdclk_freq = 540000;
+			i915->max_cdclk_freq = 450000;
+		else if (IS_BDW_ULX(i915))
+			i915->max_cdclk_freq = 450000;
+		else if (IS_BDW_ULT(i915))
+			i915->max_cdclk_freq = 540000;
  		else
-			dev_priv->max_cdclk_freq = 675000;
-	} else if (IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->max_cdclk_freq = 320000;
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		dev_priv->max_cdclk_freq = 400000;
+			i915->max_cdclk_freq = 675000;
+	} else if (IS_CHERRYVIEW(i915)) {
+		i915->max_cdclk_freq = 320000;
+	} else if (IS_VALLEYVIEW(i915)) {
+		i915->max_cdclk_freq = 400000;
  	} else {
  		/* otherwise assume cdclk is fixed */
-		dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk;
+		i915->max_cdclk_freq = i915->cdclk.hw.cdclk;
  	}
- dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+	i915->max_dotclk_freq = intel_compute_max_dotclk(i915);
DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
-			 dev_priv->max_cdclk_freq);
+			 i915->max_cdclk_freq);
DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
-			 dev_priv->max_dotclk_freq);
+			 i915->max_dotclk_freq);
  }
/**
   * intel_update_cdclk - Determine the current CDCLK frequency
- * @dev_priv: i915 device
+ * @i915: i915 device
   *
   * Determine the current CDCLK frequency.
   */
-void intel_update_cdclk(struct drm_i915_private *dev_priv)
+void intel_update_cdclk(struct drm_i915_private *i915)
  {
-	dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw);
+	i915->display.get_cdclk(i915, &i915->cdclk.hw);
/*
  	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
@@ -2676,12 +2676,12 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
  	 * of cdclk that generates 4MHz reference clock freq which is used to
  	 * generate GMBus clock. This will vary with the cdclk freq.
  	 */
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		I915_WRITE(GMBUSFREQ_VLV,
-			   DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+			   DIV_ROUND_UP(i915->cdclk.hw.cdclk, 1000));
  }
-static int cnp_rawclk(struct drm_i915_private *dev_priv)
+static int cnp_rawclk(struct drm_i915_private *i915)
  {
  	u32 rawclk;
  	int divider, fraction;
@@ -2702,7 +2702,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
  							   fraction) - 1);
-		if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+		if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
  			rawclk |= ICP_RAWCLK_NUM(numerator);
  	}
@@ -2710,19 +2710,19 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
  	return divider + fraction;
  }
-static int pch_rawclk(struct drm_i915_private *dev_priv)
+static int pch_rawclk(struct drm_i915_private *i915)
  {
  	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
  }
-static int vlv_hrawclk(struct drm_i915_private *dev_priv)
+static int vlv_hrawclk(struct drm_i915_private *i915)
  {
  	/* RAWCLK_FREQ_VLV register updated from power well code */
-	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
+	return vlv_get_cck_clock_hpll(i915, "hrawclk",
  				      CCK_DISPLAY_REF_CLOCK_CONTROL);
  }
-static int g4x_hrawclk(struct drm_i915_private *dev_priv)
+static int g4x_hrawclk(struct drm_i915_private *i915)
  {
  	u32 clkcfg;
@@ -2750,104 +2750,104 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
/**
   * intel_update_rawclk - Determine the current RAWCLK frequency
- * @dev_priv: i915 device
+ * @i915: i915 device
   *
   * Determine the current RAWCLK frequency. RAWCLK is a fixed
   * frequency clock so this needs to done only once.
   */
-void intel_update_rawclk(struct drm_i915_private *dev_priv)
-{
-	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
-		dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
-	else if (HAS_PCH_SPLIT(dev_priv))
-		dev_priv->rawclk_freq = pch_rawclk(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		dev_priv->rawclk_freq = vlv_hrawclk(dev_priv);
-	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
-		dev_priv->rawclk_freq = g4x_hrawclk(dev_priv);
+void intel_update_rawclk(struct drm_i915_private *i915)
+{
+	if (INTEL_PCH_TYPE(i915) >= PCH_CNP)
+		i915->rawclk_freq = cnp_rawclk(i915);
+	else if (HAS_PCH_SPLIT(i915))
+		i915->rawclk_freq = pch_rawclk(i915);
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		i915->rawclk_freq = vlv_hrawclk(i915);
+	else if (IS_G4X(i915) || IS_PINEVIEW(i915))
+		i915->rawclk_freq = g4x_hrawclk(i915);
  	else
  		/* no rawclk on other platforms, or no need to know it */
  		return;
- DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
+	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", i915->rawclk_freq);
  }
/**
   * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks
- * @dev_priv: i915 device
+ * @i915: i915 device
   */
-void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
-{
-	if (INTEL_GEN(dev_priv) >= 11) {
-		dev_priv->display.set_cdclk = icl_set_cdclk;
-		dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
-	} else if (IS_CANNONLAKE(dev_priv)) {
-		dev_priv->display.set_cdclk = cnl_set_cdclk;
-		dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk;
-	} else if (IS_GEN9_LP(dev_priv)) {
-		dev_priv->display.set_cdclk = bxt_set_cdclk;
-		dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
-	} else if (IS_GEN9_BC(dev_priv)) {
-		dev_priv->display.set_cdclk = skl_set_cdclk;
-		dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
-	} else if (IS_BROADWELL(dev_priv)) {
-		dev_priv->display.set_cdclk = bdw_set_cdclk;
-		dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
-	} else if (IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->display.set_cdclk = chv_set_cdclk;
-		dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		dev_priv->display.set_cdclk = vlv_set_cdclk;
-		dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
+void intel_init_cdclk_hooks(struct drm_i915_private *i915)
+{
+	if (INTEL_GEN(i915) >= 11) {
+		i915->display.set_cdclk = icl_set_cdclk;
+		i915->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+	} else if (IS_CANNONLAKE(i915)) {
+		i915->display.set_cdclk = cnl_set_cdclk;
+		i915->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk;
+	} else if (IS_GEN9_LP(i915)) {
+		i915->display.set_cdclk = bxt_set_cdclk;
+		i915->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+	} else if (IS_GEN9_BC(i915)) {
+		i915->display.set_cdclk = skl_set_cdclk;
+		i915->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
+	} else if (IS_BROADWELL(i915)) {
+		i915->display.set_cdclk = bdw_set_cdclk;
+		i915->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
+	} else if (IS_CHERRYVIEW(i915)) {
+		i915->display.set_cdclk = chv_set_cdclk;
+		i915->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
+	} else if (IS_VALLEYVIEW(i915)) {
+		i915->display.set_cdclk = vlv_set_cdclk;
+		i915->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
  	}
- if (INTEL_GEN(dev_priv) >= 11)
-		dev_priv->display.get_cdclk = icl_get_cdclk;
-	else if (IS_CANNONLAKE(dev_priv))
-		dev_priv->display.get_cdclk = cnl_get_cdclk;
-	else if (IS_GEN9_LP(dev_priv))
-		dev_priv->display.get_cdclk = bxt_get_cdclk;
-	else if (IS_GEN9_BC(dev_priv))
-		dev_priv->display.get_cdclk = skl_get_cdclk;
-	else if (IS_BROADWELL(dev_priv))
-		dev_priv->display.get_cdclk = bdw_get_cdclk;
-	else if (IS_HASWELL(dev_priv))
-		dev_priv->display.get_cdclk = hsw_get_cdclk;
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		dev_priv->display.get_cdclk = vlv_get_cdclk;
-	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
-		dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
-	else if (IS_GEN(dev_priv, 5))
-		dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
-	else if (IS_GM45(dev_priv))
-		dev_priv->display.get_cdclk = gm45_get_cdclk;
-	else if (IS_G45(dev_priv))
-		dev_priv->display.get_cdclk = g33_get_cdclk;
-	else if (IS_I965GM(dev_priv))
-		dev_priv->display.get_cdclk = i965gm_get_cdclk;
-	else if (IS_I965G(dev_priv))
-		dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
-	else if (IS_PINEVIEW(dev_priv))
-		dev_priv->display.get_cdclk = pnv_get_cdclk;
-	else if (IS_G33(dev_priv))
-		dev_priv->display.get_cdclk = g33_get_cdclk;
-	else if (IS_I945GM(dev_priv))
-		dev_priv->display.get_cdclk = i945gm_get_cdclk;
-	else if (IS_I945G(dev_priv))
-		dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
-	else if (IS_I915GM(dev_priv))
-		dev_priv->display.get_cdclk = i915gm_get_cdclk;
-	else if (IS_I915G(dev_priv))
-		dev_priv->display.get_cdclk = fixed_333mhz_get_cdclk;
-	else if (IS_I865G(dev_priv))
-		dev_priv->display.get_cdclk = fixed_266mhz_get_cdclk;
-	else if (IS_I85X(dev_priv))
-		dev_priv->display.get_cdclk = i85x_get_cdclk;
-	else if (IS_I845G(dev_priv))
-		dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
+	if (INTEL_GEN(i915) >= 11)
+		i915->display.get_cdclk = icl_get_cdclk;
+	else if (IS_CANNONLAKE(i915))
+		i915->display.get_cdclk = cnl_get_cdclk;
+	else if (IS_GEN9_LP(i915))
+		i915->display.get_cdclk = bxt_get_cdclk;
+	else if (IS_GEN9_BC(i915))
+		i915->display.get_cdclk = skl_get_cdclk;
+	else if (IS_BROADWELL(i915))
+		i915->display.get_cdclk = bdw_get_cdclk;
+	else if (IS_HASWELL(i915))
+		i915->display.get_cdclk = hsw_get_cdclk;
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		i915->display.get_cdclk = vlv_get_cdclk;
+	else if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915))
+		i915->display.get_cdclk = fixed_400mhz_get_cdclk;
+	else if (IS_GEN(i915, 5))
+		i915->display.get_cdclk = fixed_450mhz_get_cdclk;
+	else if (IS_GM45(i915))
+		i915->display.get_cdclk = gm45_get_cdclk;
+	else if (IS_G45(i915))
+		i915->display.get_cdclk = g33_get_cdclk;
+	else if (IS_I965GM(i915))
+		i915->display.get_cdclk = i965gm_get_cdclk;
+	else if (IS_I965G(i915))
+		i915->display.get_cdclk = fixed_400mhz_get_cdclk;
+	else if (IS_PINEVIEW(i915))
+		i915->display.get_cdclk = pnv_get_cdclk;
+	else if (IS_G33(i915))
+		i915->display.get_cdclk = g33_get_cdclk;
+	else if (IS_I945GM(i915))
+		i915->display.get_cdclk = i945gm_get_cdclk;
+	else if (IS_I945G(i915))
+		i915->display.get_cdclk = fixed_400mhz_get_cdclk;
+	else if (IS_I915GM(i915))
+		i915->display.get_cdclk = i915gm_get_cdclk;
+	else if (IS_I915G(i915))
+		i915->display.get_cdclk = fixed_333mhz_get_cdclk;
+	else if (IS_I865G(i915))
+		i915->display.get_cdclk = fixed_266mhz_get_cdclk;
+	else if (IS_I85X(i915))
+		i915->display.get_cdclk = i85x_get_cdclk;
+	else if (IS_I845G(i915))
+		i915->display.get_cdclk = fixed_200mhz_get_cdclk;
  	else { /* 830 */
-		WARN(!IS_I830(dev_priv),
+		WARN(!IS_I830(i915),
  		     "Unknown platform. Assuming 133 MHz CDCLK\n");
-		dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
+		i915->display.get_cdclk = fixed_133mhz_get_cdclk;
  	}
  }
diff --git a/drivers/gpu/drm/i915/intel_cdclk.h b/drivers/gpu/drm/i915/intel_cdclk.h
index 4d6f7f5f8930..4e17102af66c 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/intel_cdclk.h
@@ -18,11 +18,11 @@ struct intel_crtc_state;
  int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
  void intel_cdclk_init(struct drm_i915_private *i915);
  void intel_cdclk_uninit(struct drm_i915_private *i915);
-void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
-void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+void intel_init_cdclk_hooks(struct drm_i915_private *i915);
+void intel_update_max_cdclk(struct drm_i915_private *i915);
+void intel_update_cdclk(struct drm_i915_private *i915);
+void intel_update_rawclk(struct drm_i915_private *i915);
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *i915,
  				   const struct intel_cdclk_state *a,
  				   const struct intel_cdclk_state *b);
  bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
@@ -31,12 +31,12 @@ bool intel_cdclk_changed(const struct intel_cdclk_state *a,
  			 const struct intel_cdclk_state *b);
  void intel_cdclk_swap_state(struct intel_atomic_state *state);
  void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *i915,
  				 const struct intel_cdclk_state *old_state,
  				 const struct intel_cdclk_state *new_state,
  				 enum pipe pipe);
  void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+intel_set_cdclk_post_plane_update(struct drm_i915_private *i915,
  				  const struct intel_cdclk_state *old_state,
  				  const struct intel_cdclk_state *new_state,
  				  enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 0b8cf3e8c963..efc67e4a0ba5 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -137,7 +137,7 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc,
  				const u16 coeff[9],
  				const u16 postoff[3])
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
@@ -153,7 +153,7 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc,
  	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
  	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
- if (INTEL_GEN(dev_priv) >= 7) {
+	if (INTEL_GEN(i915) >= 7) {
  		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
  		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
  		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
@@ -165,7 +165,7 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
  				  const u16 coeff[9],
  				  const u16 postoff[3])
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
@@ -188,15 +188,15 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
/*
  	 * FIXME if there's a gamma LUT after the CSC, we should
  	 * do the range compression using the gamma LUT instead.
  	 */
  	return crtc_state->limited_color_range &&
-		(IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
-		 IS_GEN_RANGE(dev_priv, 9, 10));
+		(IS_HASWELL(i915) || IS_BROADWELL(i915) ||
+		 IS_GEN_RANGE(i915, 9, 10));
  }
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
@@ -254,7 +254,7 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
  static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	bool limited_color_range = ilk_csc_limited_range(crtc_state);
if (crtc_state->base.ctm) {
@@ -280,7 +280,7 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
  		 * LUT is needed but CSC is not we need to load an
  		 * identity matrix.
  		 */
-		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
+		WARN_ON(!IS_CANNONLAKE(i915) && !IS_GEMINILAKE(i915));
ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
  				    ilk_csc_coeff_identity,
@@ -293,7 +293,7 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
  static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (crtc_state->base.ctm) {
  		u16 coeff[9];
@@ -322,7 +322,7 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
  static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
if (crtc_state->base.ctm) {
@@ -388,15 +388,15 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
  				    const struct drm_property_blob *blob)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	int i;
- if (HAS_GMCH(dev_priv)) {
+	if (HAS_GMCH(i915)) {
  		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
-			assert_dsi_pll_enabled(dev_priv);
+			assert_dsi_pll_enabled(i915);
  		else
-			assert_pll_enabled(dev_priv, pipe);
+			assert_pll_enabled(i915, pipe);
  	}
if (blob) {
@@ -408,7 +408,7 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
  				(drm_color_lut_extract(lut[i].green, 8) << 8) |
  				drm_color_lut_extract(lut[i].blue, 8);
- if (HAS_GMCH(dev_priv))
+			if (HAS_GMCH(i915))
  				I915_WRITE(PALETTE(pipe, i), word);
  			else
  				I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -424,7 +424,7 @@ static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
  static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	u32 val;
@@ -437,7 +437,7 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
  static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	u32 val;
@@ -452,7 +452,7 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
  static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
@@ -462,7 +462,7 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
  static void skl_color_commit(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	u32 val = 0;
@@ -479,7 +479,7 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
-	if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_load_csc_matrix(crtc_state);
  	else
  		ilk_load_csc_matrix(crtc_state);
@@ -488,7 +488,7 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
  static void i965_load_lut_10p6(struct intel_crtc *crtc,
  			       const struct drm_property_blob *blob)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct drm_color_lut *lut = blob->data;
  	int i, lut_size = drm_color_lut_size(blob);
  	enum pipe pipe = crtc->pipe;
@@ -519,7 +519,7 @@ static void i965_load_luts(const struct intel_crtc_state *crtc_state)
  static void ilk_load_lut_10(struct intel_crtc *crtc,
  			    const struct drm_property_blob *blob)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct drm_color_lut *lut = blob->data;
  	int i, lut_size = drm_color_lut_size(blob);
  	enum pipe pipe = crtc->pipe;
@@ -556,7 +556,7 @@ static void ivb_load_lut_10(struct intel_crtc *crtc,
  			    const struct drm_property_blob *blob,
  			    u32 prec_index)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	int hw_lut_size = ivb_lut_10_size(prec_index);
  	const struct drm_color_lut *lut = blob->data;
  	int i, lut_size = drm_color_lut_size(blob);
@@ -583,7 +583,7 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
  			    const struct drm_property_blob *blob,
  			    u32 prec_index)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	int hw_lut_size = ivb_lut_10_size(prec_index);
  	const struct drm_color_lut *lut = blob->data;
  	int i, lut_size = drm_color_lut_size(blob);
@@ -609,7 +609,7 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
static void ivb_load_lut_10_max(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
@@ -622,7 +622,7 @@ static void ivb_load_lut_10_max(struct intel_crtc *crtc)
  	 * ToDo: Extend the ABI to be able to program values
  	 * from 3.0 to 7.0
  	 */
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915)) {
  		I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
  		I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
  		I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
@@ -678,9 +678,9 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
  static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
-	const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+	const u32 lut_size = INTEL_INFO(i915)->color.degamma_lut_size;
  	const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
  	u32 i;
@@ -717,9 +717,9 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
  static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
-	const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+	const u32 lut_size = INTEL_INFO(i915)->color.degamma_lut_size;
  	u32 i;
/*
@@ -798,7 +798,7 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
  static void chv_load_cgm_degamma(struct intel_crtc *crtc,
  				 const struct drm_property_blob *blob)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct drm_color_lut *lut = blob->data;
  	int i, lut_size = drm_color_lut_size(blob);
  	enum pipe pipe = crtc->pipe;
@@ -825,7 +825,7 @@ static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color)
  static void chv_load_cgm_gamma(struct intel_crtc *crtc,
  			       const struct drm_property_blob *blob)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct drm_color_lut *lut = blob->data;
  	int i, lut_size = drm_color_lut_size(blob);
  	enum pipe pipe = crtc->pipe;
@@ -860,37 +860,37 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state)
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
- dev_priv->display.load_luts(crtc_state);
+	i915->display.load_luts(crtc_state);
  }
void intel_color_commit(const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
- dev_priv->display.color_commit(crtc_state);
+	i915->display.color_commit(crtc_state);
  }
int intel_color_check(struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
- return dev_priv->display.color_check(crtc_state);
+	return i915->display.color_check(crtc_state);
  }
void intel_color_get_config(struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
- if (dev_priv->display.read_luts)
-		dev_priv->display.read_luts(crtc_state);
+	if (i915->display.read_luts)
+		i915->display.read_luts(crtc_state);
  }
static bool need_plane_update(struct intel_plane *plane,
  			      const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
/*
  	 * On pre-SKL the pipe gamma enable and pipe csc enable for
@@ -898,7 +898,7 @@ static bool need_plane_update(struct intel_plane *plane,
  	 * We have to reconfigure that even if the plane is inactive.
  	 */
  	return crtc_state->active_planes & BIT(plane->id) ||
-		(INTEL_GEN(dev_priv) < 9 &&
+		(INTEL_GEN(i915) < 9 &&
  		 plane->id == PLANE_PRIMARY);
  }
@@ -906,7 +906,7 @@ static int
  intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_atomic_state *state =
  		to_intel_atomic_state(new_crtc_state->base.state);
  	const struct intel_crtc_state *old_crtc_state =
@@ -921,7 +921,7 @@ intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
  	    new_crtc_state->csc_enable == old_crtc_state->csc_enable)
  		return 0;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
  		struct intel_plane_state *plane_state;
if (!need_plane_update(plane, new_crtc_state))
@@ -956,7 +956,7 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
static int check_luts(const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
  	const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
  	int gamma_length, degamma_length;
@@ -972,10 +972,10 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
  		return -EINVAL;
  	}
- degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
-	gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
-	degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
-	gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
+	degamma_length = INTEL_INFO(i915)->color.degamma_lut_size;
+	gamma_length = INTEL_INFO(i915)->color.gamma_lut_size;
+	degamma_tests = INTEL_INFO(i915)->color.degamma_lut_tests;
+	gamma_tests = INTEL_INFO(i915)->color.gamma_lut_tests;
if (check_lut_size(degamma_lut, degamma_length) ||
  	    check_lut_size(gamma_lut, gamma_length))
@@ -1255,56 +1255,56 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
void intel_color_init(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	bool has_ctm = INTEL_INFO(i915)->color.degamma_lut_size != 0;
  	drm_mode_crtc_set_gamma_size(&crtc->base, 256);
 
-	if (HAS_GMCH(dev_priv)) {
-		if (IS_CHERRYVIEW(dev_priv)) {
-			dev_priv->display.color_check = chv_color_check;
-			dev_priv->display.color_commit = i9xx_color_commit;
-			dev_priv->display.load_luts = chv_load_luts;
-		} else if (INTEL_GEN(dev_priv) >= 4) {
-			dev_priv->display.color_check = i9xx_color_check;
-			dev_priv->display.color_commit = i9xx_color_commit;
-			dev_priv->display.load_luts = i965_load_luts;
+	if (HAS_GMCH(i915)) {
+		if (IS_CHERRYVIEW(i915)) {
+			i915->display.color_check = chv_color_check;
+			i915->display.color_commit = i9xx_color_commit;
+			i915->display.load_luts = chv_load_luts;
+		} else if (INTEL_GEN(i915) >= 4) {
+			i915->display.color_check = i9xx_color_check;
+			i915->display.color_commit = i9xx_color_commit;
+			i915->display.load_luts = i965_load_luts;
  		} else {
-			dev_priv->display.color_check = i9xx_color_check;
-			dev_priv->display.color_commit = i9xx_color_commit;
-			dev_priv->display.load_luts = i9xx_load_luts;
+			i915->display.color_check = i9xx_color_check;
+			i915->display.color_commit = i9xx_color_commit;
+			i915->display.load_luts = i9xx_load_luts;
  		}
  	} else {
-		if (INTEL_GEN(dev_priv) >= 11)
-			dev_priv->display.color_check = icl_color_check;
-		else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-			dev_priv->display.color_check = glk_color_check;
-		else if (INTEL_GEN(dev_priv) >= 7)
-			dev_priv->display.color_check = ivb_color_check;
+		if (INTEL_GEN(i915) >= 11)
+			i915->display.color_check = icl_color_check;
+		else if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
+			i915->display.color_check = glk_color_check;
+		else if (INTEL_GEN(i915) >= 7)
+			i915->display.color_check = ivb_color_check;
  		else
-			dev_priv->display.color_check = ilk_color_check;
+			i915->display.color_check = ilk_color_check;
- if (INTEL_GEN(dev_priv) >= 9)
-			dev_priv->display.color_commit = skl_color_commit;
-		else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-			dev_priv->display.color_commit = hsw_color_commit;
+		if (INTEL_GEN(i915) >= 9)
+			i915->display.color_commit = skl_color_commit;
+		else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
+			i915->display.color_commit = hsw_color_commit;
  		else
-			dev_priv->display.color_commit = ilk_color_commit;
-
-		if (INTEL_GEN(dev_priv) >= 11)
-			dev_priv->display.load_luts = icl_load_luts;
-		else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
-			dev_priv->display.load_luts = glk_load_luts;
-		else if (INTEL_GEN(dev_priv) >= 8)
-			dev_priv->display.load_luts = bdw_load_luts;
-		else if (INTEL_GEN(dev_priv) >= 7)
-			dev_priv->display.load_luts = ivb_load_luts;
+			i915->display.color_commit = ilk_color_commit;
+
+		if (INTEL_GEN(i915) >= 11)
+			i915->display.load_luts = icl_load_luts;
+		else if (IS_CANNONLAKE(i915) || IS_GEMINILAKE(i915))
+			i915->display.load_luts = glk_load_luts;
+		else if (INTEL_GEN(i915) >= 8)
+			i915->display.load_luts = bdw_load_luts;
+		else if (INTEL_GEN(i915) >= 7)
+			i915->display.load_luts = ivb_load_luts;
  		else
-			dev_priv->display.load_luts = ilk_load_luts;
+			i915->display.load_luts = ilk_load_luts;
  	}
drm_crtc_enable_color_mgmt(&crtc->base,
-				   INTEL_INFO(dev_priv)->color.degamma_lut_size,
+				   INTEL_INFO(i915)->color.degamma_lut_size,
  				   has_ctm,
-				   INTEL_INFO(dev_priv)->color.gamma_lut_size);
+				   INTEL_INFO(i915)->color.gamma_lut_size);
  }
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
index 841708da5a56..0e7c18d783ca 100644
--- a/drivers/gpu/drm/i915/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -44,7 +44,7 @@ static const struct cnl_procmon {
   * on its name.
   */
  static const struct cnl_procmon *
-cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
+cnl_get_procmon_ref_values(struct drm_i915_private *i915, enum port port)
  {
  	const struct cnl_procmon *procmon;
  	u32 val;
@@ -74,13 +74,13 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
  	return procmon;
  }
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+static void cnl_set_procmon_ref_values(struct drm_i915_private *i915,
  				       enum port port)
  {
  	const struct cnl_procmon *procmon;
  	u32 val;
- procmon = cnl_get_procmon_ref_values(dev_priv, port);
+	procmon = cnl_get_procmon_ref_values(i915, port);
val = I915_READ(ICL_PORT_COMP_DW1(port));
  	val &= ~((0xff << 16) | 0xff);
@@ -91,7 +91,7 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
  	I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
  }
-static bool check_phy_reg(struct drm_i915_private *dev_priv,
+static bool check_phy_reg(struct drm_i915_private *i915,
  			  enum port port, i915_reg_t reg, u32 mask,
  			  u32 expected_val)
  {
@@ -108,47 +108,47 @@ static bool check_phy_reg(struct drm_i915_private *dev_priv,
  	return true;
  }
-static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+static bool cnl_verify_procmon_ref_values(struct drm_i915_private *i915,
  					  enum port port)
  {
  	const struct cnl_procmon *procmon;
  	bool ret;
- procmon = cnl_get_procmon_ref_values(dev_priv, port);
+	procmon = cnl_get_procmon_ref_values(i915, port);
- ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
+	ret = check_phy_reg(i915, port, ICL_PORT_COMP_DW1(port),
  			    (0xff << 16) | 0xff, procmon->dw1);
-	ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
+	ret &= check_phy_reg(i915, port, ICL_PORT_COMP_DW9(port),
  			     -1U, procmon->dw9);
-	ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
+	ret &= check_phy_reg(i915, port, ICL_PORT_COMP_DW10(port),
  			     -1U, procmon->dw10);
return ret;
  }
-static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
+static bool cnl_combo_phy_enabled(struct drm_i915_private *i915)
  {
  	return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
  		(I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
  }
-static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
+static bool cnl_combo_phy_verify_state(struct drm_i915_private *i915)
  {
  	enum port port = PORT_A;
  	bool ret;
- if (!cnl_combo_phy_enabled(dev_priv))
+	if (!cnl_combo_phy_enabled(i915))
  		return false;
- ret = cnl_verify_procmon_ref_values(dev_priv, port);
+	ret = cnl_verify_procmon_ref_values(i915, port);
- ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
+	ret &= check_phy_reg(i915, port, CNL_PORT_CL1CM_DW5,
  			     CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
  }
-static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
+static void cnl_combo_phys_init(struct drm_i915_private *i915)
  {
  	u32 val;
@@ -157,7 +157,7 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
  	I915_WRITE(CHICKEN_MISC_2, val);
/* Dummy PORT_A to get the correct CNL register from the ICL macro */
-	cnl_set_procmon_ref_values(dev_priv, PORT_A);
+	cnl_set_procmon_ref_values(i915, PORT_A);
val = I915_READ(CNL_PORT_COMP_DW0);
  	val |= COMP_INIT;
@@ -168,11 +168,11 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
  	I915_WRITE(CNL_PORT_CL1CM_DW5, val);
  }
-static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+static void cnl_combo_phys_uninit(struct drm_i915_private *i915)
  {
  	u32 val;
- if (!cnl_combo_phy_verify_state(dev_priv))
+	if (!cnl_combo_phy_verify_state(i915))
  		DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
val = I915_READ(CHICKEN_MISC_2);
@@ -180,7 +180,7 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
  	I915_WRITE(CHICKEN_MISC_2, val);
  }
-static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
+static bool icl_combo_phy_enabled(struct drm_i915_private *i915,
  				  enum port port)
  {
  	return !(I915_READ(ICL_PHY_MISC(port)) &
@@ -188,27 +188,27 @@ static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
  		(I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
  }
-static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
+static bool icl_combo_phy_verify_state(struct drm_i915_private *i915,
  				       enum port port)
  {
  	bool ret;
- if (!icl_combo_phy_enabled(dev_priv, port))
+	if (!icl_combo_phy_enabled(i915, port))
  		return false;
- ret = cnl_verify_procmon_ref_values(dev_priv, port);
+	ret = cnl_verify_procmon_ref_values(i915, port);
if (port == PORT_A)
-		ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port),
+		ret &= check_phy_reg(i915, port, ICL_PORT_COMP_DW8(port),
  				     IREFGEN, IREFGEN);
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
+	ret &= check_phy_reg(i915, port, ICL_PORT_CL_DW5(port),
  			     CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
  }
-void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+void intel_combo_phy_power_up_lanes(struct drm_i915_private *i915,
  				    enum port port, bool is_dsi,
  				    int lane_count, bool lane_reversal)
  {
@@ -260,14 +260,14 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
  	I915_WRITE(ICL_PORT_CL_DW10(port), val);
  }
-static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+static void icl_combo_phys_init(struct drm_i915_private *i915)
  {
  	enum port port;
- for_each_combo_port(dev_priv, port) {
+	for_each_combo_port(i915, port) {
  		u32 val;
- if (icl_combo_phy_verify_state(dev_priv, port)) {
+		if (icl_combo_phy_verify_state(i915, port)) {
  			DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
  					 port_name(port));
  			continue;
@@ -277,7 +277,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
  		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
  		I915_WRITE(ICL_PHY_MISC(port), val);
- cnl_set_procmon_ref_values(dev_priv, port);
+		cnl_set_procmon_ref_values(i915, port);
if (port == PORT_A) {
  			val = I915_READ(ICL_PORT_COMP_DW8(port));
@@ -295,15 +295,15 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
  	}
  }
-static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+static void icl_combo_phys_uninit(struct drm_i915_private *i915)
  {
  	enum port port;
- for_each_combo_port_reverse(dev_priv, port) {
+	for_each_combo_port_reverse(i915, port) {
  		u32 val;
if (port == PORT_A &&
-		    !icl_combo_phy_verify_state(dev_priv, port))
+		    !icl_combo_phy_verify_state(i915, port))
  			DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
  				 port_name(port));
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.h b/drivers/gpu/drm/i915/intel_combo_phy.h
index e6e195a83b19..a08286cfcecf 100644
--- a/drivers/gpu/drm/i915/intel_combo_phy.h
+++ b/drivers/gpu/drm/i915/intel_combo_phy.h
@@ -11,9 +11,9 @@
  struct drm_i915_private;
 
-void intel_combo_phy_init(struct drm_i915_private *dev_priv);
-void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
-void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+void intel_combo_phy_init(struct drm_i915_private *i915);
+void intel_combo_phy_uninit(struct drm_i915_private *i915);
+void intel_combo_phy_power_up_lanes(struct drm_i915_private *i915,
  				    enum port port, bool is_dsi,
  				    int lane_count, bool lane_reversal);
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
index 073b6c3ab7cc..2912804d5a0d 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -219,10 +219,10 @@ void
  intel_attach_force_audio_property(struct drm_connector *connector)
  {
  	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_property *prop;
- prop = dev_priv->force_audio_property;
+	prop = i915->force_audio_property;
  	if (prop == NULL) {
  		prop = drm_property_create_enum(dev, 0,
  					   "audio",
@@ -231,7 +231,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
  		if (prop == NULL)
  			return;
- dev_priv->force_audio_property = prop;
+		i915->force_audio_property = prop;
  	}
  	drm_object_attach_property(&connector->base, prop, 0);
  }
@@ -246,10 +246,10 @@ void
  intel_attach_broadcast_rgb_property(struct drm_connector *connector)
  {
  	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_property *prop;
- prop = dev_priv->broadcast_rgb_property;
+	prop = i915->broadcast_rgb_property;
  	if (prop == NULL) {
  		prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
  					   "Broadcast RGB",
@@ -258,7 +258,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
  		if (prop == NULL)
  			return;
- dev_priv->broadcast_rgb_property = prop;
+		i915->broadcast_rgb_property = prop;
  	}
drm_object_attach_property(&connector->base, prop, 0);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3fcf2f84bcce..2a794e0bf457 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -70,7 +70,7 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
  	return intel_encoder_to_crt(intel_attached_encoder(connector));
  }
-bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_crt_port_enabled(struct drm_i915_private *i915,
  			    i915_reg_t adpa_reg, enum pipe *pipe)
  {
  	u32 val;
@@ -78,7 +78,7 @@ bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
  	val = I915_READ(adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
-	if (HAS_PCH_CPT(dev_priv))
+	if (HAS_PCH_CPT(i915))
  		*pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
  	else
  		*pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
@@ -89,26 +89,26 @@ bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
  static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
  				   enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crt *crt = intel_encoder_to_crt(encoder);
  	intel_wakeref_t wakeref;
  	bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     encoder->power_domain);
  	if (!wakeref)
  		return false;
- ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
+	ret = intel_crt_port_enabled(i915, crt->adpa_reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+	intel_display_power_put(i915, encoder->power_domain, wakeref);
return ret;
  }
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crt *crt = intel_encoder_to_crt(encoder);
  	u32 tmp, flags = 0;
@@ -140,7 +140,7 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
  static void hsw_crt_get_config(struct intel_encoder *encoder,
  			       struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	intel_ddi_get_config(encoder, pipe_config);
 
@@ -150,7 +150,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
  					      DRM_MODE_FLAG_NVSYNC);
  	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+	pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(i915);
  }
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -159,13 +159,13 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *crtc_state,
  			       int mode)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crt *crt = intel_encoder_to_crt(encoder);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
  	u32 adpa;
- if (INTEL_GEN(dev_priv) >= 5)
+	if (INTEL_GEN(i915) >= 5)
  		adpa = ADPA_HOTPLUG_BITS;
  	else
  		adpa = 0;
@@ -176,14 +176,14 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
  		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
-	if (HAS_PCH_LPT(dev_priv))
+	if (HAS_PCH_LPT(i915))
  		; /* Those bits don't exist here */
-	else if (HAS_PCH_CPT(dev_priv))
+	else if (HAS_PCH_CPT(i915))
  		adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
  	else
  		adpa |= ADPA_PIPE_SEL(crtc->pipe);
- if (!HAS_PCH_SPLIT(dev_priv))
+	if (!HAS_PCH_SPLIT(i915))
  		I915_WRITE(BCLRPAT(crtc->pipe), 0);
switch (mode) {
@@ -228,57 +228,57 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
  			    const struct intel_crtc_state *old_crtc_state,
  			    const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	WARN_ON(!old_crtc_state->has_pch_encoder);
 
-	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+	intel_set_pch_fifo_underrun_reporting(i915, PIPE_A, false);
  }
static void hsw_post_disable_crt(struct intel_encoder *encoder,
  				 const struct intel_crtc_state *old_crtc_state,
  				 const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	intel_ddi_disable_pipe_clock(old_crtc_state);
 
  	pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
 
-	lpt_disable_pch_transcoder(dev_priv);
-	lpt_disable_iclkip(dev_priv);
+	lpt_disable_pch_transcoder(i915);
+	lpt_disable_iclkip(i915);
  	intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
 
  	WARN_ON(!old_crtc_state->has_pch_encoder);
 
-	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+	intel_set_pch_fifo_underrun_reporting(i915, PIPE_A, true);
  }
static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
  				   const struct intel_crtc_state *crtc_state,
  				   const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	WARN_ON(!crtc_state->has_pch_encoder);
 
-	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+	intel_set_pch_fifo_underrun_reporting(i915, PIPE_A, false);
  }
static void hsw_pre_enable_crt(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *crtc_state,
  			       const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	enum pipe pipe = crtc->pipe;
  	WARN_ON(!crtc_state->has_pch_encoder);
 
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+	intel_set_cpu_fifo_underrun_reporting(i915, pipe, false);
- dev_priv->display.fdi_link_train(crtc, crtc_state);
+	i915->display.fdi_link_train(crtc, crtc_state);
intel_ddi_enable_pipe_clock(crtc_state);
  }
@@ -287,7 +287,7 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
  			   const struct intel_crtc_state *crtc_state,
  			   const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	enum pipe pipe = crtc->pipe;
@@ -295,10 +295,10 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
 
  	intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
 
-	intel_wait_for_vblank(dev_priv, pipe);
-	intel_wait_for_vblank(dev_priv, pipe);
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+	intel_wait_for_vblank(i915, pipe);
+	intel_wait_for_vblank(i915, pipe);
+	intel_set_cpu_fifo_underrun_reporting(i915, pipe, true);
+	intel_set_pch_fifo_underrun_reporting(i915, PIPE_A, true);
  }
static void intel_enable_crt(struct intel_encoder *encoder,
@@ -313,8 +313,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
  		     struct drm_display_mode *mode)
  {
  	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int max_dotclk = dev_priv->max_dotclk_freq;
+	struct drm_i915_private *i915 = to_i915(dev);
+	int max_dotclk = i915->max_dotclk_freq;
  	int max_clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -323,15 +323,15 @@ intel_crt_mode_valid(struct drm_connector *connector,
  	if (mode->clock < 25000)
  		return MODE_CLOCK_LOW;
- if (HAS_PCH_LPT(dev_priv))
+	if (HAS_PCH_LPT(i915))
  		max_clock = 180000;
-	else if (IS_VALLEYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915))
  		/*
  		 * 270 MHz due to current DPLL limits,
  		 * DAC limit supposedly 355 MHz.
  		 */
  		max_clock = 270000;
-	else if (IS_GEN_RANGE(dev_priv, 3, 4))
+	else if (IS_GEN_RANGE(i915, 3, 4))
  		max_clock = 400000;
  	else
  		max_clock = 350000;
@@ -342,7 +342,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
  		return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
-	if (HAS_PCH_LPT(dev_priv) &&
+	if (HAS_PCH_LPT(i915) &&
  	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
  		return MODE_CLOCK_HIGH;
@@ -388,7 +388,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
  				  struct intel_crtc_state *pipe_config,
  				  struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct drm_display_mode *adjusted_mode =
  		&pipe_config->base.adjusted_mode;
@@ -404,7 +404,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
  	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
-	if (HAS_PCH_LPT(dev_priv)) {
+	if (HAS_PCH_LPT(i915)) {
  		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
  			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
  			return -EINVAL;
@@ -423,13 +423,13 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
  {
  	struct drm_device *dev = connector->dev;
  	struct intel_crt *crt = intel_attached_crt(connector);
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 adpa;
  	bool ret;
/* The first time through, trigger an explicit detection cycle */
  	if (crt->force_hotplug_required) {
-		bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
+		bool turn_off_dac = HAS_PCH_SPLIT(i915);
  		u32 save_adpa;
crt->force_hotplug_required = 0;
@@ -443,7 +443,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
  		I915_WRITE(crt->adpa_reg, adpa);
 
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    crt->adpa_reg,
  					    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
  					    1000))
@@ -470,7 +470,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
  {
  	struct drm_device *dev = connector->dev;
  	struct intel_crt *crt = intel_attached_crt(connector);
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	bool reenable_hpd;
  	u32 adpa;
  	bool ret;
@@ -488,7 +488,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
  	 *
  	 * Just disable HPD interrupts here to prevent this
  	 */
-	reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+	reenable_hpd = intel_hpd_disable(i915, crt->base.hpd_pin);
save_adpa = adpa = I915_READ(crt->adpa_reg);
  	DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
@@ -497,7 +497,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
  	I915_WRITE(crt->adpa_reg, adpa);
 
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    crt->adpa_reg,
  				    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
  				    1000)) {
@@ -515,7 +515,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
  	DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
if (reenable_hpd)
-		intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+		intel_hpd_enable(i915, crt->base.hpd_pin);
return ret;
  }
@@ -523,15 +523,15 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
  static bool intel_crt_detect_hotplug(struct drm_connector *connector)
  {
  	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 stat;
  	bool ret = false;
  	int i, tries = 0;
- if (HAS_PCH_SPLIT(dev_priv))
+	if (HAS_PCH_SPLIT(i915))
  		return intel_ironlake_crt_detect_hotplug(connector);
- if (IS_VALLEYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915))
  		return valleyview_crt_detect_hotplug(connector);
/*
@@ -539,18 +539,18 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
  	 * to get a reliable result.
  	 */
- if (IS_G45(dev_priv))
+	if (IS_G45(i915))
  		tries = 2;
  	else
  		tries = 1;
for (i = 0; i < tries ; i++) {
  		/* turn on the FORCE_DETECT */
-		i915_hotplug_interrupt_update(dev_priv,
+		i915_hotplug_interrupt_update(i915,
  					      CRT_HOTPLUG_FORCE_DETECT,
  					      CRT_HOTPLUG_FORCE_DETECT);
  		/* wait for FORCE_DETECT to go off */
-		if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
+		if (intel_wait_for_register(&i915->uncore, PORT_HOTPLUG_EN,
  					    CRT_HOTPLUG_FORCE_DETECT, 0,
  					    1000))
  			DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
@@ -563,7 +563,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
  	/* clear the interrupt we just generated, if any */
  	I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
- i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
+	i915_hotplug_interrupt_update(i915, CRT_HOTPLUG_FORCE_DETECT, 0);
return ret;
  }
@@ -605,14 +605,14 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
  static bool intel_crt_detect_ddc(struct drm_connector *connector)
  {
  	struct intel_crt *crt = intel_attached_crt(connector);
-	struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(crt->base.base.dev);
  	struct edid *edid;
  	struct i2c_adapter *i2c;
  	bool ret = false;
  	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
-	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+	i2c = intel_gmbus_get_adapter(i915, i915->vbt.crt_ddc_pin);
  	edid = intel_crt_get_edid(connector, i2c);
if (edid) {
@@ -642,8 +642,8 @@ static enum drm_connector_status
  intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
  {
  	struct drm_device *dev = crt->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_uncore *uncore = &i915->uncore;
  	u32 save_bclrpat;
  	u32 save_vtotal;
  	u32 vtotal, vactive;
@@ -677,7 +677,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
  	/* Set the border color to purple. */
  	intel_uncore_write(uncore, bclrpat_reg, 0x500050);
- if (!IS_GEN(dev_priv, 2)) {
+	if (!IS_GEN(i915, 2)) {
  		u32 pipeconf = intel_uncore_read(uncore, pipeconf_reg);
  		intel_uncore_write(uncore,
  				   pipeconf_reg,
@@ -685,7 +685,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
  		intel_uncore_posting_read(uncore, pipeconf_reg);
  		/* Wait for next Vblank to substitue
  		 * border color for Color info */
-		intel_wait_for_vblank(dev_priv, pipe);
+		intel_wait_for_vblank(i915, pipe);
  		st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
  		status = ((st00 & (1 << 4)) != 0) ?
  			connector_status_connected :
@@ -789,7 +789,7 @@ intel_crt_detect(struct drm_connector *connector,
  		 struct drm_modeset_acquire_ctx *ctx,
  		 bool force)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	struct intel_crt *crt = intel_attached_crt(connector);
  	struct intel_encoder *intel_encoder = &crt->base;
  	intel_wakeref_t wakeref;
@@ -801,7 +801,7 @@ intel_crt_detect(struct drm_connector *connector,
  		      force);
if (i915_modparams.load_detect_test) {
-		wakeref = intel_display_power_get(dev_priv,
+		wakeref = intel_display_power_get(i915,
  						  intel_encoder->power_domain);
  		goto load_detect;
  	}
@@ -810,10 +810,10 @@ intel_crt_detect(struct drm_connector *connector,
  	if (dmi_check_system(intel_spurious_crt_detect))
  		return connector_status_disconnected;
- wakeref = intel_display_power_get(dev_priv,
+	wakeref = intel_display_power_get(i915,
  					  intel_encoder->power_domain);
- if (I915_HAS_HOTPLUG(dev_priv)) {
+	if (I915_HAS_HOTPLUG(i915)) {
  		/* We can not rely on the HPD pin always being correctly wired
  		 * up, for example many KVM do not pass it through, and so
  		 * only trust an assertion that the monitor is connected.
@@ -835,7 +835,7 @@ intel_crt_detect(struct drm_connector *connector,
  	 * broken monitor (without edid) to work behind a broken kvm (that fails
  	 * to have the right resistors for HP detection) needs to fix this up.
  	 * For now just bail out. */
-	if (I915_HAS_HOTPLUG(dev_priv)) {
+	if (I915_HAS_HOTPLUG(i915)) {
  		status = connector_status_disconnected;
  		goto out;
  	}
@@ -851,7 +851,7 @@ intel_crt_detect(struct drm_connector *connector,
  	if (ret > 0) {
  		if (intel_crt_detect_ddc(connector))
  			status = connector_status_connected;
-		else if (INTEL_GEN(dev_priv) < 4)
+		else if (INTEL_GEN(i915) < 4)
  			status = intel_crt_load_detect(crt,
  				to_intel_crtc(connector->state->crtc)->pipe);
  		else if (i915_modparams.load_detect_test)
@@ -866,44 +866,44 @@ intel_crt_detect(struct drm_connector *connector,
  	}
out:
-	intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+	intel_display_power_put(i915, intel_encoder->power_domain, wakeref);
  	return status;
  }
static int intel_crt_get_modes(struct drm_connector *connector)
  {
  	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crt *crt = intel_attached_crt(connector);
  	struct intel_encoder *intel_encoder = &crt->base;
  	intel_wakeref_t wakeref;
  	struct i2c_adapter *i2c;
  	int ret;
- wakeref = intel_display_power_get(dev_priv,
+	wakeref = intel_display_power_get(i915,
  					  intel_encoder->power_domain);
- i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+	i2c = intel_gmbus_get_adapter(i915, i915->vbt.crt_ddc_pin);
  	ret = intel_crt_ddc_get_modes(connector, i2c);
-	if (ret || !IS_G4X(dev_priv))
+	if (ret || !IS_G4X(i915))
  		goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
-	i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB);
+	i2c = intel_gmbus_get_adapter(i915, GMBUS_PIN_DPB);
  	ret = intel_crt_ddc_get_modes(connector, i2c);
out:
-	intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+	intel_display_power_put(i915, intel_encoder->power_domain, wakeref);
return ret;
  }
void intel_crt_reset(struct drm_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct drm_i915_private *i915 = to_i915(encoder->dev);
  	struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
- if (INTEL_GEN(dev_priv) >= 5) {
+	if (INTEL_GEN(i915) >= 5) {
  		u32 adpa;
adpa = I915_READ(crt->adpa_reg);
@@ -942,7 +942,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
  	.destroy = intel_encoder_destroy,
  };
-void intel_crt_init(struct drm_i915_private *dev_priv)
+void intel_crt_init(struct drm_i915_private *i915)
  {
  	struct drm_connector *connector;
  	struct intel_crt *crt;
@@ -950,9 +950,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
  	i915_reg_t adpa_reg;
  	u32 adpa;
- if (HAS_PCH_SPLIT(dev_priv))
+	if (HAS_PCH_SPLIT(i915))
  		adpa_reg = PCH_ADPA;
-	else if (IS_VALLEYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915))
  		adpa_reg = VLV_ADPA;
  	else
  		adpa_reg = ADPA;
@@ -986,22 +986,22 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
connector = &intel_connector->base;
  	crt->connector = intel_connector;
-	drm_connector_init(&dev_priv->drm, &intel_connector->base,
+	drm_connector_init(&i915->drm, &intel_connector->base,
  			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs,
+	drm_encoder_init(&i915->drm, &crt->base.base, &intel_crt_enc_funcs,
  			 DRM_MODE_ENCODER_DAC, "CRT");
  	intel_connector_attach_encoder(intel_connector, &crt->base);
 
  	crt->base.type = INTEL_OUTPUT_ANALOG;
  	crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
-	if (IS_I830(dev_priv))
+	if (IS_I830(i915))
  		crt->base.crtc_mask = (1 << 0);
  	else
  		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- if (IS_GEN(dev_priv, 2))
+	if (IS_GEN(i915, 2))
  		connector->interlace_allowed = 0;
  	else
  		connector->interlace_allowed = 1;
@@ -1011,13 +1011,13 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
  	crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
 
-	if (I915_HAS_HOTPLUG(dev_priv) &&
+	if (I915_HAS_HOTPLUG(i915) &&
  	    !dmi_check_system(intel_spurious_crt_detect)) {
  		crt->base.hpd_pin = HPD_CRT;
  		crt->base.hotplug = intel_encoder_hotplug;
  	}
- if (HAS_DDI(dev_priv)) {
+	if (HAS_DDI(i915)) {
  		crt->base.port = PORT_E;
  		crt->base.get_config = hsw_crt_get_config;
  		crt->base.get_hw_state = intel_ddi_get_hw_state;
@@ -1028,7 +1028,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
  		crt->base.disable = hsw_disable_crt;
  		crt->base.post_disable = hsw_post_disable_crt;
  	} else {
-		if (HAS_PCH_SPLIT(dev_priv)) {
+		if (HAS_PCH_SPLIT(i915)) {
  			crt->base.compute_config = pch_crt_compute_config;
  			crt->base.disable = pch_disable_crt;
  			crt->base.post_disable = pch_post_disable_crt;
@@ -1045,7 +1045,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
  	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
-	if (!I915_HAS_HOTPLUG(dev_priv))
+	if (!I915_HAS_HOTPLUG(i915))
  		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/*
@@ -1058,11 +1058,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
  	 * polarity and link reversal bits or not, instead of relying on the
  	 * BIOS.
  	 */
-	if (HAS_PCH_LPT(dev_priv)) {
+	if (HAS_PCH_LPT(i915)) {
  		u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
  				 FDI_RX_LINK_REVERSAL_OVERRIDE;
- dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
+		i915->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
  	}
intel_crt_reset(&crt->base.base);
diff --git a/drivers/gpu/drm/i915/intel_crt.h b/drivers/gpu/drm/i915/intel_crt.h
index 1b3fba359efc..f771fbcf8b27 100644
--- a/drivers/gpu/drm/i915/intel_crt.h
+++ b/drivers/gpu/drm/i915/intel_crt.h
@@ -13,9 +13,9 @@ struct drm_encoder;
  struct drm_i915_private;
  struct drm_i915_private;
-bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_crt_port_enabled(struct drm_i915_private *i915,
  			    i915_reg_t adpa_reg, enum pipe *pipe);
-void intel_crt_init(struct drm_i915_private *dev_priv);
+void intel_crt_init(struct drm_i915_private *i915);
  void intel_crt_reset(struct drm_encoder *encoder);
#endif /* __INTEL_CRT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index bf0eebd385b9..29ea3cb6bbaf 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -206,18 +206,18 @@ static const struct stepping_info icl_stepping_info[] = {
  static const struct stepping_info no_stepping_info = { '*', '*' };
static const struct stepping_info *
-intel_get_stepping_info(struct drm_i915_private *dev_priv)
+intel_get_stepping_info(struct drm_i915_private *i915)
  {
  	const struct stepping_info *si;
  	unsigned int size;
- if (IS_ICELAKE(dev_priv)) {
+	if (IS_ICELAKE(i915)) {
  		size = ARRAY_SIZE(icl_stepping_info);
  		si = icl_stepping_info;
-	} else if (IS_SKYLAKE(dev_priv)) {
+	} else if (IS_SKYLAKE(i915)) {
  		size = ARRAY_SIZE(skl_stepping_info);
  		si = skl_stepping_info;
-	} else if (IS_BROXTON(dev_priv)) {
+	} else if (IS_BROXTON(i915)) {
  		size = ARRAY_SIZE(bxt_stepping_info);
  		si = bxt_stepping_info;
  	} else {
@@ -225,19 +225,19 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
  		si = NULL;
  	}
- if (INTEL_REVID(dev_priv) < size)
-		return si + INTEL_REVID(dev_priv);
+	if (INTEL_REVID(i915) < size)
+		return si + INTEL_REVID(i915);
return &no_stepping_info;
  }
-static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915)
  {
  	u32 val, mask;
  	mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
 
-	if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
  		mask |= DC_STATE_DEBUG_MASK_CORES;
/* The below bit doesn't need to be cleared ever afterwards */
@@ -251,29 +251,29 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
/**
   * intel_csr_load_program() - write the firmware from memory to register.
- * @dev_priv: i915 drm device.
+ * @i915: i915 drm device.
   *
   * CSR firmware is read from a .bin file and kept in internal memory one time.
   * Everytime display comes back from low power state this function is called to
   * copy the firmware from internal memory to registers.
   */
-void intel_csr_load_program(struct drm_i915_private *dev_priv)
+void intel_csr_load_program(struct drm_i915_private *i915)
  {
-	u32 *payload = dev_priv->csr.dmc_payload;
+	u32 *payload = i915->csr.dmc_payload;
  	u32 i, fw_size;
- if (!HAS_CSR(dev_priv)) {
+	if (!HAS_CSR(i915)) {
  		DRM_ERROR("No CSR support available for this platform\n");
  		return;
  	}
- if (!dev_priv->csr.dmc_payload) {
+	if (!i915->csr.dmc_payload) {
  		DRM_ERROR("Tried to program CSR with empty payload\n");
  		return;
  	}
- fw_size = dev_priv->csr.dmc_fw_size;
-	assert_rpm_wakelock_held(dev_priv);
+	fw_size = i915->csr.dmc_fw_size;
+	assert_rpm_wakelock_held(i915);
  	preempt_disable();
 
@@ -282,24 +282,24 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
 
  	preempt_enable();
 
-	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
-		I915_WRITE(dev_priv->csr.mmioaddr[i],
-			   dev_priv->csr.mmiodata[i]);
+	for (i = 0; i < i915->csr.mmio_count; i++) {
+		I915_WRITE(i915->csr.mmioaddr[i],
+			   i915->csr.mmiodata[i]);
  	}
- dev_priv->csr.dc_state = 0;
+	i915->csr.dc_state = 0;
- gen9_set_dc_state_debugmask(dev_priv);
+	gen9_set_dc_state_debugmask(i915);
  }
-static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
+static u32 *parse_csr_fw(struct drm_i915_private *i915,
  			 const struct firmware *fw)
  {
  	struct intel_css_header *css_header;
  	struct intel_package_header *package_header;
  	struct intel_dmc_header *dmc_header;
-	struct intel_csr *csr = &dev_priv->csr;
-	const struct stepping_info *si = intel_get_stepping_info(dev_priv);
+	struct intel_csr *csr = &i915->csr;
+	const struct stepping_info *si = intel_get_stepping_info(i915);
  	u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
  	u32 i;
  	u32 *dmc_payload;
@@ -430,48 +430,48 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
  	return NULL;
  }
-static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
+static void intel_csr_runtime_pm_get(struct drm_i915_private *i915)
  {
-	WARN_ON(dev_priv->csr.wakeref);
-	dev_priv->csr.wakeref =
-		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+	WARN_ON(i915->csr.wakeref);
+	i915->csr.wakeref =
+		intel_display_power_get(i915, POWER_DOMAIN_INIT);
  }
-static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv)
+static void intel_csr_runtime_pm_put(struct drm_i915_private *i915)
  {
  	intel_wakeref_t wakeref __maybe_unused =
-		fetch_and_zero(&dev_priv->csr.wakeref);
+		fetch_and_zero(&i915->csr.wakeref);
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
  }
static void csr_load_work_fn(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
  	struct intel_csr *csr;
  	const struct firmware *fw = NULL;
- dev_priv = container_of(work, typeof(*dev_priv), csr.work);
-	csr = &dev_priv->csr;
+	i915 = container_of(work, typeof(*i915), csr.work);
+	csr = &i915->csr;
- request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
+	request_firmware(&fw, i915->csr.fw_path, &i915->drm.pdev->dev);
  	if (fw)
-		dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
+		i915->csr.dmc_payload = parse_csr_fw(i915, fw);
- if (dev_priv->csr.dmc_payload) {
-		intel_csr_load_program(dev_priv);
-		intel_csr_runtime_pm_put(dev_priv);
+	if (i915->csr.dmc_payload) {
+		intel_csr_load_program(i915);
+		intel_csr_runtime_pm_put(i915);
DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
-			 dev_priv->csr.fw_path,
+			 i915->csr.fw_path,
  			 CSR_VERSION_MAJOR(csr->version),
  			 CSR_VERSION_MINOR(csr->version));
  	} else {
-		dev_notice(dev_priv->drm.dev,
+		dev_notice(i915->drm.dev,
  			   "Failed to load DMC firmware %s."
  			   " Disabling runtime power management.\n",
  			   csr->fw_path);
-		dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
+		dev_notice(i915->drm.dev, "DMC firmware homepage: %s",
  			   INTEL_UC_FIRMWARE_URL);
  	}
@@ -480,18 +480,18 @@ static void csr_load_work_fn(struct work_struct *work)
 
  /**
   * intel_csr_ucode_init() - initialize the firmware loading.
- * @dev_priv: i915 drm device.
+ * @i915: i915 drm device.
   *
   * This function is called at the time of loading the display driver to read
   * firmware from a .bin file and copied into a internal memory.
   */
-void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
+void intel_csr_ucode_init(struct drm_i915_private *i915)
  {
-	struct intel_csr *csr = &dev_priv->csr;
+	struct intel_csr *csr = &i915->csr;
- INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);
+	INIT_WORK(&i915->csr.work, csr_load_work_fn);
- if (!HAS_CSR(dev_priv))
+	if (!HAS_CSR(i915))
  		return;
/*
@@ -502,32 +502,32 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
  	 * suspend as runtime suspend *requires* a working CSR for whatever
  	 * reason.
  	 */
-	intel_csr_runtime_pm_get(dev_priv);
+	intel_csr_runtime_pm_get(i915);
- if (INTEL_GEN(dev_priv) >= 12) {
+	if (INTEL_GEN(i915) >= 12) {
  		/* Allow to load fw via parameter using the last known size */
  		csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
-	} else if (IS_GEN(dev_priv, 11)) {
+	} else if (IS_GEN(i915, 11)) {
  		csr->fw_path = ICL_CSR_PATH;
  		csr->required_version = ICL_CSR_VERSION_REQUIRED;
  		csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
-	} else if (IS_CANNONLAKE(dev_priv)) {
+	} else if (IS_CANNONLAKE(i915)) {
  		csr->fw_path = CNL_CSR_PATH;
  		csr->required_version = CNL_CSR_VERSION_REQUIRED;
  		csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
-	} else if (IS_GEMINILAKE(dev_priv)) {
+	} else if (IS_GEMINILAKE(i915)) {
  		csr->fw_path = GLK_CSR_PATH;
  		csr->required_version = GLK_CSR_VERSION_REQUIRED;
  		csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
-	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+	} else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
  		csr->fw_path = KBL_CSR_PATH;
  		csr->required_version = KBL_CSR_VERSION_REQUIRED;
  		csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
-	} else if (IS_SKYLAKE(dev_priv)) {
+	} else if (IS_SKYLAKE(i915)) {
  		csr->fw_path = SKL_CSR_PATH;
  		csr->required_version = SKL_CSR_VERSION_REQUIRED;
  		csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
-	} else if (IS_BROXTON(dev_priv)) {
+	} else if (IS_BROXTON(i915)) {
  		csr->fw_path = BXT_CSR_PATH;
  		csr->required_version = BXT_CSR_VERSION_REQUIRED;
  		csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
@@ -551,63 +551,63 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
  	}
DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
-	schedule_work(&dev_priv->csr.work);
+	schedule_work(&i915->csr.work);
  }
/**
   * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
- * @dev_priv: i915 drm device
+ * @i915: i915 drm device
   *
   * Prepare the DMC firmware before entering system suspend. This includes
   * flushing pending work items and releasing any resources acquired during
   * init.
   */
-void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
+void intel_csr_ucode_suspend(struct drm_i915_private *i915)
  {
-	if (!HAS_CSR(dev_priv))
+	if (!HAS_CSR(i915))
  		return;
- flush_work(&dev_priv->csr.work);
+	flush_work(&i915->csr.work);
/* Drop the reference held in case DMC isn't loaded. */
-	if (!dev_priv->csr.dmc_payload)
-		intel_csr_runtime_pm_put(dev_priv);
+	if (!i915->csr.dmc_payload)
+		intel_csr_runtime_pm_put(i915);
  }
/**
   * intel_csr_ucode_resume() - init CSR firmware during system resume
- * @dev_priv: i915 drm device
+ * @i915: i915 drm device
   *
   * Reinitialize the DMC firmware during system resume, reacquiring any
   * resources released in intel_csr_ucode_suspend().
   */
-void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
+void intel_csr_ucode_resume(struct drm_i915_private *i915)
  {
-	if (!HAS_CSR(dev_priv))
+	if (!HAS_CSR(i915))
  		return;
/*
  	 * Reacquire the reference to keep RPM disabled in case DMC isn't
  	 * loaded.
  	 */
-	if (!dev_priv->csr.dmc_payload)
-		intel_csr_runtime_pm_get(dev_priv);
+	if (!i915->csr.dmc_payload)
+		intel_csr_runtime_pm_get(i915);
  }
/**
   * intel_csr_ucode_fini() - unload the CSR firmware.
- * @dev_priv: i915 drm device.
+ * @i915: i915 drm device.
   *
   * Firmmware unloading includes freeing the internal memory and reset the
   * firmware loading status.
   */
-void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
+void intel_csr_ucode_fini(struct drm_i915_private *i915)
  {
-	if (!HAS_CSR(dev_priv))
+	if (!HAS_CSR(i915))
  		return;
- intel_csr_ucode_suspend(dev_priv);
-	WARN_ON(dev_priv->csr.wakeref);
+	intel_csr_ucode_suspend(i915);
+	WARN_ON(i915->csr.wakeref);
- kfree(dev_priv->csr.dmc_payload);
+	kfree(i915->csr.dmc_payload);
  }
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 7925a176f900..eb61731f1abc 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -586,9 +586,9 @@ static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
  };
static const struct ddi_buf_trans *
-bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+bdw_get_buf_trans_edp(struct drm_i915_private *i915, int *n_entries)
  {
-	if (dev_priv->vbt.edp.low_vswing) {
+	if (i915->vbt.edp.low_vswing) {
  		*n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
  		return bdw_ddi_translations_edp;
  	} else {
@@ -598,12 +598,12 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
  }
static const struct ddi_buf_trans *
-skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_dp(struct drm_i915_private *i915, int *n_entries)
  {
-	if (IS_SKL_ULX(dev_priv)) {
+	if (IS_SKL_ULX(i915)) {
  		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
  		return skl_y_ddi_translations_dp;
-	} else if (IS_SKL_ULT(dev_priv)) {
+	} else if (IS_SKL_ULT(i915)) {
  		*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
  		return skl_u_ddi_translations_dp;
  	} else {
@@ -613,12 +613,12 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
  }
static const struct ddi_buf_trans *
-kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+kbl_get_buf_trans_dp(struct drm_i915_private *i915, int *n_entries)
  {
-	if (IS_KBL_ULX(dev_priv) || IS_CFL_ULX(dev_priv)) {
+	if (IS_KBL_ULX(i915) || IS_CFL_ULX(i915)) {
  		*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
  		return kbl_y_ddi_translations_dp;
-	} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
+	} else if (IS_KBL_ULT(i915) || IS_CFL_ULT(i915)) {
  		*n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
  		return kbl_u_ddi_translations_dp;
  	} else {
@@ -628,15 +628,15 @@ kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
  }
static const struct ddi_buf_trans *
-skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_edp(struct drm_i915_private *i915, int *n_entries)
  {
-	if (dev_priv->vbt.edp.low_vswing) {
-		if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
-		    IS_CFL_ULX(dev_priv)) {
+	if (i915->vbt.edp.low_vswing) {
+		if (IS_SKL_ULX(i915) || IS_KBL_ULX(i915) ||
+		    IS_CFL_ULX(i915)) {
  			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
  			return skl_y_ddi_translations_edp;
-		} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
-			   IS_CFL_ULT(dev_priv)) {
+		} else if (IS_SKL_ULT(i915) || IS_KBL_ULT(i915) ||
+			   IS_CFL_ULT(i915)) {
  			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
  			return skl_u_ddi_translations_edp;
  		} else {
@@ -645,17 +645,17 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
  		}
  	}
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
-		return kbl_get_buf_trans_dp(dev_priv, n_entries);
+	if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
+		return kbl_get_buf_trans_dp(i915, n_entries);
  	else
-		return skl_get_buf_trans_dp(dev_priv, n_entries);
+		return skl_get_buf_trans_dp(i915, n_entries);
  }
static const struct ddi_buf_trans *
-skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_hdmi(struct drm_i915_private *i915, int *n_entries)
  {
-	if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
-	    IS_CFL_ULX(dev_priv)) {
+	if (IS_SKL_ULX(i915) || IS_KBL_ULX(i915) ||
+	    IS_CFL_ULX(i915)) {
  		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
  		return skl_y_ddi_translations_hdmi;
  	} else {
@@ -674,23 +674,23 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries)
  }
static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
+intel_ddi_get_buf_trans_dp(struct drm_i915_private *i915,
  			   enum port port, int *n_entries)
  {
-	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+	if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
  		const struct ddi_buf_trans *ddi_translations =
-			kbl_get_buf_trans_dp(dev_priv, n_entries);
+			kbl_get_buf_trans_dp(i915, n_entries);
  		*n_entries = skl_buf_trans_num_entries(port, *n_entries);
  		return ddi_translations;
-	} else if (IS_SKYLAKE(dev_priv)) {
+	} else if (IS_SKYLAKE(i915)) {
  		const struct ddi_buf_trans *ddi_translations =
-			skl_get_buf_trans_dp(dev_priv, n_entries);
+			skl_get_buf_trans_dp(i915, n_entries);
  		*n_entries = skl_buf_trans_num_entries(port, *n_entries);
  		return ddi_translations;
-	} else if (IS_BROADWELL(dev_priv)) {
+	} else if (IS_BROADWELL(i915)) {
  		*n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
  		return  bdw_ddi_translations_dp;
-	} else if (IS_HASWELL(dev_priv)) {
+	} else if (IS_HASWELL(i915)) {
  		*n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
  		return hsw_ddi_translations_dp;
  	}
@@ -700,17 +700,17 @@ intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
  }
static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
+intel_ddi_get_buf_trans_edp(struct drm_i915_private *i915,
  			    enum port port, int *n_entries)
  {
-	if (IS_GEN9_BC(dev_priv)) {
+	if (IS_GEN9_BC(i915)) {
  		const struct ddi_buf_trans *ddi_translations =
-			skl_get_buf_trans_edp(dev_priv, n_entries);
+			skl_get_buf_trans_edp(i915, n_entries);
  		*n_entries = skl_buf_trans_num_entries(port, *n_entries);
  		return ddi_translations;
-	} else if (IS_BROADWELL(dev_priv)) {
-		return bdw_get_buf_trans_edp(dev_priv, n_entries);
-	} else if (IS_HASWELL(dev_priv)) {
+	} else if (IS_BROADWELL(i915)) {
+		return bdw_get_buf_trans_edp(i915, n_entries);
+	} else if (IS_HASWELL(i915)) {
  		*n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
  		return hsw_ddi_translations_dp;
  	}
@@ -720,13 +720,13 @@ intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
  }
static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
+intel_ddi_get_buf_trans_fdi(struct drm_i915_private *i915,
  			    int *n_entries)
  {
-	if (IS_BROADWELL(dev_priv)) {
+	if (IS_BROADWELL(i915)) {
  		*n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
  		return bdw_ddi_translations_fdi;
-	} else if (IS_HASWELL(dev_priv)) {
+	} else if (IS_HASWELL(i915)) {
  		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
  		return hsw_ddi_translations_fdi;
  	}
@@ -736,15 +736,15 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
  }
static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
+intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *i915,
  			     int *n_entries)
  {
-	if (IS_GEN9_BC(dev_priv)) {
-		return skl_get_buf_trans_hdmi(dev_priv, n_entries);
-	} else if (IS_BROADWELL(dev_priv)) {
+	if (IS_GEN9_BC(i915)) {
+		return skl_get_buf_trans_hdmi(i915, n_entries);
+	} else if (IS_BROADWELL(i915)) {
  		*n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
  		return bdw_ddi_translations_hdmi;
-	} else if (IS_HASWELL(dev_priv)) {
+	} else if (IS_HASWELL(i915)) {
  		*n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
  		return hsw_ddi_translations_hdmi;
  	}
@@ -754,32 +754,32 @@ intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
  }
static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_dp(struct drm_i915_private *i915, int *n_entries)
  {
  	*n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
  	return bxt_ddi_translations_dp;
  }
static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_edp(struct drm_i915_private *i915, int *n_entries)
  {
-	if (dev_priv->vbt.edp.low_vswing) {
+	if (i915->vbt.edp.low_vswing) {
  		*n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
  		return bxt_ddi_translations_edp;
  	}
- return bxt_get_buf_trans_dp(dev_priv, n_entries);
+	return bxt_get_buf_trans_dp(i915, n_entries);
  }
static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_hdmi(struct drm_i915_private *i915, int *n_entries)
  {
  	*n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
  	return bxt_ddi_translations_hdmi;
  }
static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_hdmi(struct drm_i915_private *i915, int *n_entries)
  {
  	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
@@ -800,7 +800,7 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
  }
static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_dp(struct drm_i915_private *i915, int *n_entries)
  {
  	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
@@ -821,11 +821,11 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
  }
static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_edp(struct drm_i915_private *i915, int *n_entries)
  {
  	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
- if (dev_priv->vbt.edp.low_vswing) {
+	if (i915->vbt.edp.low_vswing) {
  		if (voltage == VOLTAGE_INFO_0_85V) {
  			*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
  			return cnl_ddi_translations_edp_0_85V;
@@ -841,12 +841,12 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
  		}
  		return NULL;
  	} else {
-		return cnl_get_buf_trans_dp(dev_priv, n_entries);
+		return cnl_get_buf_trans_dp(i915, n_entries);
  	}
  }
static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
+icl_get_combo_buf_trans(struct drm_i915_private *i915, enum port port,
  			int type, int rate, int *n_entries)
  {
  	if (type == INTEL_OUTPUT_HDMI) {
@@ -855,7 +855,7 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
  	} else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
  		*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
  		return icl_combo_phy_ddi_translations_edp_hbr3;
-	} else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
+	} else if (type == INTEL_OUTPUT_EDP && i915->vbt.edp.low_vswing) {
  		*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
  		return icl_combo_phy_ddi_translations_edp_hbr2;
  	}
@@ -864,33 +864,33 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
  	return icl_combo_phy_ddi_translations_dp_hbr2;
  }
-static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+static int intel_ddi_hdmi_level(struct drm_i915_private *i915, enum port port)
  {
  	int n_entries, level, default_entry;
- level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+	level = i915->vbt.ddi_port_info[port].hdmi_level_shift;
- if (INTEL_GEN(dev_priv) >= 11) {
-		if (intel_port_is_combophy(dev_priv, port))
-			icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+	if (INTEL_GEN(i915) >= 11) {
+		if (intel_port_is_combophy(i915, port))
+			icl_get_combo_buf_trans(i915, port, INTEL_OUTPUT_HDMI,
  						0, &n_entries);
  		else
  			n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
  		default_entry = n_entries - 1;
-	} else if (IS_CANNONLAKE(dev_priv)) {
-		cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+	} else if (IS_CANNONLAKE(i915)) {
+		cnl_get_buf_trans_hdmi(i915, &n_entries);
  		default_entry = n_entries - 1;
-	} else if (IS_GEN9_LP(dev_priv)) {
-		bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+	} else if (IS_GEN9_LP(i915)) {
+		bxt_get_buf_trans_hdmi(i915, &n_entries);
  		default_entry = n_entries - 1;
-	} else if (IS_GEN9_BC(dev_priv)) {
-		intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+	} else if (IS_GEN9_BC(i915)) {
+		intel_ddi_get_buf_trans_hdmi(i915, &n_entries);
  		default_entry = 8;
-	} else if (IS_BROADWELL(dev_priv)) {
-		intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+	} else if (IS_BROADWELL(i915)) {
+		intel_ddi_get_buf_trans_hdmi(i915, &n_entries);
  		default_entry = 7;
-	} else if (IS_HASWELL(dev_priv)) {
-		intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+	} else if (IS_HASWELL(i915)) {
+		intel_ddi_get_buf_trans_hdmi(i915, &n_entries);
  		default_entry = 6;
  	} else {
  		WARN(1, "ddi translation table missing\n");
@@ -917,25 +917,25 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
  static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
  					 const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 iboost_bit = 0;
  	int i, n_entries;
  	enum port port = encoder->port;
  	const struct ddi_buf_trans *ddi_translations;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
-		ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
+		ddi_translations = intel_ddi_get_buf_trans_fdi(i915,
  							       &n_entries);
  	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-		ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
+		ddi_translations = intel_ddi_get_buf_trans_edp(i915, port,
  							       &n_entries);
  	else
-		ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
+		ddi_translations = intel_ddi_get_buf_trans_dp(i915, port,
  							      &n_entries);
/* If we're boosting the current, set bit 31 of trans1 */
-	if (IS_GEN9_BC(dev_priv) &&
-	    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+	if (IS_GEN9_BC(i915) &&
+	    i915->vbt.ddi_port_info[port].dp_boost_level)
  		iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
@@ -954,13 +954,13 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
  static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
  					   int level)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 iboost_bit = 0;
  	int n_entries;
  	enum port port = encoder->port;
  	const struct ddi_buf_trans *ddi_translations;
- ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+	ddi_translations = intel_ddi_get_buf_trans_hdmi(i915, &n_entries);
if (WARN_ON_ONCE(!ddi_translations))
  		return;
@@ -968,8 +968,8 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
  		level = n_entries - 1;
/* If we're boosting the current, set bit 31 of trans1 */
-	if (IS_GEN9_BC(dev_priv) &&
-	    dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+	if (IS_GEN9_BC(i915) &&
+	    i915->vbt.ddi_port_info[port].hdmi_boost_level)
  		iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
@@ -979,7 +979,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
  		   ddi_translations[level].trans2);
  }
-static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *i915,
  				    enum port port)
  {
  	i915_reg_t reg = DDI_BUF_CTL(port);
@@ -1064,7 +1064,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
  			const struct intel_crtc_state *crtc_state)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_encoder *encoder;
  	u32 temp, i, rx_ctl_val, ddi_pll_sel;
@@ -1085,7 +1085,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
  				  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
-	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+	rx_ctl_val = i915->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
  		     FDI_RX_PLL_ENABLE |
  		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
  	I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
@@ -1174,7 +1174,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
  		I915_WRITE(DP_TP_CTL(PORT_E), temp);
  		POSTING_READ(DP_TP_CTL(PORT_E));
- intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+		intel_wait_ddi_buf_idle(i915, PORT_E);
/* Reset FDI_RX_MISC pwrdn lanes */
  		temp = I915_READ(FDI_RX_MISC(PIPE_A));
@@ -1223,7 +1223,7 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
  	return ret;
  }
-static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
+static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *i915,
  				   i915_reg_t reg)
  {
  	int refclk;
@@ -1238,7 +1238,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
  		 * non-SSC for non-ULT HSW. Check FUSE_STRAP3
  		 * for the non-SSC reference frequency.
  		 */
-		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+		if (IS_HASWELL(i915) && !IS_HSW_ULT(i915)) {
  			if (I915_READ(FUSE_STRAP3) & HSW_REF_CLK_SELECT)
  				refclk = 24;
  			else
@@ -1325,7 +1325,7 @@ static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
  	return dco_freq / (p0 * p1 * p2 * 5);
  }
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+int cnl_calc_wrpll_link(struct drm_i915_private *i915,
  			struct intel_dpll_hw_state *pll_state)
  {
  	u32 p0, p1, p2, dco_freq, ref_clock;
@@ -1367,7 +1367,7 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
  		break;
  	}
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
+	ref_clock = cnl_hdmi_pll_ref_clock(i915);
dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
  		* ref_clock;
@@ -1381,7 +1381,7 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
  	return dco_freq / (p0 * p1 * p2 * 5);
  }
-static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
+static int icl_calc_tbt_pll_link(struct drm_i915_private *i915,
  				 enum port port)
  {
  	u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
@@ -1403,13 +1403,13 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
  	}
  }
-static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
+static int icl_calc_mg_pll_link(struct drm_i915_private *i915,
  				const struct intel_dpll_hw_state *pll_state)
  {
  	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
  	u64 tmp;
- ref_clock = dev_priv->cdclk.hw.ref;
+	ref_clock = i915->cdclk.hw.ref;
m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
  	m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
@@ -1483,21 +1483,21 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
  static void icl_ddi_clock_get(struct intel_encoder *encoder,
  			      struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
  	enum port port = encoder->port;
  	int link_clock;
- if (intel_port_is_combophy(dev_priv, port)) {
-		link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
+	if (intel_port_is_combophy(i915, port)) {
+		link_clock = cnl_calc_wrpll_link(i915, pll_state);
  	} else {
-		enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
+		enum intel_dpll_id pll_id = intel_get_shared_dpll_id(i915,
  						pipe_config->shared_dpll);
if (pll_id == DPLL_ID_ICL_TBTPLL)
-			link_clock = icl_calc_tbt_pll_link(dev_priv, port);
+			link_clock = icl_calc_tbt_pll_link(i915, port);
  		else
-			link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
+			link_clock = icl_calc_mg_pll_link(i915, pll_state);
  	}
pipe_config->port_clock = link_clock;
@@ -1508,12 +1508,12 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
  static void cnl_ddi_clock_get(struct intel_encoder *encoder,
  			      struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
  	int link_clock;
if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
-		link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
+		link_clock = cnl_calc_wrpll_link(i915, pll_state);
  	} else {
  		link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
@@ -1604,7 +1604,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
  static void hsw_ddi_clock_get(struct intel_encoder *encoder,
  			      struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	int link_clock = 0;
  	u32 val, pll;
@@ -1620,10 +1620,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
  		link_clock = 270000;
  		break;
  	case PORT_CLK_SEL_WRPLL1:
-		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
+		link_clock = hsw_ddi_calc_wrpll_link(i915, WRPLL_CTL(0));
  		break;
  	case PORT_CLK_SEL_WRPLL2:
-		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
+		link_clock = hsw_ddi_calc_wrpll_link(i915, WRPLL_CTL(1));
  		break;
  	case PORT_CLK_SEL_SPLL:
  		pll = I915_READ(SPLL_CTL) & SPLL_FREQ_MASK;
@@ -1675,24 +1675,24 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
  static void intel_ddi_clock_get(struct intel_encoder *encoder,
  				struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_ddi_clock_get(encoder, pipe_config);
-	else if (IS_CANNONLAKE(dev_priv))
+	else if (IS_CANNONLAKE(i915))
  		cnl_ddi_clock_get(encoder, pipe_config);
-	else if (IS_GEN9_LP(dev_priv))
+	else if (IS_GEN9_LP(i915))
  		bxt_ddi_clock_get(encoder, pipe_config);
-	else if (IS_GEN9_BC(dev_priv))
+	else if (IS_GEN9_BC(i915))
  		skl_ddi_clock_get(encoder, pipe_config);
-	else if (INTEL_GEN(dev_priv) <= 8)
+	else if (INTEL_GEN(i915) <= 8)
  		hsw_ddi_clock_get(encoder, pipe_config);
  }
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	u32 temp;
@@ -1746,7 +1746,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
  				    bool state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	u32 temp;
@@ -1762,7 +1762,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	enum port port = encoder->port;
@@ -1845,7 +1845,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
  void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
  	u32 val = I915_READ(reg);
@@ -1854,7 +1854,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
  	val |= TRANS_DDI_PORT_NONE;
  	I915_WRITE(reg, val);
- if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+	if (i915->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
  	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
  		DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
  		/* Quirk time at 100ms for reliable operation */
@@ -1866,13 +1866,13 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
  				     bool enable)
  {
  	struct drm_device *dev = intel_encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	intel_wakeref_t wakeref;
  	enum pipe pipe = 0;
  	int ret = 0;
  	u32 tmp;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     intel_encoder->power_domain);
  	if (WARN_ON(!wakeref))
  		return -ENXIO;
@@ -1889,14 +1889,14 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
  		tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
  	I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
  out:
-	intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+	intel_display_power_put(i915, intel_encoder->power_domain, wakeref);
  	return ret;
  }
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
  {
  	struct drm_device *dev = intel_connector->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_encoder *encoder = intel_connector->encoder;
  	int type = intel_connector->base.connector_type;
  	enum port port = encoder->port;
@@ -1906,7 +1906,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
  	u32 tmp;
  	bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     encoder->power_domain);
  	if (!wakeref)
  		return false;
@@ -1916,7 +1916,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
  		goto out;
  	}
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+	if (HAS_TRANSCODER_EDP(i915) && port == PORT_A)
  		cpu_transcoder = TRANSCODER_EDP;
  	else
  		cpu_transcoder = (enum transcoder) pipe;
@@ -1950,7 +1950,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
  	}
out:
-	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+	intel_display_power_put(i915, encoder->power_domain, wakeref);
return ret;
  }
@@ -1959,7 +1959,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
  					u8 *pipe_mask, bool *is_dp_mst)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum port port = encoder->port;
  	intel_wakeref_t wakeref;
  	enum pipe p;
@@ -1969,7 +1969,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
  	*pipe_mask = 0;
  	*is_dp_mst = false;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     encoder->power_domain);
  	if (!wakeref)
  		return;
@@ -1978,7 +1978,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
  	if (!(tmp & DDI_BUF_CTL_ENABLE))
  		goto out;
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
+	if (HAS_TRANSCODER_EDP(i915) && port == PORT_A) {
  		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
@@ -2001,7 +2001,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
  	}
mst_pipe_mask = 0;
-	for_each_pipe(dev_priv, p) {
+	for_each_pipe(i915, p) {
  		enum transcoder cpu_transcoder = (enum transcoder)p;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -2033,7 +2033,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
  		*is_dp_mst = mst_pipe_mask;
out:
-	if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
+	if (*pipe_mask && IS_GEN9_LP(i915)) {
  		tmp = I915_READ(BXT_PHY_CTL(port));
  		if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
  			    BXT_PHY_LANE_POWERDOWN_ACK |
@@ -2042,7 +2042,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
  				  "(PHY_CTL %08x)\n", port_name(port), tmp);
  	}
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+	intel_display_power_put(i915, encoder->power_domain, wakeref);
  }
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2083,7 +2083,7 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
  static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
  					struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *dig_port;
/*
@@ -2095,29 +2095,29 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
  		return;
dig_port = enc_to_dig_port(&encoder->base);
-	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+	intel_display_power_get(i915, dig_port->ddi_io_power_domain);
/*
  	 * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
  	 * ports.
  	 */
  	if (intel_crtc_has_dp_encoder(crtc_state) ||
-	    intel_port_is_tc(dev_priv, encoder->port))
-		intel_display_power_get(dev_priv,
+	    intel_port_is_tc(i915, encoder->port))
+		intel_display_power_get(i915,
  					intel_ddi_main_link_aux_domain(dig_port));
/*
  	 * VDSC power is needed when DSC is enabled
  	 */
  	if (crtc_state->dsc_params.compression_enable)
-		intel_display_power_get(dev_priv,
+		intel_display_power_get(i915,
  					intel_dsc_power_domain(crtc_state));
  }
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
  	enum port port = encoder->port;
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -2129,7 +2129,7 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
@@ -2137,7 +2137,7 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
  			   TRANS_CLK_SEL_DISABLED);
  }
-static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+static void _skl_ddi_set_iboost(struct drm_i915_private *i915,
  				enum port port, u8 iboost)
  {
  	u32 tmp;
@@ -2155,25 +2155,25 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
  			       int level, enum intel_output_type type)
  {
  	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	u8 iboost;
if (type == INTEL_OUTPUT_HDMI)
-		iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+		iboost = i915->vbt.ddi_port_info[port].hdmi_boost_level;
  	else
-		iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+		iboost = i915->vbt.ddi_port_info[port].dp_boost_level;
if (iboost == 0) {
  		const struct ddi_buf_trans *ddi_translations;
  		int n_entries;
if (type == INTEL_OUTPUT_HDMI)
-			ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+			ddi_translations = intel_ddi_get_buf_trans_hdmi(i915, &n_entries);
  		else if (type == INTEL_OUTPUT_EDP)
-			ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+			ddi_translations = intel_ddi_get_buf_trans_edp(i915, port, &n_entries);
  		else
-			ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+			ddi_translations = intel_ddi_get_buf_trans_dp(i915, port, &n_entries);
if (WARN_ON_ONCE(!ddi_translations))
  			return;
@@ -2189,33 +2189,33 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
  		return;
  	}
- _skl_ddi_set_iboost(dev_priv, port, iboost);
+	_skl_ddi_set_iboost(i915, port, iboost);
if (port == PORT_A && intel_dig_port->max_lanes == 4)
-		_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
+		_skl_ddi_set_iboost(i915, PORT_E, iboost);
  }
static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
  				    int level, enum intel_output_type type)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	const struct bxt_ddi_buf_trans *ddi_translations;
  	enum port port = encoder->port;
  	int n_entries;
if (type == INTEL_OUTPUT_HDMI)
-		ddi_translations = bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+		ddi_translations = bxt_get_buf_trans_hdmi(i915, &n_entries);
  	else if (type == INTEL_OUTPUT_EDP)
-		ddi_translations = bxt_get_buf_trans_edp(dev_priv, &n_entries);
+		ddi_translations = bxt_get_buf_trans_edp(i915, &n_entries);
  	else
-		ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
+		ddi_translations = bxt_get_buf_trans_dp(i915, &n_entries);
if (WARN_ON_ONCE(!ddi_translations))
  		return;
  	if (WARN_ON_ONCE(level >= n_entries))
  		level = n_entries - 1;
- bxt_ddi_phy_set_signal_level(dev_priv, port,
+	bxt_ddi_phy_set_signal_level(i915, port,
  				     ddi_translations[level].margin,
  				     ddi_translations[level].scale,
  				     ddi_translations[level].enable,
@@ -2224,32 +2224,32 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	enum port port = encoder->port;
  	int n_entries;
- if (INTEL_GEN(dev_priv) >= 11) {
-		if (intel_port_is_combophy(dev_priv, port))
-			icl_get_combo_buf_trans(dev_priv, port, encoder->type,
+	if (INTEL_GEN(i915) >= 11) {
+		if (intel_port_is_combophy(i915, port))
+			icl_get_combo_buf_trans(i915, port, encoder->type,
  						intel_dp->link_rate, &n_entries);
  		else
  			n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
-	} else if (IS_CANNONLAKE(dev_priv)) {
+	} else if (IS_CANNONLAKE(i915)) {
  		if (encoder->type == INTEL_OUTPUT_EDP)
-			cnl_get_buf_trans_edp(dev_priv, &n_entries);
+			cnl_get_buf_trans_edp(i915, &n_entries);
  		else
-			cnl_get_buf_trans_dp(dev_priv, &n_entries);
-	} else if (IS_GEN9_LP(dev_priv)) {
+			cnl_get_buf_trans_dp(i915, &n_entries);
+	} else if (IS_GEN9_LP(i915)) {
  		if (encoder->type == INTEL_OUTPUT_EDP)
-			bxt_get_buf_trans_edp(dev_priv, &n_entries);
+			bxt_get_buf_trans_edp(i915, &n_entries);
  		else
-			bxt_get_buf_trans_dp(dev_priv, &n_entries);
+			bxt_get_buf_trans_dp(i915, &n_entries);
  	} else {
  		if (encoder->type == INTEL_OUTPUT_EDP)
-			intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+			intel_ddi_get_buf_trans_edp(i915, port, &n_entries);
  		else
-			intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+			intel_ddi_get_buf_trans_dp(i915, port, &n_entries);
  	}
if (WARN_ON(n_entries < 1))
@@ -2284,18 +2284,18 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing
  static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
  				   int level, enum intel_output_type type)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	const struct cnl_ddi_buf_trans *ddi_translations;
  	enum port port = encoder->port;
  	int n_entries, ln;
  	u32 val;
if (type == INTEL_OUTPUT_HDMI)
-		ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+		ddi_translations = cnl_get_buf_trans_hdmi(i915, &n_entries);
  	else if (type == INTEL_OUTPUT_EDP)
-		ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries);
+		ddi_translations = cnl_get_buf_trans_edp(i915, &n_entries);
  	else
-		ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
+		ddi_translations = cnl_get_buf_trans_dp(i915, &n_entries);
if (WARN_ON_ONCE(!ddi_translations))
  		return;
@@ -2348,7 +2348,7 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
  static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
  				    int level, enum intel_output_type type)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	int width, rate, ln;
  	u32 val;
@@ -2412,7 +2412,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
  	I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
  }
-static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
+static void icl_ddi_combo_vswing_program(struct drm_i915_private *i915,
  					u32 level, enum port port, int type,
  					int rate)
  {
@@ -2420,7 +2420,7 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
  	u32 n_entries, val;
  	int ln;
- ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
+	ddi_translations = icl_get_combo_buf_trans(i915, port, type,
  						   rate, &n_entries);
  	if (!ddi_translations)
  		return;
@@ -2472,7 +2472,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
  					      u32 level,
  					      enum intel_output_type type)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	int width = 0;
  	int rate = 0;
@@ -2530,7 +2530,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
  	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
/* 5. Program swing and de-emphasis */
-	icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
+	icl_ddi_combo_vswing_program(i915, level, port, type, rate);
/* 6. Set training enable to trigger update */
  	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
@@ -2542,7 +2542,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
  					   int link_clock,
  					   u32 level)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
  	u32 n_entries, val;
@@ -2662,10 +2662,10 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
  				    u32 level,
  				    enum intel_output_type type)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
- if (intel_port_is_combophy(dev_priv, port))
+	if (intel_port_is_combophy(i915, port))
  		icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
  	else
  		icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
@@ -2698,14 +2698,14 @@ static u32 intel_ddi_dp_level(struct intel_dp *intel_dp)
  u32 bxt_signal_levels(struct intel_dp *intel_dp)
  {
  	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dport->base.base.dev);
  	struct intel_encoder *encoder = &dport->base;
  	int level = intel_ddi_dp_level(intel_dp);
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
  					level, encoder->type);
-	else if (IS_CANNONLAKE(dev_priv))
+	else if (IS_CANNONLAKE(i915))
  		cnl_ddi_vswing_sequence(encoder, level, encoder->type);
  	else
  		bxt_ddi_vswing_sequence(encoder, level, encoder->type);
@@ -2716,24 +2716,24 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
  u32 ddi_signal_levels(struct intel_dp *intel_dp)
  {
  	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dport->base.base.dev);
  	struct intel_encoder *encoder = &dport->base;
  	int level = intel_ddi_dp_level(intel_dp);
- if (IS_GEN9_BC(dev_priv))
+	if (IS_GEN9_BC(i915))
  		skl_ddi_set_iboost(encoder, level, encoder->type);
return DDI_BUF_TRANS_SELECT(level);
  }
static inline
-u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
+u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *i915,
  			      enum port port)
  {
-	if (intel_port_is_combophy(dev_priv, port)) {
+	if (intel_port_is_combophy(i915, port)) {
  		return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
-	} else if (intel_port_is_tc(dev_priv, port)) {
-		enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	} else if (intel_port_is_tc(i915, port)) {
+		enum tc_port tc_port = intel_port_to_tc(i915, port);
return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port);
  	}
@@ -2744,47 +2744,47 @@ u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
  static void icl_map_plls_to_ports(struct intel_encoder *encoder,
  				  const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
  	enum port port = encoder->port;
  	u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&i915->dpll_lock);
val = I915_READ(DPCLKA_CFGCR0_ICL);
-	WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
+	WARN_ON((val & icl_dpclka_cfgcr0_clk_off(i915, port)) == 0);
- if (intel_port_is_combophy(dev_priv, port)) {
+	if (intel_port_is_combophy(i915, port)) {
  		val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
  		val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
  		I915_WRITE(DPCLKA_CFGCR0_ICL, val);
  		POSTING_READ(DPCLKA_CFGCR0_ICL);
  	}
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+	val &= ~icl_dpclka_cfgcr0_clk_off(i915, port);
  	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&i915->dpll_lock);
  }
static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&i915->dpll_lock);
val = I915_READ(DPCLKA_CFGCR0_ICL);
-	val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+	val |= icl_dpclka_cfgcr0_clk_off(i915, port);
  	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&i915->dpll_lock);
  }
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 val;
  	enum port port;
  	u32 port_mask;
@@ -2821,7 +2821,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
  		 * Sanity check that we haven't incorrectly registered another
  		 * encoder using any of the ports of this DSI encoder.
  		 */
-		for_each_intel_encoder(&dev_priv->drm, other_encoder) {
+		for_each_intel_encoder(&i915->drm, other_encoder) {
  			if (other_encoder == encoder)
  				continue;
@@ -2838,7 +2838,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
  	val = I915_READ(DPCLKA_CFGCR0_ICL);
  	for_each_port_masked(port, port_mask) {
  		bool ddi_clk_ungated = !(val &
-					 icl_dpclka_cfgcr0_clk_off(dev_priv,
+					 icl_dpclka_cfgcr0_clk_off(i915,
  								   port));
if (ddi_clk_needed == ddi_clk_ungated)
@@ -2853,7 +2853,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
  			 port_name(port));
-		val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+		val |= icl_dpclka_cfgcr0_clk_off(i915, port);
  		I915_WRITE(DPCLKA_CFGCR0_ICL, val);
  	}
  }
@@ -2861,7 +2861,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
  static void intel_ddi_clk_select(struct intel_encoder *encoder,
  				 const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	u32 val;
  	const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
@@ -2869,13 +2869,13 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
  	if (WARN_ON(!pll))
  		return;
- mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&i915->dpll_lock);
- if (INTEL_GEN(dev_priv) >= 11) {
-		if (!intel_port_is_combophy(dev_priv, port))
+	if (INTEL_GEN(i915) >= 11) {
+		if (!intel_port_is_combophy(i915, port))
  			I915_WRITE(DDI_CLK_SEL(port),
  				   icl_pll_to_ddi_clk_sel(encoder, crtc_state));
-	} else if (IS_CANNONLAKE(dev_priv)) {
+	} else if (IS_CANNONLAKE(i915)) {
  		/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
  		val = I915_READ(DPCLKA_CFGCR0);
  		val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
@@ -2890,7 +2890,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
  		val = I915_READ(DPCLKA_CFGCR0);
  		val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
  		I915_WRITE(DPCLKA_CFGCR0, val);
-	} else if (IS_GEN9_BC(dev_priv)) {
+	} else if (IS_GEN9_BC(i915)) {
  		/* DDI -> PLL mapping  */
  		val = I915_READ(DPLL_CTRL2);
@@ -2901,37 +2901,37 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 		I915_WRITE(DPLL_CTRL2, val);
-	} else if (INTEL_GEN(dev_priv) < 9) {
+	} else if (INTEL_GEN(i915) < 9) {
  		I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
  	}
- mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&i915->dpll_lock);
  }
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
- if (INTEL_GEN(dev_priv) >= 11) {
-		if (!intel_port_is_combophy(dev_priv, port))
+	if (INTEL_GEN(i915) >= 11) {
+		if (!intel_port_is_combophy(i915, port))
  			I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
-	} else if (IS_CANNONLAKE(dev_priv)) {
+	} else if (IS_CANNONLAKE(i915)) {
  		I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
  			   DPCLKA_CFGCR0_DDI_CLK_OFF(port));
-	} else if (IS_GEN9_BC(dev_priv)) {
+	} else if (IS_GEN9_BC(i915)) {
  		I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) |
  			   DPLL_CTRL2_DDI_CLK_OFF(port));
-	} else if (INTEL_GEN(dev_priv) < 9) {
+	} else if (INTEL_GEN(i915) < 9) {
  		I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
  	}
  }
static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
  {
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
  	enum port port = dig_port->base.port;
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	enum tc_port tc_port = intel_port_to_tc(i915, port);
  	u32 val;
  	int ln;
@@ -2961,9 +2961,9 @@ static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
 static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
  {
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
  	enum port port = dig_port->base.port;
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	enum tc_port tc_port = intel_port_to_tc(i915, port);
  	u32 val;
  	int ln;
@@ -2993,9 +2993,9 @@ static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
 static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
  	enum port port = intel_dig_port->base.port;
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	enum tc_port tc_port = intel_port_to_tc(i915, port);
  	u32 ln0, ln1, lane_info;
if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
@@ -3069,7 +3069,7 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
  static void intel_ddi_enable_fec(struct intel_encoder *encoder,
  				 const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	u32 val;
@@ -3080,7 +3080,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
  	val |= DP_TP_CTL_FEC_ENABLE;
  	I915_WRITE(DP_TP_CTL(port), val);
- if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
+	if (intel_wait_for_register(&i915->uncore, DP_TP_STATUS(port),
  				    DP_TP_STATUS_FEC_ENABLE_LIVE,
  				    DP_TP_STATUS_FEC_ENABLE_LIVE,
  				    1))
@@ -3090,7 +3090,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
  static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
  					const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	u32 val;
@@ -3108,7 +3108,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
  				    const struct drm_connector_state *conn_state)
  {
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
  	bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
@@ -3123,26 +3123,26 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_clk_select(encoder, crtc_state); - intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+	intel_display_power_get(i915, dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port);
  	icl_disable_phy_clock_gating(dig_port);
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
  					level, encoder->type);
-	else if (IS_CANNONLAKE(dev_priv))
+	else if (IS_CANNONLAKE(i915))
  		cnl_ddi_vswing_sequence(encoder, level, encoder->type);
-	else if (IS_GEN9_LP(dev_priv))
+	else if (IS_GEN9_LP(i915))
  		bxt_ddi_vswing_sequence(encoder, level, encoder->type);
  	else
  		intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- if (intel_port_is_combophy(dev_priv, port)) {
+	if (intel_port_is_combophy(i915, port)) {
  		bool lane_reversal =
  			dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
- intel_combo_phy_power_up_lanes(dev_priv, port, false,
+		intel_combo_phy_power_up_lanes(i915, port, false,
  					       crtc_state->lane_count,
  					       lane_reversal);
  	}
@@ -3154,7 +3154,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
  					      true);
  	intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
  	intel_dp_start_link_train(intel_dp);
-	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+	if (port != PORT_A || INTEL_GEN(i915) >= 9)
  		intel_dp_stop_link_train(intel_dp);
intel_ddi_enable_fec(encoder, crtc_state);
@@ -3173,32 +3173,32 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
  {
  	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
  	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
-	int level = intel_ddi_hdmi_level(dev_priv, port);
+	int level = intel_ddi_hdmi_level(i915, port);
  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
  	intel_ddi_clk_select(encoder, crtc_state);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+	intel_display_power_get(i915, dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port);
  	icl_disable_phy_clock_gating(dig_port);
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
  					level, INTEL_OUTPUT_HDMI);
-	else if (IS_CANNONLAKE(dev_priv))
+	else if (IS_CANNONLAKE(i915))
  		cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
-	else if (IS_GEN9_LP(dev_priv))
+	else if (IS_GEN9_LP(i915))
  		bxt_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
  	else
  		intel_prepare_hdmi_ddi_buffers(encoder, level);
 	icl_enable_phy_clock_gating(dig_port);
-	if (IS_GEN9_BC(dev_priv))
+	if (IS_GEN9_BC(i915))
  		skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
intel_ddi_enable_pipe_clock(crtc_state);
@@ -3213,7 +3213,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
  				 const struct drm_connector_state *conn_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
/*
@@ -3231,10 +3231,10 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 	WARN_ON(crtc_state->has_pch_encoder);
-	if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_map_plls_to_ports(encoder, crtc_state);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	intel_set_cpu_fifo_underrun_reporting(i915, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
  		intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
@@ -3257,7 +3257,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
  static void intel_disable_ddi_buf(struct intel_encoder *encoder,
  				  const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	bool wait = false;
  	u32 val;
@@ -3278,14 +3278,14 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
  	intel_ddi_disable_fec_state(encoder, crtc_state);
if (wait)
-		intel_wait_ddi_buf_idle(dev_priv, port);
+		intel_wait_ddi_buf_idle(i915, port);
  }
static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
  				      const struct intel_crtc_state *old_crtc_state,
  				      const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
  	struct intel_dp *intel_dp = &dig_port->dp;
  	bool is_mst = intel_crtc_has_type(old_crtc_state,
@@ -3305,7 +3305,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
  	intel_edp_panel_vdd_on(intel_dp);
  	intel_edp_panel_off(intel_dp);
- intel_display_power_put_unchecked(dev_priv,
+	intel_display_power_put_unchecked(i915,
  					  dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
@@ -3315,7 +3315,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
  					const struct intel_crtc_state *old_crtc_state,
  					const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
  	struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
@@ -3326,7 +3326,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
 	intel_disable_ddi_buf(encoder, old_crtc_state);
-	intel_display_power_put_unchecked(dev_priv,
+	intel_display_power_put_unchecked(i915,
  					  dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
@@ -3338,7 +3338,7 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
  				   const struct intel_crtc_state *old_crtc_state,
  				   const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
/*
  	 * When called from DP MST code:
@@ -3360,7 +3360,7 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
  		intel_ddi_post_disable_dp(encoder,
  					  old_crtc_state, old_conn_state);
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_unmap_plls_to_ports(encoder);
  }
@@ -3368,7 +3368,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
  				const struct intel_crtc_state *old_crtc_state,
  				const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 val;
/*
@@ -3402,11 +3402,11 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
  				const struct intel_crtc_state *crtc_state,
  				const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	enum port port = encoder->port;
- if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
+	if (port == PORT_A && INTEL_GEN(i915) < 9)
  		intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(crtc_state, conn_state);
@@ -3419,7 +3419,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
  }
static i915_reg_t
-gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
+gen9_chicken_trans_reg_by_port(struct drm_i915_private *i915,
  			       enum port port)
  {
  	static const i915_reg_t regs[] = {
@@ -3430,7 +3430,7 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
  		[PORT_E] = CHICKEN_TRANS_A,
  	};
- WARN_ON(INTEL_GEN(dev_priv) < 9);
+	WARN_ON(INTEL_GEN(i915) < 9);
if (WARN_ON(port < PORT_A || port > PORT_E))
  		port = PORT_A;
@@ -3442,7 +3442,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
  				  const struct intel_crtc_state *crtc_state,
  				  const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
  	struct drm_connector *connector = conn_state->connector;
  	enum port port = encoder->port;
@@ -3454,14 +3454,14 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
  			  connector->base.id, connector->name);
/* Display WA #1143: skl,kbl,cfl */
-	if (IS_GEN9_BC(dev_priv)) {
+	if (IS_GEN9_BC(i915)) {
  		/*
  		 * For some reason these chicken bits have been
  		 * stuffed into a transcoder register, event though
  		 * the bits affect a specific DDI port rather than
  		 * a specific transcoder.
  		 */
-		i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
+		i915_reg_t reg = gen9_chicken_trans_reg_by_port(i915, port);
  		u32 val;
val = I915_READ(reg);
@@ -3595,9 +3595,9 @@ static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
  					 const struct intel_crtc_state *pipe_config,
  					 enum port port)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	enum tc_port tc_port = intel_port_to_tc(i915, port);
  	u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
  	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
@@ -3625,16 +3625,16 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
  			 const struct intel_crtc_state *crtc_state,
  			 const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
  	enum port port = encoder->port;
if (intel_crtc_has_dp_encoder(crtc_state) ||
-	    intel_port_is_tc(dev_priv, encoder->port))
-		intel_display_power_get(dev_priv,
+	    intel_port_is_tc(i915, encoder->port))
+		intel_display_power_get(i915,
  					intel_ddi_main_link_aux_domain(dig_port));
- if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
  		bxt_ddi_phy_set_lane_optim_mask(encoder,
  						crtc_state->lane_lat_optim_mask);
@@ -3654,19 +3654,19 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
  			   const struct intel_crtc_state *crtc_state,
  			   const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
if (intel_crtc_has_dp_encoder(crtc_state) ||
-	    intel_port_is_tc(dev_priv, encoder->port))
-		intel_display_power_put_unchecked(dev_priv,
+	    intel_port_is_tc(i915, encoder->port))
+		intel_display_power_put_unchecked(i915,
  						  intel_ddi_main_link_aux_domain(dig_port));
  }
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
  {
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(intel_dig_port->base.base.dev);
  	enum port port = intel_dig_port->base.port;
  	u32 val;
@@ -3687,7 +3687,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
  		POSTING_READ(DP_TP_CTL(port));
if (wait)
-			intel_wait_ddi_buf_idle(dev_priv, port);
+			intel_wait_ddi_buf_idle(i915, port);
  	}
val = DP_TP_CTL_ENABLE |
@@ -3709,32 +3709,32 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
  	udelay(600);
  }
-static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+static bool intel_ddi_is_audio_enabled(struct drm_i915_private *i915,
  				       enum transcoder cpu_transcoder)
  {
  	if (cpu_transcoder == TRANSCODER_EDP)
  		return false;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO))
+	if (!intel_display_power_is_enabled(i915, POWER_DOMAIN_AUDIO))
  		return false;
return I915_READ(HSW_AUD_PIN_ELD_CP_VLD) &
  		AUDIO_OUTPUT_ENABLE(cpu_transcoder);
  }
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *i915,
  					 struct intel_crtc_state *crtc_state)
  {
-	if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
+	if (INTEL_GEN(i915) >= 11 && crtc_state->port_clock > 594000)
  		crtc_state->min_voltage_level = 1;
-	else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
+	else if (IS_CANNONLAKE(i915) && crtc_state->port_clock > 594000)
  		crtc_state->min_voltage_level = 2;
  }
void intel_ddi_get_config(struct intel_encoder *encoder,
  			  struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
  	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
  	struct intel_digital_port *intel_dig_port;
@@ -3816,10 +3816,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
  	}
pipe_config->has_audio =
-		intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
+		intel_ddi_is_audio_enabled(i915, cpu_transcoder);
- if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
-	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
+	if (encoder->type == INTEL_OUTPUT_EDP && i915->vbt.edp.bpp &&
+	    pipe_config->pipe_bpp > i915->vbt.edp.bpp) {
  		/*
  		 * This is a big fat ugly hack.
  		 *
@@ -3834,17 +3834,17 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
  		 * load.
  		 */
  		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
-			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
-		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
+			      pipe_config->pipe_bpp, i915->vbt.edp.bpp);
+		i915->vbt.edp.bpp = pipe_config->pipe_bpp;
  	}
 	intel_ddi_clock_get(encoder, pipe_config);
-	if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
  		pipe_config->lane_lat_optim_mask =
  			bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+	intel_ddi_compute_min_voltage_level(i915, pipe_config);
 	intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
@@ -3885,11 +3885,11 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
  				    struct drm_connector_state *conn_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	int ret;
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+	if (HAS_TRANSCODER_EDP(i915) && port == PORT_A)
  		pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
@@ -3899,17 +3899,17 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
  	if (ret)
  		return ret;
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+	if (IS_HASWELL(i915) && crtc->pipe == PIPE_A &&
  	    pipe_config->cpu_transcoder == TRANSCODER_EDP)
  		pipe_config->pch_pfit.force_thru =
  			pipe_config->pch_pfit.enabled ||
  			pipe_config->crc_enabled;
- if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
  		pipe_config->lane_lat_optim_mask =
  			bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+	intel_ddi_compute_min_voltage_level(i915, pipe_config);
return 0;
  }
@@ -4013,11 +4013,11 @@ static int modeset_pipe(struct drm_crtc *crtc,
  static int intel_hdmi_reset_link(struct intel_encoder *encoder,
  				 struct drm_modeset_acquire_ctx *ctx)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base);
  	struct intel_connector *connector = hdmi->attached_connector;
  	struct i2c_adapter *adapter =
-		intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+		intel_gmbus_get_adapter(i915, hdmi->ddc_bus);
  	struct drm_connector_state *conn_state;
  	struct intel_crtc_state *crtc_state;
  	struct intel_crtc *crtc;
@@ -4027,7 +4027,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
  	if (!connector || connector->base.status != connector_status_connected)
  		return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+	ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex,
  			       ctx);
  	if (ret)
  		return ret;
@@ -4131,7 +4131,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
  {
-	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dport->base.base.dev);
if (dport->base.port != PORT_A)
  		return false;
@@ -4142,7 +4142,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
  	/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
  	 *                     supported configuration
  	 */
-	if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
  		return true;
/* Cannonlake: Most of SKUs don't support DDI_E, and the only
@@ -4150,8 +4150,8 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
  	 *             DDI_F what makes DDI_E useless. However for this
  	 *             case let's trust VBT info.
  	 */
-	if (IS_CANNONLAKE(dev_priv) &&
-	    !intel_bios_is_port_present(dev_priv, PORT_E))
+	if (IS_CANNONLAKE(i915) &&
+	    !intel_bios_is_port_present(i915, PORT_E))
  		return true;
return false;
@@ -4160,11 +4160,11 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
  static int
  intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_dport->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_dport->base.base.dev);
  	enum port port = intel_dport->base.port;
  	int max_lanes = 4;
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		return max_lanes;
if (port == PORT_A || port == PORT_E) {
@@ -4189,10 +4189,10 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
  	return max_lanes;
  }
-void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
+void intel_ddi_init(struct drm_i915_private *i915, enum port port)
  {
  	struct ddi_vbt_port_info *port_info =
-		&dev_priv->vbt.ddi_port_info[port];
+		&i915->vbt.ddi_port_info[port];
  	struct intel_digital_port *intel_dig_port;
  	struct intel_encoder *intel_encoder;
  	struct drm_encoder *encoder;
@@ -4202,7 +4202,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
  	init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
  	init_dp = port_info->supports_dp;
- if (intel_bios_is_lspcon_present(dev_priv, port)) {
+	if (intel_bios_is_lspcon_present(i915, port)) {
  		/*
  		 * Lspcon device needs to be driven with DP connector
  		 * with special detection sequence. So make sure DP
@@ -4227,7 +4227,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
  	intel_encoder = &intel_dig_port->base;
  	encoder = &intel_encoder->base;
- drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
+	drm_encoder_init(&i915->drm, encoder, &intel_ddi_funcs,
  			 DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
intel_encoder->hotplug = intel_ddi_hotplug;
@@ -4248,10 +4248,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
  	intel_encoder->power_domain = intel_port_to_power_domain(port);
  	intel_encoder->port = port;
  	intel_encoder->cloneable = 0;
-	for_each_pipe(dev_priv, pipe)
+	for_each_pipe(i915, pipe)
  		intel_encoder->crtc_mask |= BIT(pipe);
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
  			DDI_BUF_PORT_REVERSAL;
  	else
@@ -4259,9 +4259,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
  			(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
  	intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
  	intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
-	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+	intel_dig_port->aux_ch = intel_bios_port_aux_ch(i915, port);
- intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
+	intel_dig_port->tc_legacy_port = intel_port_is_tc(i915, port) &&
  					 !port_info->supports_typec_usb &&
  					 !port_info->supports_tbt;
@@ -4324,7 +4324,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
  	intel_infoframe_init(intel_dig_port);

-	if (intel_port_is_tc(dev_priv, port))
+	if (intel_port_is_tc(i915, port))
  		intel_digital_port_connected(intel_encoder);
return;
diff --git a/drivers/gpu/drm/i915/intel_ddi.h b/drivers/gpu/drm/i915/intel_ddi.h
index a08365da2643..50dd6fac4e3b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.h
+++ b/drivers/gpu/drm/i915/intel_ddi.h
@@ -24,7 +24,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
  				const struct drm_connector_state *old_conn_state);
  void hsw_fdi_link_train(struct intel_crtc *crtc,
  			const struct intel_crtc_state *crtc_state);
-void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
+void intel_ddi_init(struct drm_i915_private *i915, enum port port);
  bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
  void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
  void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
@@ -36,7 +36,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
  			  struct intel_crtc_state *pipe_config);
  void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
  				    bool state);
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *i915,
  					 struct intel_crtc_state *crtc_state);
  u32 bxt_signal_levels(struct intel_dp *intel_dp);
  u32 ddi_signal_levels(struct intel_dp *intel_dp);
@@ -46,7 +46,7 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
  int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
  				     bool enable);
  void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+int cnl_calc_wrpll_link(struct drm_i915_private *i915,
  			struct intel_dpll_hw_state *state);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 7135d8dc32a7..a670881b3f1e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -182,15 +182,15 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
  	return total;
  }
-static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
+static void gen11_sseu_info_init(struct drm_i915_private *i915)
  {
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
  	u8 s_en;
  	u32 ss_en, ss_en_mask;
  	u8 eu_en;
  	int s;
- if (IS_ELKHARTLAKE(dev_priv)) {
+	if (IS_ELKHARTLAKE(i915)) {
  		sseu->max_slices = 1;
  		sseu->max_subslices = 4;
  		sseu->max_eus_per_subslice = 8;
@@ -227,9 +227,9 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
  	sseu->has_eu_pg = 1;
  }
-static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
+static void gen10_sseu_info_init(struct drm_i915_private *i915)
  {
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
  	const u32 fuse2 = I915_READ(GEN8_FUSE2);
  	int s, ss;
  	const int eu_mask = 0xff;
@@ -305,9 +305,9 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
  	sseu->has_eu_pg = 1;
  }
-static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
+static void cherryview_sseu_info_init(struct drm_i915_private *i915)
  {
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
  	u32 fuse;
fuse = I915_READ(CHV_FUSE_GT);
@@ -359,10 +359,10 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
  	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
  }
-static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
+static void gen9_sseu_info_init(struct drm_i915_private *i915)
  {
-	struct intel_device_info *info = mkwrite_device_info(dev_priv);
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	struct intel_device_info *info = mkwrite_device_info(i915);
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
  	int s, ss;
  	u32 fuse2, eu_disable, subslice_mask;
  	const u8 eu_mask = 0xff;
@@ -371,8 +371,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
  	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
/* BXT has a single slice and at most 3 subslices. */
-	sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
-	sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
+	sseu->max_slices = IS_GEN9_LP(i915) ? 1 : 3;
+	sseu->max_subslices = IS_GEN9_LP(i915) ? 3 : 4;
  	sseu->max_eus_per_subslice = 8;
/*
@@ -442,12 +442,12 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
  	 * pair per subslice.
  	*/
  	sseu->has_slice_pg =
-		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
+		!IS_GEN9_LP(i915) && hweight8(sseu->slice_mask) > 1;
  	sseu->has_subslice_pg =
-		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
+		IS_GEN9_LP(i915) && intel_sseu_subslice_total(sseu) > 1;
  	sseu->has_eu_pg = sseu->eu_per_subslice > 2;
- if (IS_GEN9_LP(dev_priv)) {
+	if (IS_GEN9_LP(i915)) {
  #define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
  		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
@@ -464,9 +464,9 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
  	}
  }
-static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
+static void broadwell_sseu_info_init(struct drm_i915_private *i915)
  {
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
  	int s, ss;
  	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
@@ -547,9 +547,9 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
  	sseu->has_eu_pg = 0;
  }
-static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
+static void haswell_sseu_info_init(struct drm_i915_private *i915)
  {
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
  	u32 fuse1;
  	int s, ss;
@@ -557,9 +557,9 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
  	 * There isn't a register to tell us how many slices/subslices. We
  	 * work off the PCI-ids here.
  	 */
-	switch (INTEL_INFO(dev_priv)->gt) {
+	switch (INTEL_INFO(i915)->gt) {
  	default:
-		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
+		MISSING_CASE(INTEL_INFO(i915)->gt);
  		/* fall through */
  	case 1:
  		sseu->slice_mask = BIT(0);
@@ -612,7 +612,7 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
  	sseu->has_eu_pg = 0;
  }
-static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
+static u32 read_reference_ts_freq(struct drm_i915_private *i915)
  {
  	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
  	u32 base_freq, frac_freq;
@@ -629,7 +629,7 @@ static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
  	return base_freq + frac_freq;
  }
-static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
+static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *i915,
  					u32 rpm_config_reg)
  {
  	u32 f19_2_mhz = 19200;
@@ -649,7 +649,7 @@ static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
  	}
  }
-static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
+static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *i915,
  					u32 rpm_config_reg)
  {
  	u32 f19_2_mhz = 19200;
@@ -675,21 +675,21 @@ static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
  	}
  }
-static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
+static u32 read_timestamp_frequency(struct drm_i915_private *i915)
  {
  	u32 f12_5_mhz = 12500;
  	u32 f19_2_mhz = 19200;
  	u32 f24_mhz = 24000;
- if (INTEL_GEN(dev_priv) <= 4) {
+	if (INTEL_GEN(i915) <= 4) {
  		/* PRMs say:
  		 *
  		 *     "The value in this register increments once every 16
  		 *      hclks." (through the “Clocking Configuration”
  		 *      (“CLKCFG”) MCHBAR register)
  		 */
-		return dev_priv->rawclk_freq / 16;
-	} else if (INTEL_GEN(dev_priv) <= 8) {
+		return i915->rawclk_freq / 16;
+	} else if (INTEL_GEN(i915) <= 8) {
  		/* PRMs say:
  		 *
  		 *     "The PCU TSC counts 10ns increments; this timestamp
@@ -697,14 +697,14 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
  		 *      rolling over every 1.5 hours).
  		 */
  		return f12_5_mhz;
-	} else if (INTEL_GEN(dev_priv) <= 9) {
+	} else if (INTEL_GEN(i915) <= 9) {
  		u32 ctc_reg = I915_READ(CTC_MODE);
  		u32 freq = 0;
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
-			freq = read_reference_ts_freq(dev_priv);
+			freq = read_reference_ts_freq(i915);
  		} else {
-			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;
+			freq = IS_GEN9_LP(i915) ? f19_2_mhz : f24_mhz;
/* Now figure out how the command stream's timestamp
  			 * register increments from this frequency (it might
@@ -715,7 +715,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
  		}
return freq;
-	} else if (INTEL_GEN(dev_priv) <= 11) {
+	} else if (INTEL_GEN(i915) <= 11) {
  		u32 ctc_reg = I915_READ(CTC_MODE);
  		u32 freq = 0;
@@ -725,15 +725,15 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
  		 * tells us which one we should use.
  		 */
  		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
-			freq = read_reference_ts_freq(dev_priv);
+			freq = read_reference_ts_freq(i915);
  		} else {
  			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);
- if (INTEL_GEN(dev_priv) <= 10)
-				freq = gen10_get_crystal_clock_freq(dev_priv,
+			if (INTEL_GEN(i915) <= 10)
+				freq = gen10_get_crystal_clock_freq(i915,
  								rpm_config_reg);
  			else
-				freq = gen11_get_crystal_clock_freq(dev_priv,
+				freq = gen11_get_crystal_clock_freq(i915,
  								rpm_config_reg);
/* Now figure out how the command stream's timestamp
@@ -841,7 +841,7 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
/**
   * intel_device_info_runtime_init - initialize runtime info
- * @dev_priv: the i915 device
+ * @i915: the i915 device
   *
   * Determine various intel_device_info fields at runtime.
   *
@@ -855,16 +855,16 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
   *   - after the PCH has been detected,
   *   - before the first usage of the fields it can tweak.
   */
-void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
+void intel_device_info_runtime_init(struct drm_i915_private *i915)
  {
-	struct intel_device_info *info = mkwrite_device_info(dev_priv);
-	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
+	struct intel_device_info *info = mkwrite_device_info(i915);
+	struct intel_runtime_info *runtime = RUNTIME_INFO(i915);
  	enum pipe pipe;
- if (INTEL_GEN(dev_priv) >= 10) {
-		for_each_pipe(dev_priv, pipe)
+	if (INTEL_GEN(i915) >= 10) {
+		for_each_pipe(i915, pipe)
  			runtime->num_scalers[pipe] = 2;
-	} else if (IS_GEN(dev_priv, 9)) {
+	} else if (IS_GEN(i915, 9)) {
  		runtime->num_scalers[PIPE_A] = 2;
  		runtime->num_scalers[PIPE_B] = 2;
  		runtime->num_scalers[PIPE_C] = 1;
@@ -872,13 +872,13 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
  	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

-	if (INTEL_GEN(dev_priv) >= 11)
-		for_each_pipe(dev_priv, pipe)
+	if (INTEL_GEN(i915) >= 11)
+		for_each_pipe(i915, pipe)
  			runtime->num_sprites[pipe] = 6;
-	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
-		for_each_pipe(dev_priv, pipe)
+	else if (IS_GEN(i915, 10) || IS_GEMINILAKE(i915))
+		for_each_pipe(i915, pipe)
  			runtime->num_sprites[pipe] = 3;
-	else if (IS_BROXTON(dev_priv)) {
+	else if (IS_BROXTON(i915)) {
  		/*
  		 * Skylake and Broxton currently don't expose the topmost plane as its
  		 * use is exclusive with the legacy cursor and we only want to expose
@@ -891,20 +891,20 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
  		runtime->num_sprites[PIPE_A] = 2;
  		runtime->num_sprites[PIPE_B] = 2;
  		runtime->num_sprites[PIPE_C] = 1;
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		for_each_pipe(dev_priv, pipe)
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		for_each_pipe(i915, pipe)
  			runtime->num_sprites[pipe] = 2;
-	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
-		for_each_pipe(dev_priv, pipe)
+	} else if (INTEL_GEN(i915) >= 5 || IS_G4X(i915)) {
+		for_each_pipe(i915, pipe)
  			runtime->num_sprites[pipe] = 1;
  	}
if (i915_modparams.disable_display) {
  		DRM_INFO("Display disabled (module parameter)\n");
  		info->num_pipes = 0;
-	} else if (HAS_DISPLAY(dev_priv) &&
-		   (IS_GEN_RANGE(dev_priv, 7, 8)) &&
-		   HAS_PCH_SPLIT(dev_priv)) {
+	} else if (HAS_DISPLAY(i915) &&
+		   (IS_GEN_RANGE(i915, 7, 8)) &&
+		   HAS_PCH_SPLIT(i915)) {
  		u32 fuse_strap = I915_READ(FUSE_STRAP);
  		u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -919,7 +919,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
  		 */
  		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
  		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
-		    (HAS_PCH_CPT(dev_priv) &&
+		    (HAS_PCH_CPT(i915) &&
  		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
  			DRM_INFO("Display fused off, disabling\n");
  			info->num_pipes = 0;
@@ -927,7 +927,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
  			DRM_INFO("PipeC fused off\n");
  			info->num_pipes -= 1;
  		}
-	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
+	} else if (HAS_DISPLAY(i915) && INTEL_GEN(i915) >= 9) {
  		u32 dfsm = I915_READ(SKL_DFSM);
  		u8 disabled_mask = 0;
  		bool invalid;
@@ -961,26 +961,26 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
  	}
/* Initialize slice/subslice/EU info */
-	if (IS_HASWELL(dev_priv))
-		haswell_sseu_info_init(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		cherryview_sseu_info_init(dev_priv);
-	else if (IS_BROADWELL(dev_priv))
-		broadwell_sseu_info_init(dev_priv);
-	else if (IS_GEN(dev_priv, 9))
-		gen9_sseu_info_init(dev_priv);
-	else if (IS_GEN(dev_priv, 10))
-		gen10_sseu_info_init(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 11)
-		gen11_sseu_info_init(dev_priv);
-
-	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
+	if (IS_HASWELL(i915))
+		haswell_sseu_info_init(i915);
+	else if (IS_CHERRYVIEW(i915))
+		cherryview_sseu_info_init(i915);
+	else if (IS_BROADWELL(i915))
+		broadwell_sseu_info_init(i915);
+	else if (IS_GEN(i915, 9))
+		gen9_sseu_info_init(i915);
+	else if (IS_GEN(i915, 10))
+		gen10_sseu_info_init(i915);
+	else if (INTEL_GEN(i915) >= 11)
+		gen11_sseu_info_init(i915);
+
+	if (IS_GEN(i915, 6) && intel_vtd_active()) {
  		DRM_INFO("Disabling ppGTT for VT-d support\n");
  		info->ppgtt_type = INTEL_PPGTT_NONE;
  	}
/* Initialize command stream timestamp frequency */
-	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(i915);
  }
void intel_driver_caps_print(const struct intel_driver_caps *caps,
@@ -997,16 +997,16 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
   * this point (but later we need to prune the forcewake domains for engines that
   * are indeed fused off).
   */
-void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
+void intel_device_info_init_mmio(struct drm_i915_private *i915)
  {
-	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	struct intel_device_info *info = mkwrite_device_info(i915);
  	unsigned int logical_vdbox = 0;
  	unsigned int i;
  	u32 media_fuse;
  	u16 vdbox_mask;
  	u16 vebox_mask;
- if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(i915) < 11)
  		return;
media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
@@ -1016,7 +1016,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
  		      GEN11_GT_VEBOX_DISABLE_SHIFT;
for (i = 0; i < I915_MAX_VCS; i++) {
-		if (!HAS_ENGINE(dev_priv, _VCS(i)))
+		if (!HAS_ENGINE(i915, _VCS(i)))
  			continue;
if (!(BIT(i) & vdbox_mask)) {
@@ -1030,14 +1030,14 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
  		 * hooked up to an SFC (Scaler & Format Converter) unit.
  		 */
  		if (logical_vdbox++ % 2 == 0)
-			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
+			RUNTIME_INFO(i915)->vdbox_sfc_access |= BIT(i);
  	}
  	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
-			 vdbox_mask, VDBOX_MASK(dev_priv));
-	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
+			 vdbox_mask, VDBOX_MASK(i915));
+	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(i915));
for (i = 0; i < I915_MAX_VECS; i++) {
-		if (!HAS_ENGINE(dev_priv, _VECS(i)))
+		if (!HAS_ENGINE(i915, _VECS(i)))
  			continue;
if (!(BIT(i) & vebox_mask)) {
@@ -1046,6 +1046,6 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
  		}
  	}
  	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
-			 vebox_mask, VEBOX_MASK(dev_priv));
-	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
+			 vebox_mask, VEBOX_MASK(i915));
+	GEM_BUG_ON(vebox_mask != VEBOX_MASK(i915));
  }
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 3ea953a230b3..93e5cc95e5c0 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -218,8 +218,8 @@ struct intel_driver_caps {
  const char *intel_platform_name(enum intel_platform platform);

-void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
-void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
+void intel_device_info_subplatform_init(struct drm_i915_private *i915);
+void intel_device_info_runtime_init(struct drm_i915_private *i915);
  void intel_device_info_dump_flags(const struct intel_device_info *info,
  				  struct drm_printer *p);
  void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
@@ -227,7 +227,7 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
  void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
  				     struct drm_printer *p);
-void intel_device_info_init_mmio(struct drm_i915_private *dev_priv);
+void intel_device_info_init_mmio(struct drm_i915_private *i915);
void intel_driver_caps_print(const struct intel_driver_caps *caps,
  			     struct drm_printer *p);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b1ddb48ca7a..be26ad9fc194 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -156,24 +156,24 @@ struct intel_limit {
  };
/* returns HPLL frequency in kHz */
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
+int vlv_get_hpll_vco(struct drm_i915_private *i915)
  {
  	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
/* Obtain SKU information */
-	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+	hpll_freq = vlv_cck_read(i915, CCK_FUSE_REG) &
  		CCK_FUSE_HPLL_FREQ_MASK;
return vco_freq[hpll_freq] * 1000;
  }
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock(struct drm_i915_private *i915,
  		      const char *name, u32 reg, int ref_freq)
  {
  	u32 val;
  	int divider;
- val = vlv_cck_read(dev_priv, reg);
+	val = vlv_cck_read(i915, reg);
  	divider = val & CCK_FREQUENCY_VALUES;
WARN((val & CCK_FREQUENCY_STATUS) !=
@@ -183,42 +183,42 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
  	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
  }
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock_hpll(struct drm_i915_private *i915,
  			   const char *name, u32 reg)
  {
  	int hpll;
- vlv_cck_get(dev_priv);
+	vlv_cck_get(i915);
- if (dev_priv->hpll_freq == 0)
-		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
+	if (i915->hpll_freq == 0)
+		i915->hpll_freq = vlv_get_hpll_vco(i915);
- hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
+	hpll = vlv_get_cck_clock(i915, name, reg, i915->hpll_freq);
- vlv_cck_put(dev_priv);
+	vlv_cck_put(i915);
return hpll;
  }
-static void intel_update_czclk(struct drm_i915_private *dev_priv)
+static void intel_update_czclk(struct drm_i915_private *i915)
  {
-	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
+	if (!(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
  		return;
- dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
+	i915->czclk_freq = vlv_get_cck_clock_hpll(i915, "czclk",
  						      CCK_CZ_CLOCK_CONTROL);
- DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
+	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", i915->czclk_freq);
  }
static inline u32 /* units of 100MHz */
-intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+intel_fdi_link_freq(struct drm_i915_private *i915,
  		    const struct intel_crtc_state *pipe_config)
  {
-	if (HAS_DDI(dev_priv))
+	if (HAS_DDI(i915))
  		return pipe_config->port_clock; /* SPLL */
  	else
-		return dev_priv->fdi_pll_freq;
+		return i915->fdi_pll_freq;
  }
static const struct intel_limit intel_limits_i8xx_dac = {
@@ -488,7 +488,7 @@ static const struct intel_limit intel_limits_bxt = {
/* WA Display #0827: Gen9:all */
  static void
-skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
+skl_wa_827(struct drm_i915_private *i915, int pipe, bool enable)
  {
  	if (enable)
  		I915_WRITE(CLKGATE_DIS_PSL(pipe),
@@ -502,7 +502,7 @@ skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
/* Wa_2006604312:icl */
  static void
-icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+icl_wa_scalerclkgating(struct drm_i915_private *i915, enum pipe pipe,
  		       bool enable)
  {
  	if (enable)
@@ -588,7 +588,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
   * Returns whether the given set of divisors are valid for a given refclk with
   * the given connectors.
   */
-static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_PLL_is_valid(struct drm_i915_private *i915,
  			       const struct intel_limit *limit,
  			       const struct dpll *clock)
  {
@@ -601,13 +601,13 @@ static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
  	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
  		INTELPllInvalid("m1 out of range\n");
- if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
-	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
+	if (!IS_PINEVIEW(i915) && !IS_VALLEYVIEW(i915) &&
+	    !IS_CHERRYVIEW(i915) && !IS_GEN9_LP(i915))
  		if (clock->m1 <= clock->m2)
  			INTELPllInvalid("m1 <= m2\n");
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
-	    !IS_GEN9_LP(dev_priv)) {
+	if (!IS_VALLEYVIEW(i915) && !IS_CHERRYVIEW(i915) &&
+	    !IS_GEN9_LP(i915)) {
  		if (clock->p < limit->p.min || limit->p.max < clock->p)
  			INTELPllInvalid("p out of range\n");
  		if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -630,7 +630,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
  		   const struct intel_crtc_state *crtc_state,
  		   int target)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  		/*
@@ -638,7 +638,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
  		 * We haven't figured out how to reliably set up different
  		 * single/dual channel state, if we even can.
  		 */
-		if (intel_is_dual_link_lvds(dev_priv))
+		if (intel_is_dual_link_lvds(i915))
  			return limit->p2.p2_fast;
  		else
  			return limit->p2.p2_slow;
@@ -1016,22 +1016,22 @@ bool intel_crtc_active(struct intel_crtc *crtc)
  		crtc->config->base.adjusted_mode.crtc_clock;
  }
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *i915,
  					     enum pipe pipe)
  {
-	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
return crtc->config->cpu_transcoder;
  }
-static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+static bool pipe_scanline_is_moving(struct drm_i915_private *i915,
  				    enum pipe pipe)
  {
  	i915_reg_t reg = PIPEDSL(pipe);
  	u32 line1, line2;
  	u32 line_mask;
- if (IS_GEN(dev_priv, 2))
+	if (IS_GEN(i915, 2))
  		line_mask = DSL_LINEMASK_GEN2;
  	else
  		line_mask = DSL_LINEMASK_GEN3;
@@ -1045,11 +1045,11 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
/* Wait for the display line to settle/start moving */
-	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
+	if (wait_for(pipe_scanline_is_moving(i915, pipe) == state, 100))
  		DRM_ERROR("pipe %c scanline %s wait timed out\n",
  			  pipe_name(pipe), onoff(state));
  }
@@ -1068,14 +1068,14 @@ static void
  intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
  		i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    reg, I965_PIPECONF_ACTIVE, 0,
  					    100))
  			WARN(1, "pipe_off wait timed out\n");
@@ -1085,7 +1085,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
  }
/* Only for pre-ILK configs */
-void assert_pll(struct drm_i915_private *dev_priv,
+void assert_pll(struct drm_i915_private *i915,
  		enum pipe pipe, bool state)
  {
  	u32 val;
@@ -1099,14 +1099,14 @@ void assert_pll(struct drm_i915_private *dev_priv,
  }
/* XXX: the dsi pll is shared between MIPI DSI ports */
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+void assert_dsi_pll(struct drm_i915_private *i915, bool state)
  {
  	u32 val;
  	bool cur_state;
- vlv_cck_get(dev_priv);
-	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
-	vlv_cck_put(dev_priv);
+	vlv_cck_get(i915);
+	val = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL);
+	vlv_cck_put(i915);
cur_state = val & DSI_PLL_VCO_EN;
  	I915_STATE_WARN(cur_state != state,
@@ -1114,14 +1114,14 @@ void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
  			onoff(state), onoff(cur_state));
  }
-static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+static void assert_fdi_tx(struct drm_i915_private *i915,
  			  enum pipe pipe, bool state)
  {
  	bool cur_state;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(i915,
  								      pipe);
- if (HAS_DDI(dev_priv)) {
+	if (HAS_DDI(i915)) {
  		/* DDI does not have a specific FDI_TX register */
  		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
  		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@ -1136,7 +1136,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
  #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
  #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
-static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+static void assert_fdi_rx(struct drm_i915_private *i915,
  			  enum pipe pipe, bool state)
  {
  	u32 val;
@@ -1151,24 +1151,24 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
  #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
  #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
-static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
  				      enum pipe pipe)
  {
  	u32 val;
/* ILK FDI PLL is always enabled */
-	if (IS_GEN(dev_priv, 5))
+	if (IS_GEN(i915, 5))
  		return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
-	if (HAS_DDI(dev_priv))
+	if (HAS_DDI(i915))
  		return;
val = I915_READ(FDI_TX_CTL(pipe));
  	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
  }
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+void assert_fdi_rx_pll(struct drm_i915_private *i915,
  		       enum pipe pipe, bool state)
  {
  	u32 val;
@@ -1181,17 +1181,17 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
  			onoff(state), onoff(cur_state));
  }
-void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *i915, enum pipe pipe)
  {
  	i915_reg_t pp_reg;
  	u32 val;
  	enum pipe panel_pipe = INVALID_PIPE;
  	bool locked = true;
- if (WARN_ON(HAS_DDI(dev_priv)))
+	if (WARN_ON(HAS_DDI(i915)))
  		return;
- if (HAS_PCH_SPLIT(dev_priv)) {
+	if (HAS_PCH_SPLIT(i915)) {
  		u32 port_sel;
pp_reg = PP_CONTROL(0);
@@ -1199,22 +1199,22 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
switch (port_sel) {
  		case PANEL_PORT_SELECT_LVDS:
-			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+			intel_lvds_port_enabled(i915, PCH_LVDS, &panel_pipe);
  			break;
  		case PANEL_PORT_SELECT_DPA:
-			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+			intel_dp_port_enabled(i915, DP_A, PORT_A, &panel_pipe);
  			break;
  		case PANEL_PORT_SELECT_DPC:
-			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+			intel_dp_port_enabled(i915, PCH_DP_C, PORT_C, &panel_pipe);
  			break;
  		case PANEL_PORT_SELECT_DPD:
-			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+			intel_dp_port_enabled(i915, PCH_DP_D, PORT_D, &panel_pipe);
  			break;
  		default:
  			MISSING_CASE(port_sel);
  			break;
  		}
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		/* presumably write lock depends on pipe, not port select */
  		pp_reg = PP_CONTROL(pipe);
  		panel_pipe = pipe;
@@ -1225,7 +1225,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
  		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
-		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
+		intel_lvds_port_enabled(i915, LVDS, &panel_pipe);
  	}
val = I915_READ(pp_reg);
@@ -1238,26 +1238,26 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
  	     pipe_name(pipe));
  }
-void assert_pipe(struct drm_i915_private *dev_priv,
+void assert_pipe(struct drm_i915_private *i915,
  		 enum pipe pipe, bool state)
  {
  	bool cur_state;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(i915,
  								      pipe);
  	enum intel_display_power_domain power_domain;
  	intel_wakeref_t wakeref;
/* we keep both pipes enabled on 830 */
-	if (IS_I830(dev_priv))
+	if (IS_I830(i915))
  		state = true;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (wakeref) {
  		u32 val = I915_READ(PIPECONF(cpu_transcoder));
  		cur_state = !!(val & PIPECONF_ENABLE);
- intel_display_power_put(dev_priv, power_domain, wakeref);
+		intel_display_power_put(i915, power_domain, wakeref);
  	} else {
  		cur_state = false;
  	}
@@ -1284,10 +1284,10 @@ static void assert_plane(struct intel_plane *plane, bool state)
static void assert_planes_disabled(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane)
  		assert_plane_disabled(plane);
  }
@@ -1297,7 +1297,7 @@ static void assert_vblank_disabled(struct drm_crtc *crtc)
  		drm_crtc_vblank_put(crtc);
  }
-void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+void assert_pch_transcoder_disabled(struct drm_i915_private *i915,
  				    enum pipe pipe)
  {
  	u32 val;
@@ -1310,78 +1310,78 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
  	     pipe_name(pipe));
  }
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_dp_disabled(struct drm_i915_private *i915,
  				   enum pipe pipe, enum port port,
  				   i915_reg_t dp_reg)
  {
  	enum pipe port_pipe;
  	bool state;
- state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
+	state = intel_dp_port_enabled(i915, dp_reg, port, &port_pipe);
I915_STATE_WARN(state && port_pipe == pipe,
  			"PCH DP %c enabled on transcoder %c, should be disabled\n",
  			port_name(port), pipe_name(pipe));
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+	I915_STATE_WARN(HAS_PCH_IBX(i915) && !state && port_pipe == PIPE_B,
  			"IBX PCH DP %c still using transcoder B\n",
  			port_name(port));
  }
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_hdmi_disabled(struct drm_i915_private *i915,
  				     enum pipe pipe, enum port port,
  				     i915_reg_t hdmi_reg)
  {
  	enum pipe port_pipe;
  	bool state;
- state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
+	state = intel_sdvo_port_enabled(i915, hdmi_reg, &port_pipe);
I915_STATE_WARN(state && port_pipe == pipe,
  			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
  			port_name(port), pipe_name(pipe));
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+	I915_STATE_WARN(HAS_PCH_IBX(i915) && !state && port_pipe == PIPE_B,
  			"IBX PCH HDMI %c still using transcoder B\n",
  			port_name(port));
  }
-static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_ports_disabled(struct drm_i915_private *i915,
  				      enum pipe pipe)
  {
  	enum pipe port_pipe;
- assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
-	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
-	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
+	assert_pch_dp_disabled(i915, pipe, PORT_B, PCH_DP_B);
+	assert_pch_dp_disabled(i915, pipe, PORT_C, PCH_DP_C);
+	assert_pch_dp_disabled(i915, pipe, PORT_D, PCH_DP_D);
- I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
+	I915_STATE_WARN(intel_crt_port_enabled(i915, PCH_ADPA, &port_pipe) &&
  			port_pipe == pipe,
  			"PCH VGA enabled on transcoder %c, should be disabled\n",
  			pipe_name(pipe));
- I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
+	I915_STATE_WARN(intel_lvds_port_enabled(i915, PCH_LVDS, &port_pipe) &&
  			port_pipe == pipe,
  			"PCH LVDS enabled on transcoder %c, should be disabled\n",
  			pipe_name(pipe));
/* PCH SDVOB multiplex with HDMIB */
-	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
-	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
-	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
+	assert_pch_hdmi_disabled(i915, pipe, PORT_B, PCH_HDMIB);
+	assert_pch_hdmi_disabled(i915, pipe, PORT_C, PCH_HDMIC);
+	assert_pch_hdmi_disabled(i915, pipe, PORT_D, PCH_HDMID);
  }
static void _vlv_enable_pll(struct intel_crtc *crtc,
  			    const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
  	POSTING_READ(DPLL(pipe));
  	udelay(150);
- if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    DPLL(pipe),
  				    DPLL_LOCK_VLV,
  				    DPLL_LOCK_VLV,
@@ -1392,13 +1392,13 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
  static void vlv_enable_pll(struct intel_crtc *crtc,
  			   const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
- assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(i915, pipe);
/* PLL is protected by panel, make sure we can write it */
-	assert_panel_unlocked(dev_priv, pipe);
+	assert_panel_unlocked(i915, pipe);
if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
  		_vlv_enable_pll(crtc, pipe_config);
@@ -1411,19 +1411,19 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
  static void _chv_enable_pll(struct intel_crtc *crtc,
  			    const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	enum dpio_channel port = vlv_pipe_to_channel(pipe);
  	u32 tmp;
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* Enable back the 10bit clock to display controller */
-	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+	tmp = vlv_dpio_read(i915, pipe, CHV_CMN_DW14(port));
  	tmp |= DPIO_DCLKP_EN;
-	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+	vlv_dpio_write(i915, pipe, CHV_CMN_DW14(port), tmp);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
/*
  	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
@@ -1434,7 +1434,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
  	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
  				    1))
  		DRM_ERROR("PLL %d failed to lock\n", pipe);
@@ -1443,13 +1443,13 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
  static void chv_enable_pll(struct intel_crtc *crtc,
  			   const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
- assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(i915, pipe);
/* PLL is protected by panel, make sure we can write it */
-	assert_panel_unlocked(dev_priv, pipe);
+	assert_panel_unlocked(i915, pipe);
if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
  		_chv_enable_pll(crtc, pipe_config);
@@ -1464,7 +1464,7 @@ static void chv_enable_pll(struct intel_crtc *crtc,
  		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
  		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
  		I915_WRITE(CBR4_VLV, 0);
-		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
+		i915->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
/*
  		 * DPLLB VGA mode also seems to cause problems.
@@ -1477,27 +1477,27 @@ static void chv_enable_pll(struct intel_crtc *crtc,
  	}
  }
-static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+static bool i9xx_has_pps(struct drm_i915_private *i915)
  {
-	if (IS_I830(dev_priv))
+	if (IS_I830(i915))
  		return false;
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+	return IS_PINEVIEW(i915) || IS_MOBILE(i915);
  }
static void i9xx_enable_pll(struct intel_crtc *crtc,
  			    const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	i915_reg_t reg = DPLL(crtc->pipe);
  	u32 dpll = crtc_state->dpll_hw_state.dpll;
  	int i;
- assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(i915, crtc->pipe);
/* PLL is protected by panel, make sure we can write it */
-	if (i9xx_has_pps(dev_priv))
-		assert_panel_unlocked(dev_priv, crtc->pipe);
+	if (i9xx_has_pps(i915))
+		assert_panel_unlocked(i915, crtc->pipe);
/*
  	 * Apparently we need to have VGA mode enabled prior to changing
@@ -1511,7 +1511,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
  	POSTING_READ(reg);
  	udelay(150);
- if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		I915_WRITE(DPLL_MD(crtc->pipe),
  			   crtc_state->dpll_hw_state.dpll_md);
  	} else {
@@ -1534,26 +1534,26 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
  static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
/* Don't disable pipe or pipe PLLs if needed */
-	if (IS_I830(dev_priv))
+	if (IS_I830(i915))
  		return;
/* Make sure the pipe isn't still relying on us */
-	assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(i915, pipe);
I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
  	POSTING_READ(DPLL(pipe));
  }
-static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void vlv_disable_pll(struct drm_i915_private *i915, enum pipe pipe)
  {
  	u32 val;
/* Make sure the pipe isn't still relying on us */
-	assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(i915, pipe);
val = DPLL_INTEGRATED_REF_CLK_VLV |
  		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1564,13 +1564,13 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  	POSTING_READ(DPLL(pipe));
  }
-static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void chv_disable_pll(struct drm_i915_private *i915, enum pipe pipe)
  {
  	enum dpio_channel port = vlv_pipe_to_channel(pipe);
  	u32 val;
/* Make sure the pipe isn't still relying on us */
-	assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(i915, pipe);
val = DPLL_SSC_REF_CLK_CHV |
  		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1580,17 +1580,17 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  	I915_WRITE(DPLL(pipe), val);
  	POSTING_READ(DPLL(pipe));
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* Disable 10bit clock to display controller */
-	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+	val = vlv_dpio_read(i915, pipe, CHV_CMN_DW14(port));
  	val &= ~DPIO_DCLKP_EN;
-	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+	vlv_dpio_write(i915, pipe, CHV_CMN_DW14(port), val);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+void vlv_wait_port_ready(struct drm_i915_private *i915,
  			 struct intel_digital_port *dport,
  			 unsigned int expected_mask)
  {
@@ -1615,7 +1615,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
  		BUG();
  	}
- if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    dpll_reg, port_mask, expected_mask,
  				    1000))
  		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
@@ -1626,19 +1626,19 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
  static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	i915_reg_t reg;
  	u32 val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
-	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
+	assert_shared_dpll_enabled(i915, crtc_state->shared_dpll);
/* FDI must be feeding us bits for PCH ports */
-	assert_fdi_tx_enabled(dev_priv, pipe);
-	assert_fdi_rx_enabled(dev_priv, pipe);
+	assert_fdi_tx_enabled(i915, pipe);
+	assert_fdi_rx_enabled(i915, pipe);
- if (HAS_PCH_CPT(dev_priv)) {
+	if (HAS_PCH_CPT(i915)) {
  		/* Workaround: Set the timing override bit before enabling the
  		 * pch transcoder. */
  		reg = TRANS_CHICKEN2(pipe);
@@ -1651,7 +1651,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
  	val = I915_READ(reg);
  	pipeconf_val = I915_READ(PIPECONF(pipe));
- if (HAS_PCH_IBX(dev_priv)) {
+	if (HAS_PCH_IBX(i915)) {
  		/*
  		 * Make the BPC in transcoder be consistent with
  		 * that in pipeconf reg. For HDMI we must use 8bpc
@@ -1666,7 +1666,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
val &= ~TRANS_INTERLACE_MASK;
  	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
-		if (HAS_PCH_IBX(dev_priv) &&
+		if (HAS_PCH_IBX(i915) &&
  		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
  			val |= TRANS_LEGACY_INTERLACED_ILK;
  		else
@@ -1676,20 +1676,20 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
  	}
I915_WRITE(reg, val | TRANS_ENABLE);
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
  				    100))
  		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
  }
-static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+static void lpt_enable_pch_transcoder(struct drm_i915_private *i915,
  				      enum transcoder cpu_transcoder)
  {
  	u32 val, pipeconf_val;
/* FDI must be feeding us bits for PCH ports */
-	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
-	assert_fdi_rx_enabled(dev_priv, PIPE_A);
+	assert_fdi_tx_enabled(i915, (enum pipe) cpu_transcoder);
+	assert_fdi_rx_enabled(i915, PIPE_A);
/* Workaround: set timing override bit. */
  	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
@@ -1706,7 +1706,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
  		val |= TRANS_PROGRESSIVE;
I915_WRITE(LPT_TRANSCONF, val);
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    LPT_TRANSCONF,
  				    TRANS_STATE_ENABLE,
  				    TRANS_STATE_ENABLE,
@@ -1714,30 +1714,30 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
  		DRM_ERROR("Failed to enable PCH transcoder\n");
  }
-static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *i915,
  					    enum pipe pipe)
  {
  	i915_reg_t reg;
  	u32 val;
/* FDI relies on the transcoder */
-	assert_fdi_tx_disabled(dev_priv, pipe);
-	assert_fdi_rx_disabled(dev_priv, pipe);
+	assert_fdi_tx_disabled(i915, pipe);
+	assert_fdi_rx_disabled(i915, pipe);
/* Ports must be off as well */
-	assert_pch_ports_disabled(dev_priv, pipe);
+	assert_pch_ports_disabled(i915, pipe);
reg = PCH_TRANSCONF(pipe);
  	val = I915_READ(reg);
  	val &= ~TRANS_ENABLE;
  	I915_WRITE(reg, val);
  	/* wait for PCH transcoder off, transcoder state */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    reg, TRANS_STATE_ENABLE, 0,
  				    50))
  		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
- if (HAS_PCH_CPT(dev_priv)) {
+	if (HAS_PCH_CPT(i915)) {
  		/* Workaround: Clear the timing override chicken bit again. */
  		reg = TRANS_CHICKEN2(pipe);
  		val = I915_READ(reg);
@@ -1746,7 +1746,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
  	}
  }
-void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+void lpt_disable_pch_transcoder(struct drm_i915_private *i915)
  {
  	u32 val;
@@ -1754,7 +1754,7 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
  	val &= ~TRANS_ENABLE;
  	I915_WRITE(LPT_TRANSCONF, val);
  	/* wait for PCH transcoder off, transcoder state */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
  				    50))
  		DRM_ERROR("Failed to disable PCH transcoder\n");
@@ -1767,9 +1767,9 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (HAS_PCH_LPT(dev_priv))
+	if (HAS_PCH_LPT(i915))
  		return PIPE_A;
  	else
  		return crtc->pipe;
@@ -1777,19 +1777,19 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
/*
  	 * On i965gm the hardware frame counter reads
  	 * zero when the TV encoder is enabled :(
  	 */
-	if (IS_I965GM(dev_priv) &&
+	if (IS_I965GM(i915) &&
  	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
  		return 0;
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+	if (INTEL_GEN(i915) >= 5 || IS_G4X(i915))
  		return 0xffffffff; /* full 32 bit counter */
-	else if (INTEL_GEN(dev_priv) >= 3)
+	else if (INTEL_GEN(i915) >= 3)
  		return 0xffffff; /* only 24 bits of frame count */
  	else
  		return 0; /* Gen2 doesn't have a hardware frame counter */
@@ -1807,7 +1807,7 @@ static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
  static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
  	enum pipe pipe = crtc->pipe;
  	i915_reg_t reg;
@@ -1822,29 +1822,29 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
  	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
  	 * need the check.
  	 */
-	if (HAS_GMCH(dev_priv)) {
+	if (HAS_GMCH(i915)) {
  		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
-			assert_dsi_pll_enabled(dev_priv);
+			assert_dsi_pll_enabled(i915);
  		else
-			assert_pll_enabled(dev_priv, pipe);
+			assert_pll_enabled(i915, pipe);
  	} else {
  		if (new_crtc_state->has_pch_encoder) {
  			/* if driving the PCH, we need FDI enabled */
-			assert_fdi_rx_pll_enabled(dev_priv,
+			assert_fdi_rx_pll_enabled(i915,
  						  intel_crtc_pch_transcoder(crtc));
-			assert_fdi_tx_pll_enabled(dev_priv,
+			assert_fdi_tx_pll_enabled(i915,
  						  (enum pipe) cpu_transcoder);
  		}
  		/* FIXME: assert CPU port conditions for SNB+ */
  	}
- trace_intel_pipe_enable(dev_priv, pipe);
+	trace_intel_pipe_enable(i915, pipe);
reg = PIPECONF(cpu_transcoder);
  	val = I915_READ(reg);
  	if (val & PIPECONF_ENABLE) {
  		/* we keep both pipes enabled on 830 */
-		WARN_ON(!IS_I830(dev_priv));
+		WARN_ON(!IS_I830(i915));
  		return;
  	}
@@ -1865,7 +1865,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
  static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
  	enum pipe pipe = crtc->pipe;
  	i915_reg_t reg;
@@ -1879,7 +1879,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
  	 */
  	assert_planes_disabled(crtc);
- trace_intel_pipe_disable(dev_priv, pipe);
+	trace_intel_pipe_disable(i915, pipe);
reg = PIPECONF(cpu_transcoder);
  	val = I915_READ(reg);
@@ -1894,7 +1894,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
  		val &= ~PIPECONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
-	if (!IS_I830(dev_priv))
+	if (!IS_I830(i915))
  		val &= ~PIPECONF_ENABLE;
I915_WRITE(reg, val);
@@ -1902,22 +1902,22 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
  		intel_wait_for_pipe_off(old_crtc_state);
  }
-static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
+static unsigned int intel_tile_size(const struct drm_i915_private *i915)
  {
-	return IS_GEN(dev_priv, 2) ? 2048 : 4096;
+	return IS_GEN(i915, 2) ? 2048 : 4096;
  }
static unsigned int
  intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
  {
-	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	struct drm_i915_private *i915 = to_i915(fb->dev);
  	unsigned int cpp = fb->format->cpp[color_plane];
switch (fb->modifier) {
  	case DRM_FORMAT_MOD_LINEAR:
-		return intel_tile_size(dev_priv);
+		return intel_tile_size(i915);
  	case I915_FORMAT_MOD_X_TILED:
-		if (IS_GEN(dev_priv, 2))
+		if (IS_GEN(i915, 2))
  			return 128;
  		else
  			return 512;
@@ -1926,7 +1926,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
  			return 128;
  		/* fall through */
  	case I915_FORMAT_MOD_Y_TILED:
-		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
+		if (IS_GEN(i915, 2) || HAS_128_BYTE_Y_TILING(i915))
  			return 128;
  		else
  			return 512;
@@ -2017,26 +2017,26 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
  	}
  }
-static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
+static unsigned int intel_cursor_alignment(const struct drm_i915_private *i915)
  {
-	if (IS_I830(dev_priv))
+	if (IS_I830(i915))
  		return 16 * 1024;
-	else if (IS_I85X(dev_priv))
+	else if (IS_I85X(i915))
  		return 256;
-	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
+	else if (IS_I845G(i915) || IS_I865G(i915))
  		return 32;
  	else
  		return 4 * 1024;
  }
-static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
+static unsigned int intel_linear_alignment(const struct drm_i915_private *i915)
  {
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		return 256 * 1024;
-	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
-		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	else if (IS_I965G(i915) || IS_I965GM(i915) ||
+		 IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		return 128 * 1024;
-	else if (INTEL_GEN(dev_priv) >= 4)
+	else if (INTEL_GEN(i915) >= 4)
  		return 4 * 1024;
  	else
  		return 0;
@@ -2045,7 +2045,7 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
  static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
  					 int color_plane)
  {
-	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	struct drm_i915_private *i915 = to_i915(fb->dev);
/* AUX_DIST needs only 4K alignment */
  	if (color_plane == 1)
@@ -2053,9 +2053,9 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
switch (fb->modifier) {
  	case DRM_FORMAT_MOD_LINEAR:
-		return intel_linear_alignment(dev_priv);
+		return intel_linear_alignment(i915);
  	case I915_FORMAT_MOD_X_TILED:
-		if (INTEL_GEN(dev_priv) >= 9)
+		if (INTEL_GEN(i915) >= 9)
  			return 256 * 1024;
  		return 0;
  	case I915_FORMAT_MOD_Y_TILED_CCS:
@@ -2072,9 +2072,9 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
  static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
- return INTEL_GEN(dev_priv) < 4 ||
+	return INTEL_GEN(i915) < 4 ||
  		(plane->has_fbc &&
  		 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
  }
@@ -2086,7 +2086,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
  			   unsigned long *out_flags)
  {
  	struct drm_device *dev = fb->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  	intel_wakeref_t wakeref;
  	struct i915_vma *vma;
@@ -2102,7 +2102,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
  	 * we should always have valid PTE following the scanout preventing
  	 * the VT-d warning.
  	 */
-	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
+	if (intel_scanout_needs_vtd_wa(i915) && alignment < 256 * 1024)
  		alignment = 256 * 1024;
/*
@@ -2112,10 +2112,10 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
  	 * intel_runtime_pm_put(), so it is correct to wrap only the
  	 * pin/unpin/fence and not more.
  	 */
-	wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
  	i915_gem_object_lock(obj);
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+	atomic_inc(&i915->gpu_error.pending_fb_pin);
	pinctl = 0;
@@ -2126,7 +2126,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
  	 * complicated than this. For example, Cherryview appears quite
  	 * happy to scanout from anywhere within its global aperture.
  	 */
-	if (HAS_GMCH(dev_priv))
+	if (HAS_GMCH(i915))
  		pinctl |= PIN_MAPPABLE;
vma = i915_gem_object_pin_to_display_plane(obj,
@@ -2154,7 +2154,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
  		 * mode that matches the user configuration.
  		 */
  		ret = i915_vma_pin_fence(vma);
-		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
+		if (ret != 0 && INTEL_GEN(i915) < 4) {
  			i915_gem_object_unpin_from_display_plane(vma);
  			vma = ERR_PTR(ret);
  			goto err;
@@ -2166,10 +2166,10 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
i915_vma_get(vma);
  err:
-	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+	atomic_dec(&i915->gpu_error.pending_fb_pin);
i915_gem_object_unlock(obj);
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
  	return vma;
  }
@@ -2265,7 +2265,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
  				       unsigned int pitch,
  				       u32 old_offset, u32 new_offset)
  {
-	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	struct drm_i915_private *i915 = to_i915(fb->dev);
  	unsigned int cpp = fb->format->cpp[color_plane];
WARN_ON(new_offset > old_offset);
@@ -2274,7 +2274,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
  		unsigned int tile_size, tile_width, tile_height;
  		unsigned int pitch_tiles;
- tile_size = intel_tile_size(dev_priv);
+		tile_size = intel_tile_size(i915);
  		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
if (drm_rotation_90_or_270(rotation)) {
@@ -2326,7 +2326,7 @@ static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
   * used. This is why the user has to pass in the pitch since it
   * is specified in the rotated orientation.
   */
-static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
+static u32 intel_compute_aligned_offset(struct drm_i915_private *i915,
  					int *x, int *y,
  					const struct drm_framebuffer *fb,
  					int color_plane,
@@ -2344,7 +2344,7 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
  		unsigned int tile_size, tile_width, tile_height;
  		unsigned int tile_rows, tiles, pitch_tiles;
- tile_size = intel_tile_size(dev_priv);
+		tile_size = intel_tile_size(i915);
  		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
if (drm_rotation_90_or_270(rotation)) {
@@ -2382,18 +2382,18 @@ static u32 intel_plane_compute_aligned_offset(int *x, int *y,
  					      int color_plane)
  {
  	struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_plane->base.dev);
  	const struct drm_framebuffer *fb = state->base.fb;
  	unsigned int rotation = state->base.rotation;
  	int pitch = state->color_plane[color_plane].stride;
  	u32 alignment;
if (intel_plane->id == PLANE_CURSOR)
-		alignment = intel_cursor_alignment(dev_priv);
+		alignment = intel_cursor_alignment(i915);
  	else
  		alignment = intel_surf_alignment(fb, color_plane);
- return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
+	return intel_compute_aligned_offset(i915, x, y, fb, color_plane,
  					    pitch, rotation, alignment);
  }
@@ -2402,11 +2402,11 @@ static int intel_fb_offset_to_xy(int *x, int *y,
  				 const struct drm_framebuffer *fb,
  				 int color_plane)
  {
-	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	struct drm_i915_private *i915 = to_i915(fb->dev);
  	unsigned int height;
if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
-	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+	    fb->offsets[color_plane] % intel_tile_size(i915)) {
  		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
  			      fb->offsets[color_plane], color_plane);
  		return -EINVAL;
@@ -2507,7 +2507,7 @@ bool is_ccs_modifier(u64 modifier)
  	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
  }
-u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+u32 intel_plane_fb_max_stride(struct drm_i915_private *i915,
  			      u32 pixel_format, u64 modifier)
  {
  	struct intel_crtc *crtc;
@@ -2517,7 +2517,7 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
  	 * We assume the primary plane for pipe A has
  	 * the highest stride limits of them all.
  	 */
-	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+	crtc = intel_get_crtc_for_pipe(i915, PIPE_A);
  	plane = to_intel_plane(crtc->base.primary);
return plane->max_stride(plane, pixel_format, modifier,
@@ -2525,7 +2525,7 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
  }
static
-u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
+u32 intel_fb_max_stride(struct drm_i915_private *i915,
  			u32 pixel_format, u64 modifier)
  {
  	/*
@@ -2535,22 +2535,22 @@ u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
  	 * The new CCS hash mode makes remapping impossible
  	 */
  	if (!is_ccs_modifier(modifier)) {
-		if (INTEL_GEN(dev_priv) >= 7)
+		if (INTEL_GEN(i915) >= 7)
  			return 256*1024;
-		else if (INTEL_GEN(dev_priv) >= 4)
+		else if (INTEL_GEN(i915) >= 4)
  			return 128*1024;
  	}
- return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
+	return intel_plane_fb_max_stride(i915, pixel_format, modifier);
  }
static u32
  intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
  {
-	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	struct drm_i915_private *i915 = to_i915(fb->dev);
if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
-		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
+		u32 max_stride = intel_plane_fb_max_stride(i915,
  							   fb->format->format,
  							   fb->modifier);
@@ -2559,7 +2559,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
  		 * we need the stride to be page aligned.
  		 */
  		if (fb->pitches[color_plane] > max_stride)
-			return intel_tile_size(dev_priv);
+			return intel_tile_size(i915);
  		else
  			return 64;
  	} else {
@@ -2570,7 +2570,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
  bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	int i;
@@ -2584,7 +2584,7 @@ bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
  	 * Would also need to deal with the fence POT alignment
  	 * and gen2 2KiB GTT tile size.
  	 */
-	if (INTEL_GEN(dev_priv) < 4)
+	if (INTEL_GEN(i915) < 4)
  		return false;
/*
@@ -2596,7 +2596,7 @@ bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
/* Linear needs a page aligned stride for remapping */
  	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
-		unsigned int alignment = intel_tile_size(dev_priv) - 1;
+		unsigned int alignment = intel_tile_size(i915) - 1;
for (i = 0; i < fb->format->num_planes; i++) {
  			if (fb->pitches[i] & alignment)
@@ -2636,7 +2636,7 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
  }
static int
-intel_fill_fb_info(struct drm_i915_private *dev_priv,
+intel_fill_fb_info(struct drm_i915_private *i915,
  		   struct drm_framebuffer *fb)
  {
  	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
@@ -2645,7 +2645,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
  	u32 gtt_offset_rotated = 0;
  	unsigned int max_size = 0;
  	int i, num_planes = fb->format->num_planes;
-	unsigned int tile_size = intel_tile_size(dev_priv);
+	unsigned int tile_size = intel_tile_size(i915);
for (i = 0; i < num_planes; i++) {
  		unsigned int width, height;
@@ -2719,7 +2719,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
  		intel_fb->normal[i].x = x;
  		intel_fb->normal[i].y = y;
- offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
+		offset = intel_compute_aligned_offset(i915, &x, &y, fb, i,
  						      fb->pitches[i],
  						      DRM_MODE_ROTATE_0,
  						      tile_size);
@@ -2803,14 +2803,14 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
  static void
  intel_plane_remap_gtt(struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	struct drm_framebuffer *fb = plane_state->base.fb;
  	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  	struct intel_rotation_info *info = &plane_state->view.rotated;
  	unsigned int rotation = plane_state->base.rotation;
  	int i, num_planes = fb->format->num_planes;
-	unsigned int tile_size = intel_tile_size(dev_priv);
+	unsigned int tile_size = intel_tile_size(i915);
  	unsigned int src_x, src_y;
  	unsigned int src_w, src_h;
  	u32 gtt_offset = 0;
@@ -2860,7 +2860,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
  		x += intel_fb->normal[i].x;
  		y += intel_fb->normal[i].y;
- offset = intel_compute_aligned_offset(dev_priv, &x, &y,
+		offset = intel_compute_aligned_offset(i915, &x, &y,
  						      fb, i, fb->pitches[i],
  						      DRM_MODE_ROTATE_0, tile_size);
  		offset /= tile_size;
@@ -3046,7 +3046,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
  			      struct intel_initial_plane_config *plane_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_i915_gem_object *obj = NULL;
  	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
  	struct drm_framebuffer *fb = &plane_config->fb->base;
@@ -3062,7 +3062,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
  	/* If the FB is too big, just don't use it since fbdev is not very
  	 * important and we should probably use that space with FBC or other
  	 * features. */
-	if (size_aligned * 2 > dev_priv->stolen_usable_size)
+	if (size_aligned * 2 > i915->stolen_usable_size)
  		return false;
switch (fb->modifier) {
@@ -3077,7 +3077,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
  	}
mutex_lock(&dev->struct_mutex);
-	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
+	obj = i915_gem_object_create_stolen_for_preallocated(i915,
  							     base_aligned,
  							     base_aligned,
  							     size_aligned);
@@ -3135,7 +3135,7 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	struct drm_plane *plane;
/*
@@ -3145,7 +3145,7 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
  	 */
  	crtc_state->active_planes = 0;
- drm_for_each_plane_mask(plane, &dev_priv->drm,
+	drm_for_each_plane_mask(plane, &i915->drm,
  				crtc_state->base.plane_mask)
  		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
  }
@@ -3177,7 +3177,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
  			     struct intel_initial_plane_config *plane_config)
  {
  	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_crtc *c;
  	struct drm_i915_gem_object *obj;
  	struct drm_plane *primary = intel_crtc->base.primary;
@@ -3272,7 +3272,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
  	intel_state->base.dst = drm_plane_state_dest(plane_state);
if (i915_gem_object_is_tiled(obj))
-		dev_priv->preserve_bios_swizzle = true;
+		i915->preserve_bios_swizzle = true;
plane_state->fb = fb;
  	plane_state->crtc = &intel_crtc->base;
@@ -3381,7 +3381,7 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
static int skl_check_main_surface(struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+	struct drm_i915_private *i915 = to_i915(plane_state->base.plane->dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	unsigned int rotation = plane_state->base.rotation;
  	int x = plane_state->base.src.x1 >> 16;
@@ -3392,9 +3392,9 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
  	int max_height = 4096;
  	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		max_width = icl_max_plane_width(fb, 0, rotation);
-	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	else if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		max_width = glk_max_plane_width(fb, 0, rotation);
  	else
  		max_width = skl_max_plane_width(fb, 0, rotation);
@@ -3564,16 +3564,16 @@ i9xx_plane_max_stride(struct intel_plane *plane,
  		      u32 pixel_format, u64 modifier,
  		      unsigned int rotation)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
- if (!HAS_GMCH(dev_priv)) {
+	if (!HAS_GMCH(i915)) {
  		return 32*1024;
-	} else if (INTEL_GEN(dev_priv) >= 4) {
+	} else if (INTEL_GEN(i915) >= 4) {
  		if (modifier == I915_FORMAT_MOD_X_TILED)
  			return 16*1024;
  		else
  			return 32*1024;
-	} else if (INTEL_GEN(dev_priv) >= 3) {
+	} else if (INTEL_GEN(i915) >= 3) {
  		if (modifier == I915_FORMAT_MOD_X_TILED)
  			return 8*1024;
  		else
@@ -3589,7 +3589,7 @@ i9xx_plane_max_stride(struct intel_plane *plane,
  static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	u32 dspcntr = 0;
if (crtc_state->gamma_enable)
@@ -3598,7 +3598,7 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
  	if (crtc_state->csc_enable)
  		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
- if (INTEL_GEN(dev_priv) < 5)
+	if (INTEL_GEN(i915) < 5)
  		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
return dspcntr;
@@ -3607,7 +3607,7 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
  static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
  			  const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	unsigned int rotation = plane_state->base.rotation;
@@ -3615,8 +3615,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
	dspcntr = DISPLAY_PLANE_ENABLE;
-	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
-	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+	if (IS_G4X(i915) || IS_GEN(i915, 5) ||
+	    IS_GEN(i915, 6) || IS_IVYBRIDGE(i915))
  		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -3646,7 +3646,7 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
  		return 0;
  	}
- if (INTEL_GEN(dev_priv) >= 4 &&
+	if (INTEL_GEN(i915) >= 4 &&
  	    fb->modifier == I915_FORMAT_MOD_X_TILED)
  		dspcntr |= DISPPLANE_TILED;
@@ -3661,7 +3661,7 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	int src_x, src_y;
  	u32 offset;
@@ -3679,7 +3679,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
-	if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
  							    plane_state, 0);
  	else
@@ -3694,7 +3694,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
  			   (src_y << 16) - plane_state->base.src.y1);
/* HSW/BDW do this automagically in hardware */
-	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+	if (!IS_HASWELL(i915) && !IS_BROADWELL(i915)) {
  		unsigned int rotation = plane_state->base.rotation;
  		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
  		int src_h = drm_rect_height(&plane_state->base.src) >> 16;
@@ -3752,7 +3752,7 @@ static void i9xx_update_plane(struct intel_plane *plane,
  			      const struct intel_crtc_state *crtc_state,
  			      const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
  	u32 linear_offset;
  	int x = plane_state->color_plane[0].x;
@@ -3765,16 +3765,16 @@ static void i9xx_update_plane(struct intel_plane *plane,
	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-	if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		dspaddr_offset = plane_state->color_plane[0].offset;
  	else
  		dspaddr_offset = linear_offset;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
-	if (INTEL_GEN(dev_priv) < 4) {
+	if (INTEL_GEN(i915) < 4) {
  		/* pipesrc and dspsize control the size that is scaled from,
  		 * which should always be the user's requested size.
  		 */
@@ -3782,7 +3782,7 @@ static void i9xx_update_plane(struct intel_plane *plane,
  		I915_WRITE_FW(DSPSIZE(i9xx_plane),
  			      ((crtc_state->pipe_src_h - 1) << 16) |
  			      (crtc_state->pipe_src_w - 1));
-	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+	} else if (IS_CHERRYVIEW(i915) && i9xx_plane == PLANE_B) {
  		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
  		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
  			      ((crtc_state->pipe_src_h - 1) << 16) |
@@ -3790,9 +3790,9 @@ static void i9xx_update_plane(struct intel_plane *plane,
  		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
  	}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
  		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
-	} else if (INTEL_GEN(dev_priv) >= 4) {
+	} else if (INTEL_GEN(i915) >= 4) {
  		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
  		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
  	}
@@ -3803,7 +3803,7 @@ static void i9xx_update_plane(struct intel_plane *plane,
  	 * the control register just before the surface register.
  	 */
  	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
-	if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		I915_WRITE_FW(DSPSURF(i9xx_plane),
  			      intel_plane_ggtt_offset(plane_state) +
  			      dspaddr_offset);
@@ -3812,13 +3812,13 @@ static void i9xx_update_plane(struct intel_plane *plane,
  			      intel_plane_ggtt_offset(plane_state) +
  			      dspaddr_offset);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static void i9xx_disable_plane(struct intel_plane *plane,
  			       const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
  	unsigned long irqflags;
  	u32 dspcntr;
@@ -3835,21 +3835,21 @@ static void i9xx_disable_plane(struct intel_plane *plane,
  	 */
  	dspcntr = i9xx_plane_ctl_crtc(crtc_state);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
-	if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
  	else
  		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
  				    enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum intel_display_power_domain power_domain;
  	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
  	intel_wakeref_t wakeref;
@@ -3862,7 +3862,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
  	 * display power wells.
  	 */
  	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return false;
@@ -3870,13 +3870,13 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
	ret = val & DISPLAY_PLANE_ENABLE;
-	if (INTEL_GEN(dev_priv) >= 5)
+	if (INTEL_GEN(i915) >= 5)
  		*pipe = plane->pipe;
  	else
  		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
  			DISPPLANE_SEL_PIPE_SHIFT;
- intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return ret;
  }
@@ -3884,7 +3884,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
  static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
  {
  	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
  	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
@@ -4091,10 +4091,10 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect)
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	u32 plane_ctl = 0;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		return plane_ctl;
if (crtc_state->gamma_enable)
@@ -4109,7 +4109,7 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
  u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
  		  const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	unsigned int rotation = plane_state->base.rotation;
@@ -4118,7 +4118,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE; - if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+	if (INTEL_GEN(i915) < 10 && !IS_GEMINILAKE(i915)) {
  		plane_ctl |= skl_plane_ctl_alpha(plane_state);
  		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
@@ -4133,7 +4133,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
  	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
  	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
- if (INTEL_GEN(dev_priv) >= 10)
+	if (INTEL_GEN(i915) >= 10)
  		plane_ctl |= cnl_plane_ctl_flip(rotation &
  						DRM_MODE_REFLECT_MASK);
@@ -4147,10 +4147,10 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	u32 plane_color_ctl = 0;
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		return plane_color_ctl;
if (crtc_state->gamma_enable)
@@ -4165,7 +4165,7 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
  u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
  			const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
@@ -4174,7 +4174,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
  	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
  	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
- if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
+	if (fb->format->is_yuv && !icl_is_hdr_plane(i915, plane->id)) {
  		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
  			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
  		else
@@ -4228,31 +4228,31 @@ __intel_display_resume(struct drm_device *dev,
  	return ret;
  }
-static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
+static bool gpu_reset_clobbers_display(struct drm_i915_private *i915)
  {
-	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
-		intel_has_gpu_reset(dev_priv));
+	return (INTEL_INFO(i915)->gpu_reset_clobbers_display &&
+		intel_has_gpu_reset(i915));
  }
-void intel_prepare_reset(struct drm_i915_private *dev_priv)
+void intel_prepare_reset(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+	struct drm_device *dev = &i915->drm;
+	struct drm_modeset_acquire_ctx *ctx = &i915->reset_ctx;
  	struct drm_atomic_state *state;
  	int ret;
/* reset doesn't touch the display */
  	if (!i915_modparams.force_reset_modeset_test &&
-	    !gpu_reset_clobbers_display(dev_priv))
+	    !gpu_reset_clobbers_display(i915))
  		return;
/* We have a modeset vs reset deadlock, defensively unbreak it. */
-	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
-	wake_up_all(&dev_priv->gpu_error.wait_queue);
+	set_bit(I915_RESET_MODESET, &i915->gpu_error.flags);
+	wake_up_all(&i915->gpu_error.wait_queue);
- if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
+	if (atomic_read(&i915->gpu_error.pending_fb_pin)) {
  		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
-		i915_gem_set_wedged(dev_priv);
+		i915_gem_set_wedged(i915);
  	}
/*
@@ -4286,27 +4286,27 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
  		return;
  	}
- dev_priv->modeset_restore_state = state;
+	i915->modeset_restore_state = state;
  	state->acquire_ctx = ctx;
  }
-void intel_finish_reset(struct drm_i915_private *dev_priv)
+void intel_finish_reset(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+	struct drm_device *dev = &i915->drm;
+	struct drm_modeset_acquire_ctx *ctx = &i915->reset_ctx;
  	struct drm_atomic_state *state;
  	int ret;
/* reset doesn't touch the display */
-	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+	if (!test_bit(I915_RESET_MODESET, &i915->gpu_error.flags))
  		return;
- state = fetch_and_zero(&dev_priv->modeset_restore_state);
+	state = fetch_and_zero(&i915->modeset_restore_state);
  	if (!state)
  		goto unlock;
/* reset doesn't touch the display */
-	if (!gpu_reset_clobbers_display(dev_priv)) {
+	if (!gpu_reset_clobbers_display(i915)) {
  		/* for testing only restore the display */
  		ret = __intel_display_resume(dev, state, ctx);
  		if (ret)
@@ -4316,20 +4316,20 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
  		 * The display has been reset as well,
  		 * so need a full re-initialization.
  		 */
-		intel_pps_unlock_regs_wa(dev_priv);
+		intel_pps_unlock_regs_wa(i915);
  		intel_modeset_init_hw(dev);
-		intel_init_clock_gating(dev_priv);
+		intel_init_clock_gating(i915);
- spin_lock_irq(&dev_priv->irq_lock);
-		if (dev_priv->display.hpd_irq_setup)
-			dev_priv->display.hpd_irq_setup(dev_priv);
-		spin_unlock_irq(&dev_priv->irq_lock);
+		spin_lock_irq(&i915->irq_lock);
+		if (i915->display.hpd_irq_setup)
+			i915->display.hpd_irq_setup(i915);
+		spin_unlock_irq(&i915->irq_lock);
ret = __intel_display_resume(dev, state, ctx);
  		if (ret)
  			DRM_ERROR("Restoring old state failed with %i\n", ret);
- intel_hpd_init(dev_priv);
+		intel_hpd_init(i915);
  	}
drm_atomic_state_put(state);
@@ -4338,12 +4338,12 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
  	drm_modeset_acquire_fini(ctx);
  	mutex_unlock(&dev->mode_config.mutex);
- clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
+	clear_bit(I915_RESET_MODESET, &i915->gpu_error.flags);
  }
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	u32 tmp;
@@ -4369,7 +4369,7 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
  				     const struct intel_crtc_state *new_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
  	crtc->base.mode = new_crtc_state->base.mode;
@@ -4388,26 +4388,26 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
  		   (new_crtc_state->pipe_src_h - 1));
/* on skylake this is done by detaching scalers */
-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		skl_detach_scalers(new_crtc_state);
if (new_crtc_state->pch_pfit.enabled)
  			skylake_pfit_enable(new_crtc_state);
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
+	} else if (HAS_PCH_SPLIT(i915)) {
  		if (new_crtc_state->pch_pfit.enabled)
  			ironlake_pfit_enable(new_crtc_state);
  		else if (old_crtc_state->pch_pfit.enabled)
  			ironlake_pfit_disable(old_crtc_state);
  	}
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_set_pipe_chicken(crtc);
  }
static void intel_fdi_normal_train(struct intel_crtc *crtc)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int pipe = crtc->pipe;
  	i915_reg_t reg;
  	u32 temp;
@@ -4415,7 +4415,7 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
  	/* enable normal train */
  	reg = FDI_TX_CTL(pipe);
  	temp = I915_READ(reg);
-	if (IS_IVYBRIDGE(dev_priv)) {
+	if (IS_IVYBRIDGE(i915)) {
  		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
  		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
  	} else {
@@ -4426,7 +4426,7 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
reg = FDI_RX_CTL(pipe);
  	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev_priv)) {
+	if (HAS_PCH_CPT(i915)) {
  		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
  	} else {
@@ -4440,7 +4440,7 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
  	udelay(1000);
/* IVB wants error correction enabled */
-	if (IS_IVYBRIDGE(dev_priv))
+	if (IS_IVYBRIDGE(i915))
  		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
  			   FDI_FE_ERRC_ENABLE);
  }
@@ -4450,13 +4450,13 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc,
  				    const struct intel_crtc_state *crtc_state)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int pipe = crtc->pipe;
  	i915_reg_t reg;
  	u32 temp, tries;
/* FDI needs bits from pipe first */
-	assert_pipe_enabled(dev_priv, pipe);
+	assert_pipe_enabled(i915, pipe);
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
  	   for train result */
@@ -4551,7 +4551,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
  				const struct intel_crtc_state *crtc_state)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int pipe = crtc->pipe;
  	i915_reg_t reg;
  	u32 temp, i, retry;
@@ -4584,7 +4584,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
reg = FDI_RX_CTL(pipe);
  	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev_priv)) {
+	if (HAS_PCH_CPT(i915)) {
  		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  	} else {
@@ -4628,7 +4628,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
  	temp = I915_READ(reg);
  	temp &= ~FDI_LINK_TRAIN_NONE;
  	temp |= FDI_LINK_TRAIN_PATTERN_2;
-	if (IS_GEN(dev_priv, 6)) {
+	if (IS_GEN(i915, 6)) {
  		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  		/* SNB-B */
  		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
@@ -4637,7 +4637,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
reg = FDI_RX_CTL(pipe);
  	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev_priv)) {
+	if (HAS_PCH_CPT(i915)) {
  		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
  	} else {
@@ -4684,7 +4684,7 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
  				      const struct intel_crtc_state *crtc_state)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int pipe = crtc->pipe;
  	i915_reg_t reg;
  	u32 temp, i, j;
@@ -4802,7 +4802,7 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
  static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_crtc->base.dev);
  	int pipe = intel_crtc->pipe;
  	i915_reg_t reg;
  	u32 temp;
@@ -4839,7 +4839,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
  static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
  {
  	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int pipe = intel_crtc->pipe;
  	i915_reg_t reg;
  	u32 temp;
@@ -4869,7 +4869,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
  static void ironlake_fdi_disable(struct drm_crtc *crtc)
  {
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	int pipe = intel_crtc->pipe;
  	i915_reg_t reg;
@@ -4891,7 +4891,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
  	udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
-	if (HAS_PCH_IBX(dev_priv))
+	if (HAS_PCH_IBX(i915))
  		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
@@ -4903,7 +4903,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
  	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev_priv)) {
+	if (HAS_PCH_CPT(i915)) {
  		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  	} else {
@@ -4919,12 +4919,12 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
  	udelay(100);
  }
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
+bool intel_has_pending_fb_unpin(struct drm_i915_private *i915)
  {
  	struct drm_crtc *crtc;
  	bool cleanup_done;
- drm_for_each_crtc(crtc, &dev_priv->drm) {
+	drm_for_each_crtc(crtc, &i915->drm) {
  		struct drm_crtc_commit *commit;
  		spin_lock(&crtc->commit_lock);
  		commit = list_first_entry_or_null(&crtc->commit_list,
@@ -4944,31 +4944,31 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
  	return false;
  }
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct drm_i915_private *i915)
  {
  	u32 temp;
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
-	mutex_lock(&dev_priv->sb_lock);
+	mutex_lock(&i915->sb_lock);
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+	temp = intel_sbi_read(i915, SBI_SSCCTL6, SBI_ICLK);
  	temp |= SBI_SSCCTL_DISABLE;
-	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+	intel_sbi_write(i915, SBI_SSCCTL6, temp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+	mutex_unlock(&i915->sb_lock);
  }
/* Program iCLKIP clock to the desired frequency */
  static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	int clock = crtc_state->base.adjusted_mode.crtc_clock;
  	u32 divsel, phaseinc, auxdiv, phasedir = 0;
  	u32 temp;
- lpt_disable_iclkip(dev_priv);
+	lpt_disable_iclkip(i915);
/* The iCLK virtual clock root frequency is in MHz,
  	 * but the adjusted_mode->crtc_clock in in KHz. To get the
@@ -5007,30 +5007,30 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
  			phasedir,
  			phaseinc);
- mutex_lock(&dev_priv->sb_lock);
+	mutex_lock(&i915->sb_lock);
/* Program SSCDIVINTPHASE6 */
-	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+	temp = intel_sbi_read(i915, SBI_SSCDIVINTPHASE6, SBI_ICLK);
  	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
  	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
  	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
  	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
  	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
  	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+	intel_sbi_write(i915, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
-	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+	temp = intel_sbi_read(i915, SBI_SSCAUXDIV6, SBI_ICLK);
  	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
  	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
-	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+	intel_sbi_write(i915, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
-	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+	temp = intel_sbi_read(i915, SBI_SSCCTL6, SBI_ICLK);
  	temp &= ~SBI_SSCCTL_DISABLE;
-	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+	intel_sbi_write(i915, SBI_SSCCTL6, temp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+	mutex_unlock(&i915->sb_lock);
/* Wait for initialization time */
  	udelay(24);
@@ -5038,7 +5038,7 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
  	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
  }
-int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+int lpt_get_iclkip(struct drm_i915_private *i915)
  {
  	u32 divsel, phaseinc, auxdiv;
  	u32 iclk_virtual_root_freq = 172800 * 1000;
@@ -5049,25 +5049,25 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
  	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
  		return 0;
- mutex_lock(&dev_priv->sb_lock);
+	mutex_lock(&i915->sb_lock);
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+	temp = intel_sbi_read(i915, SBI_SSCCTL6, SBI_ICLK);
  	if (temp & SBI_SSCCTL_DISABLE) {
-		mutex_unlock(&dev_priv->sb_lock);
+		mutex_unlock(&i915->sb_lock);
  		return 0;
  	}
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+	temp = intel_sbi_read(i915, SBI_SSCDIVINTPHASE6, SBI_ICLK);
  	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
  		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
  	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
  		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+	temp = intel_sbi_read(i915, SBI_SSCAUXDIV6, SBI_ICLK);
  	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
  		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
- mutex_unlock(&dev_priv->sb_lock);
+	mutex_unlock(&i915->sb_lock);
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
@@ -5079,7 +5079,7 @@ static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *c
  						enum pipe pch_transcoder)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
@@ -5099,7 +5099,7 @@ static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *c
  		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
  }
-static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
+static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *i915, bool enable)
  {
  	u32 temp;
@@ -5122,20 +5122,20 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool e
  static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
switch (crtc->pipe) {
  	case PIPE_A:
  		break;
  	case PIPE_B:
  		if (crtc_state->fdi_lanes > 2)
-			cpt_set_fdi_bc_bifurcation(dev_priv, false);
+			cpt_set_fdi_bc_bifurcation(i915, false);
  		else
-			cpt_set_fdi_bc_bifurcation(dev_priv, true);
+			cpt_set_fdi_bc_bifurcation(i915, true);
break;
  	case PIPE_C:
-		cpt_set_fdi_bc_bifurcation(dev_priv, true);
+		cpt_set_fdi_bc_bifurcation(i915, true);
break;
  	default:
@@ -5185,13 +5185,13 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int pipe = crtc->pipe;
  	u32 temp;
- assert_pch_transcoder_disabled(dev_priv, pipe);
+	assert_pch_transcoder_disabled(i915, pipe);
- if (IS_IVYBRIDGE(dev_priv))
+	if (IS_IVYBRIDGE(i915))
  		ivybridge_update_fdi_bc_bifurcation(crtc_state);
/* Write the TU size bits before fdi link training, so that error
@@ -5200,18 +5200,18 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
  		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* For PCH output, training FDI link */
-	dev_priv->display.fdi_link_train(crtc, crtc_state);
+	i915->display.fdi_link_train(crtc, crtc_state);
/* We need to program the right clock selection before writing the pixel
  	 * mutliplier into the DPLL. */
-	if (HAS_PCH_CPT(dev_priv)) {
+	if (HAS_PCH_CPT(i915)) {
  		u32 sel;
temp = I915_READ(PCH_DPLL_SEL);
  		temp |= TRANS_DPLL_ENABLE(pipe);
  		sel = TRANS_DPLLB_SEL(pipe);
  		if (crtc_state->shared_dpll ==
-		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
+		    intel_get_shared_dpll_by_id(i915, DPLL_ID_PCH_PLL_B))
  			temp |= sel;
  		else
  			temp &= ~sel;
@@ -5228,13 +5228,13 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
  	intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
-	assert_panel_unlocked(dev_priv, pipe);
+	assert_panel_unlocked(i915, pipe);
  	ironlake_pch_transcoder_set_timings(crtc_state, pipe);
	intel_fdi_normal_train(crtc);
	/* For PCH DP, enable TRANS_DP_CTL */
-	if (HAS_PCH_CPT(dev_priv) &&
+	if (HAS_PCH_CPT(i915) &&
  	    intel_crtc_has_dp_encoder(crtc_state)) {
  		const struct drm_display_mode *adjusted_mode =
  			&crtc_state->base.adjusted_mode;
@@ -5268,22 +5268,22 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
  			   const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- assert_pch_transcoder_disabled(dev_priv, PIPE_A);
+	assert_pch_transcoder_disabled(i915, PIPE_A);
	lpt_program_iclkip(crtc_state);
	/* Set transcoder timing. */
  	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
- lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
+	lpt_enable_pch_transcoder(i915, cpu_transcoder);
  }
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	i915_reg_t dslreg = PIPEDSL(pipe);
  	u32 temp;
@@ -5385,7 +5385,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
  		&crtc_state->scaler_state;
  	struct intel_crtc *intel_crtc =
  		to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_crtc->base.dev);
  	const struct drm_display_mode *adjusted_mode =
  		&crtc_state->base.adjusted_mode;
@@ -5403,7 +5403,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
  	 * Once NV12 is enabled, handle it here while allocating scaler
  	 * for NV12.
  	 */
-	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+	if (INTEL_GEN(i915) >= 9 && crtc_state->base.enable &&
  	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
  		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
  		return -EINVAL;
@@ -5442,10 +5442,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
  	/* range checks */
  	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
  	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
-	    (INTEL_GEN(dev_priv) >= 11 &&
+	    (INTEL_GEN(i915) >= 11 &&
  	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
  	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
-	    (INTEL_GEN(dev_priv) < 11 &&
+	    (INTEL_GEN(i915) < 11 &&
  	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
  	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
  		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
@@ -5502,14 +5502,14 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
  {
  	struct intel_plane *intel_plane =
  		to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_plane->base.dev);
  	struct drm_framebuffer *fb = plane_state->base.fb;
  	int ret;
  	bool force_detach = !fb || !plane_state->base.visible;
  	bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
-	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+	if (!icl_is_hdr_plane(i915, intel_plane->id) &&
  	    fb && is_planar_yuv_format(fb->format->format))
  		need_scaler = true;
@@ -5582,7 +5582,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
  static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	const struct intel_crtc_scaler_state *scaler_state =
  		&crtc_state->scaler_state;
@@ -5619,7 +5619,7 @@ static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
  static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	int pipe = crtc->pipe;
if (crtc_state->pch_pfit.enabled) {
@@ -5627,7 +5627,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
  		 * as some pre-programmed values are broken,
  		 * e.g. x201.
  		 */
-		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
+		if (IS_IVYBRIDGE(i915) || IS_HASWELL(i915))
  			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
  						 PF_PIPE_SEL_IVB(pipe));
  		else
@@ -5641,7 +5641,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
if (!crtc_state->ips_enabled)
  		return;
@@ -5653,8 +5653,8 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
  	 */
  	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
- if (IS_BROADWELL(dev_priv)) {
-		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+	if (IS_BROADWELL(i915)) {
+		WARN_ON(sandybridge_pcode_write(i915, DISPLAY_IPS_CONTROL,
  						IPS_ENABLE | IPS_PCODE_CONTROL));
  		/* Quoting Art Runyan: "its not safe to expect any particular
  		 * value in IPS_CTL bit 31 after enabling IPS through the
@@ -5668,7 +5668,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
  		 * and don't wait for vblanks until the end of crtc_enable, then
  		 * the HW state readout code will complain that the expected
  		 * IPS_CTL value is not the one we read. */
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
  					    50))
  			DRM_ERROR("Timed out waiting for IPS enable\n");
@@ -5679,19 +5679,19 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
if (!crtc_state->ips_enabled)
  		return;
- if (IS_BROADWELL(dev_priv)) {
-		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+	if (IS_BROADWELL(i915)) {
+		WARN_ON(sandybridge_pcode_write(i915, DISPLAY_IPS_CONTROL, 0));
  		/*
  		 * Wait for PCODE to finish disabling IPS. The BSpec specified
  		 * 42ms timeout value leads to occasional timeouts so use 100ms
  		 * instead.
  		 */
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    IPS_CTL, IPS_ENABLE, 0,
  					    100))
  			DRM_ERROR("Timed out waiting for IPS disable\n");
@@ -5701,7 +5701,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
  	}
/* We need to wait for a vblank before we can disable the plane. */
-	intel_wait_for_vblank(dev_priv, crtc->pipe);
+	intel_wait_for_vblank(i915, crtc->pipe);
  }
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
@@ -5735,7 +5735,7 @@ intel_post_enable_primary(struct drm_crtc *crtc,
  			  const struct intel_crtc_state *new_crtc_state)
  {
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	int pipe = intel_crtc->pipe;
@@ -5746,12 +5746,12 @@ intel_post_enable_primary(struct drm_crtc *crtc,
  	 * FIXME: Need to fix the logic to work when we turn off all planes
  	 * but leave the pipe running.
  	 */
-	if (IS_GEN(dev_priv, 2))
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	if (IS_GEN(i915, 2))
+		intel_set_cpu_fifo_underrun_reporting(i915, pipe, true);
/* Underruns don't always raise interrupts, so check manually. */
-	intel_check_cpu_fifo_underruns(dev_priv);
-	intel_check_pch_fifo_underruns(dev_priv);
+	intel_check_cpu_fifo_underruns(i915);
+	intel_check_pch_fifo_underruns(i915);
  }
/* FIXME get rid of this and use pre_plane_update */
@@ -5759,7 +5759,7 @@ static void
  intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
  {
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	int pipe = intel_crtc->pipe;
@@ -5767,8 +5767,8 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
  	 * Gen2 reports pipe underruns whenever all planes are disabled.
  	 * So disable underrun reporting before all the planes get disabled.
  	 */
-	if (IS_GEN(dev_priv, 2))
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+	if (IS_GEN(i915, 2))
+		intel_set_cpu_fifo_underrun_reporting(i915, pipe, false);
  	hsw_disable_ips(to_intel_crtc_state(crtc->state));
@@ -5781,16 +5781,16 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
  	 * event which is after the vblank start event, so we need to have a
  	 * wait-for-vblank between disabling the plane and the pipe.
  	 */
-	if (HAS_GMCH(dev_priv) &&
-	    intel_set_memory_cxsr(dev_priv, false))
-		intel_wait_for_vblank(dev_priv, pipe);
+	if (HAS_GMCH(i915) &&
+	    intel_set_memory_cxsr(i915, false))
+		intel_wait_for_vblank(i915, pipe);
  }
static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
  				       const struct intel_crtc_state *new_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (!old_crtc_state->ips_enabled)
  		return false;
@@ -5804,7 +5804,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
  	 *
  	 * Disable IPS before we program the LUT.
  	 */
-	if (IS_HASWELL(dev_priv) &&
+	if (IS_HASWELL(i915) &&
  	    (new_crtc_state->base.color_mgmt_changed ||
  	     new_crtc_state->update_pipe) &&
  	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
@@ -5817,7 +5817,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
  				       const struct intel_crtc_state *new_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (!new_crtc_state->ips_enabled)
  		return false;
@@ -5831,7 +5831,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
  	 *
  	 * Re-enable IPS after the LUT has been programmed.
  	 */
-	if (IS_HASWELL(dev_priv) &&
+	if (IS_HASWELL(i915) &&
  	    (new_crtc_state->base.color_mgmt_changed ||
  	     new_crtc_state->update_pipe) &&
  	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
@@ -5848,24 +5848,24 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
  	return !old_crtc_state->ips_enabled;
  }
-static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
+static bool needs_nv12_wa(struct drm_i915_private *i915,
  			  const struct intel_crtc_state *crtc_state)
  {
  	if (!crtc_state->nv12_planes)
  		return false;
/* WA Display #0827: Gen9:all */
-	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
+	if (IS_GEN(i915, 9) && !IS_GEMINILAKE(i915))
  		return true;
return false;
  }
-static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
+static bool needs_scalerclk_wa(struct drm_i915_private *i915,
  			       const struct intel_crtc_state *crtc_state)
  {
  	/* Wa_2006604312:icl */
-	if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
+	if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(i915))
  		return true;
return false;
@@ -5875,7 +5875,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_atomic_state *old_state = old_crtc_state->base.state;
  	struct intel_crtc_state *pipe_config =
  		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
@@ -5904,13 +5904,13 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
  			intel_post_enable_primary(&crtc->base, pipe_config);
  	}
- if (needs_nv12_wa(dev_priv, old_crtc_state) &&
-	    !needs_nv12_wa(dev_priv, pipe_config))
-		skl_wa_827(dev_priv, crtc->pipe, false);
+	if (needs_nv12_wa(i915, old_crtc_state) &&
+	    !needs_nv12_wa(i915, pipe_config))
+		skl_wa_827(i915, crtc->pipe, false);
- if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
-	    !needs_scalerclk_wa(dev_priv, pipe_config))
-		icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
+	if (needs_scalerclk_wa(i915, old_crtc_state) &&
+	    !needs_scalerclk_wa(i915, pipe_config))
+		icl_wa_scalerclkgating(i915, crtc->pipe, false);
  }
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
@@ -5918,7 +5918,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
  {
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_atomic_state *old_state = old_crtc_state->base.state;
  	struct drm_plane *primary = crtc->base.primary;
  	struct drm_plane_state *old_primary_state =
@@ -5940,20 +5940,20 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
  		 * Gen2 reports pipe underruns whenever all planes are disabled.
  		 * So disable underrun reporting before all the planes get disabled.
  		 */
-		if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
+		if (IS_GEN(i915, 2) && old_primary_state->visible &&
  		    (modeset || !new_primary_state->base.visible))
-			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+			intel_set_cpu_fifo_underrun_reporting(i915, crtc->pipe, false);
  	}
/* Display WA 827 */
-	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
-	    needs_nv12_wa(dev_priv, pipe_config))
-		skl_wa_827(dev_priv, crtc->pipe, true);
+	if (!needs_nv12_wa(i915, old_crtc_state) &&
+	    needs_nv12_wa(i915, pipe_config))
+		skl_wa_827(i915, crtc->pipe, true);
/* Wa_2006604312:icl */
-	if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
-	    needs_scalerclk_wa(dev_priv, pipe_config))
-		icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
+	if (!needs_scalerclk_wa(i915, old_crtc_state) &&
+	    needs_scalerclk_wa(i915, pipe_config))
+		icl_wa_scalerclkgating(i915, crtc->pipe, true);
/*
  	 * Vblank time updates from the shadow to live plane control register
@@ -5964,9 +5964,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
  	 * event which is after the vblank start event, so we need to have a
  	 * wait-for-vblank between disabling the plane and the pipe.
  	 */
-	if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
-	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
-		intel_wait_for_vblank(dev_priv, crtc->pipe);
+	if (HAS_GMCH(i915) && old_crtc_state->base.active &&
+	    pipe_config->disable_cxsr && intel_set_memory_cxsr(i915, false))
+		intel_wait_for_vblank(i915, crtc->pipe);
/*
  	 * IVB workaround: must disable low power watermarks for at least
@@ -5977,7 +5977,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
  	 */
  	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
  	    old_crtc_state->base.active)
-		intel_wait_for_vblank(dev_priv, crtc->pipe);
+		intel_wait_for_vblank(i915, crtc->pipe);
/*
  	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
@@ -6000,8 +6000,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
  	 * we'll continue to update watermarks the old way, if flags tell
  	 * us to.
  	 */
-	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state,
+	if (i915->display.initial_watermarks != NULL)
+		i915->display.initial_watermarks(old_intel_state,
  						     pipe_config);
  	else if (pipe_config->update_wm_pre)
  		intel_update_watermarks(crtc);
@@ -6010,7 +6010,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
  static void intel_crtc_disable_planes(struct intel_atomic_state *state,
  				      struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct intel_crtc_state *new_crtc_state =
  		intel_atomic_get_new_crtc_state(state, crtc);
  	unsigned int update_mask = new_crtc_state->update_planes;
@@ -6032,7 +6032,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
  			fb_bits |= plane->frontbuffer_bit;
  	}
- intel_frontbuffer_flip(dev_priv, fb_bits);
+	intel_frontbuffer_flip(i915, fb_bits);
  }
static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@ -6190,7 +6190,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
  {
  	struct drm_crtc *crtc = pipe_config->base.crtc;
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	int pipe = intel_crtc->pipe;
  	struct intel_atomic_state *old_intel_state =
@@ -6209,8 +6209,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
  	 *
  	 * Spurious PCH underruns also occur during PCH enabling.
  	 */
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+	intel_set_cpu_fifo_underrun_reporting(i915, pipe, false);
+	intel_set_pch_fifo_underrun_reporting(i915, pipe, false);
if (pipe_config->has_pch_encoder)
  		intel_prepare_shared_dpll(pipe_config);
@@ -6238,8 +6238,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
  		 * enabling. */
  		ironlake_fdi_pll_enable(pipe_config);
  	} else {
-		assert_fdi_tx_disabled(dev_priv, pipe);
-		assert_fdi_rx_disabled(dev_priv, pipe);
+		assert_fdi_tx_disabled(i915, pipe);
+		assert_fdi_rx_disabled(i915, pipe);
  	}
ironlake_pfit_enable(pipe_config);
@@ -6253,8 +6253,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
  	/* update DSPCNTR to configure gamma for pipe bottom color */
  	intel_disable_primary_plane(pipe_config);
- if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+	if (i915->display.initial_watermarks != NULL)
+		i915->display.initial_watermarks(old_intel_state, pipe_config);
  	intel_enable_pipe(pipe_config);
if (pipe_config->has_pch_encoder)
@@ -6265,7 +6265,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
  	intel_encoders_enable(crtc, pipe_config, old_state);
-	if (HAS_PCH_CPT(dev_priv))
+	if (HAS_PCH_CPT(i915))
  		cpt_verify_modeset(dev, intel_crtc->pipe);
/*
@@ -6275,11 +6275,11 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
  	 * in case there are more corner cases we don't know about.
  	 */
  	if (pipe_config->has_pch_encoder) {
-		intel_wait_for_vblank(dev_priv, pipe);
-		intel_wait_for_vblank(dev_priv, pipe);
+		intel_wait_for_vblank(i915, pipe);
+		intel_wait_for_vblank(i915, pipe);
  	}
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+	intel_set_cpu_fifo_underrun_reporting(i915, pipe, true);
+	intel_set_pch_fifo_underrun_reporting(i915, pipe, true);
  }
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -6288,7 +6288,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
  	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
  }
-static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
+static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *i915,
  					    enum pipe pipe, bool apply)
  {
  	u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
@@ -6304,7 +6304,7 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	u32 val;
@@ -6319,7 +6319,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
  				struct drm_atomic_state *old_state)
  {
  	struct drm_crtc *crtc = pipe_config->base.crtc;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
  	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
@@ -6359,18 +6359,18 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
  	if (!transcoder_is_dsi(cpu_transcoder))
  		haswell_set_pipeconf(pipe_config);
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+	if (INTEL_GEN(i915) >= 9 || IS_BROADWELL(i915))
  		bdw_set_pipemisc(pipe_config);
  	intel_crtc->active = true;
  	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
-	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+	psl_clkgate_wa = (IS_GEMINILAKE(i915) || IS_CANNONLAKE(i915)) &&
  			 pipe_config->pch_pfit.enabled;
  	if (psl_clkgate_wa)
-		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
+		glk_pipe_scaler_clock_gating_wa(i915, pipe, true);
- if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		skylake_pfit_enable(pipe_config);
  	else
  		ironlake_pfit_enable(pipe_config);
@@ -6382,20 +6382,20 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
  	intel_color_load_luts(pipe_config);
  	intel_color_commit(pipe_config);
  	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
-	if (INTEL_GEN(dev_priv) < 9)
+	if (INTEL_GEN(i915) < 9)
  		intel_disable_primary_plane(pipe_config);
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_set_pipe_chicken(intel_crtc);
intel_ddi_set_pipe_settings(pipe_config);
  	if (!transcoder_is_dsi(cpu_transcoder))
  		intel_ddi_enable_transcoder_func(pipe_config);
- if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+	if (i915->display.initial_watermarks != NULL)
+		i915->display.initial_watermarks(old_intel_state, pipe_config);
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_pipe_mbus_enable(intel_crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
@@ -6414,23 +6414,23 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
  	intel_encoders_enable(crtc, pipe_config, old_state);
if (psl_clkgate_wa) {
-		intel_wait_for_vblank(dev_priv, pipe);
-		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
+		intel_wait_for_vblank(i915, pipe);
+		glk_pipe_scaler_clock_gating_wa(i915, pipe, false);
  	}
/* If we change the relative order between pipe/planes enabling, we need
  	 * to change the workaround. */
  	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
-	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
-		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
-		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
+	if (IS_HASWELL(i915) && hsw_workaround_pipe != INVALID_PIPE) {
+		intel_wait_for_vblank(i915, hsw_workaround_pipe);
+		intel_wait_for_vblank(i915, hsw_workaround_pipe);
  	}
  }
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
/* To avoid upsetting the power well on haswell only disable the pfit if
@@ -6447,7 +6447,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
  {
  	struct drm_crtc *crtc = old_crtc_state->base.crtc;
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	int pipe = intel_crtc->pipe;
@@ -6456,8 +6456,8 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
  	 * pipe is already disabled, but FDI RX/TX is still enabled.
  	 * Happens at least with VGA+HDMI cloning. Suppress them.
  	 */
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+	intel_set_cpu_fifo_underrun_reporting(i915, pipe, false);
+	intel_set_pch_fifo_underrun_reporting(i915, pipe, false);
  	intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6474,9 +6474,9 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
  	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (old_crtc_state->has_pch_encoder) {
-		ironlake_disable_pch_transcoder(dev_priv, pipe);
+		ironlake_disable_pch_transcoder(i915, pipe);
- if (HAS_PCH_CPT(dev_priv)) {
+		if (HAS_PCH_CPT(i915)) {
  			i915_reg_t reg;
  			u32 temp;
@@ -6497,15 +6497,15 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
  		ironlake_fdi_pll_disable(intel_crtc);
  	}
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+	intel_set_cpu_fifo_underrun_reporting(i915, pipe, true);
+	intel_set_pch_fifo_underrun_reporting(i915, pipe, true);
  }
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
  				 struct drm_atomic_state *old_state)
  {
  	struct drm_crtc *crtc = old_crtc_state->base.crtc;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
@@ -6526,7 +6526,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
  	intel_dsc_disable(old_crtc_state);
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		skylake_scaler_disable(intel_crtc);
  	else
  		ironlake_pfit_disable(old_crtc_state);
@@ -6539,7 +6539,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
  static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (!crtc_state->gmch_pfit.control)
  		return;
@@ -6549,7 +6549,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
  	 * according to register description and PRM.
  	 */
  	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
-	assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(i915, crtc->pipe);
I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
  	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
@@ -6559,31 +6559,31 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
  	I915_WRITE(BCLRPAT(crtc->pipe), 0);
  }
-bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
+bool intel_port_is_combophy(struct drm_i915_private *i915, enum port port)
  {
  	if (port == PORT_NONE)
  		return false;
- if (IS_ELKHARTLAKE(dev_priv))
+	if (IS_ELKHARTLAKE(i915))
  		return port <= PORT_C;
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		return port <= PORT_B;
return false;
  }
-bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+bool intel_port_is_tc(struct drm_i915_private *i915, enum port port)
  {
-	if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 11 && !IS_ELKHARTLAKE(i915))
  		return port >= PORT_C && port <= PORT_F;
return false;
  }
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+enum tc_port intel_port_to_tc(struct drm_i915_private *i915, enum port port)
  {
-	if (!intel_port_is_tc(dev_priv, port))
+	if (!intel_port_is_tc(i915, port))
  		return PORT_TC_NONE;
return port - PORT_C;
@@ -6636,7 +6636,7 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
  				  struct intel_crtc_state *crtc_state)
  {
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_encoder *encoder;
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	enum pipe pipe = intel_crtc->pipe;
@@ -6658,7 +6658,7 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
  		mask |= BIT_ULL(intel_encoder->power_domain);
  	}
- if (HAS_DDI(dev_priv) && crtc_state->has_audio)
+	if (HAS_DDI(i915) && crtc_state->has_audio)
  		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
if (crtc_state->shared_dpll)
@@ -6671,7 +6671,7 @@ static u64
  modeset_get_crtc_power_domains(struct drm_crtc *crtc,
  			       struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	enum intel_display_power_domain domain;
  	u64 domains, new_domains, old_domains;
@@ -6683,18 +6683,18 @@ modeset_get_crtc_power_domains(struct drm_crtc *crtc,
  	domains = new_domains & ~old_domains;
for_each_power_domain(domain, domains)
-		intel_display_power_get(dev_priv, domain);
+		intel_display_power_get(i915, domain);
return old_domains & ~new_domains;
  }
-static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
+static void modeset_put_power_domains(struct drm_i915_private *i915,
  				      u64 domains)
  {
  	enum intel_display_power_domain domain;
for_each_power_domain(domain, domains)
-		intel_display_power_put_unchecked(dev_priv, domain);
+		intel_display_power_put_unchecked(i915, domain);
  }
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6704,7 +6704,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
  		to_intel_atomic_state(old_state);
  	struct drm_crtc *crtc = pipe_config->base.crtc;
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	int pipe = intel_crtc->pipe;
@@ -6717,7 +6717,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
  	intel_set_pipe_timings(pipe_config);
  	intel_set_pipe_src_size(pipe_config);
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+	if (IS_CHERRYVIEW(i915) && pipe == PIPE_B) {
  		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
  		I915_WRITE(CHV_CANVAS(pipe), 0);
  	}
@@ -6726,11 +6726,11 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true; - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	intel_set_cpu_fifo_underrun_reporting(i915, pipe, true);
  	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
-	if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		chv_prepare_pll(intel_crtc, pipe_config);
  		chv_enable_pll(intel_crtc, pipe_config);
  	} else {
@@ -6747,7 +6747,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
  	/* update DSPCNTR to configure gamma for pipe bottom color */
  	intel_disable_primary_plane(pipe_config);
- dev_priv->display.initial_watermarks(old_intel_state,
+	i915->display.initial_watermarks(old_intel_state,
  					     pipe_config);
  	intel_enable_pipe(pipe_config);
@@ -6760,7 +6760,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
  static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
  	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
@@ -6773,7 +6773,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
  		to_intel_atomic_state(old_state);
  	struct drm_crtc *crtc = pipe_config->base.crtc;
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	enum pipe pipe = intel_crtc->pipe;
@@ -6792,8 +6792,8 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
  	intel_crtc->active = true;
-	if (!IS_GEN(dev_priv, 2))
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	if (!IS_GEN(i915, 2))
+		intel_set_cpu_fifo_underrun_reporting(i915, pipe, true);
  	intel_encoders_pre_enable(crtc, pipe_config, old_state);
@@ -6806,8 +6806,8 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
  	/* update DSPCNTR to configure gamma for pipe bottom color */
  	intel_disable_primary_plane(pipe_config);
- if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state,
+	if (i915->display.initial_watermarks != NULL)
+		i915->display.initial_watermarks(old_intel_state,
  						     pipe_config);
  	else
  		intel_update_watermarks(intel_crtc);
@@ -6822,12 +6822,12 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
  static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (!old_crtc_state->gmch_pfit.control)
  		return;
- assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(i915, crtc->pipe);
DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
  		      I915_READ(PFIT_CONTROL));
@@ -6839,7 +6839,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
  {
  	struct drm_crtc *crtc = old_crtc_state->base.crtc;
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	int pipe = intel_crtc->pipe;
@@ -6847,8 +6847,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
  	 * On gen2 planes are double buffered but the pipe isn't, so we must
  	 * wait for planes to fully turn off before disabling the pipe.
  	 */
-	if (IS_GEN(dev_priv, 2))
-		intel_wait_for_vblank(dev_priv, pipe);
+	if (IS_GEN(i915, 2))
+		intel_wait_for_vblank(i915, pipe);
  	intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6862,25 +6862,25 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
  	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
-		if (IS_CHERRYVIEW(dev_priv))
-			chv_disable_pll(dev_priv, pipe);
-		else if (IS_VALLEYVIEW(dev_priv))
-			vlv_disable_pll(dev_priv, pipe);
+		if (IS_CHERRYVIEW(i915))
+			chv_disable_pll(i915, pipe);
+		else if (IS_VALLEYVIEW(i915))
+			vlv_disable_pll(i915, pipe);
  		else
  			i9xx_disable_pll(old_crtc_state);
  	}
  	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
-	if (!IS_GEN(dev_priv, 2))
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+	if (!IS_GEN(i915, 2))
+		intel_set_cpu_fifo_underrun_reporting(i915, pipe, false);
- if (!dev_priv->display.initial_watermarks)
+	if (!i915->display.initial_watermarks)
  		intel_update_watermarks(intel_crtc);
/* clock the pipe down to 640x480@60 to potentially save power */
-	if (IS_I830(dev_priv))
-		i830_enable_pipe(dev_priv, pipe);
+	if (IS_I830(i915))
+		i830_enable_pipe(i915, pipe);
  }
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
@@ -6888,9 +6888,9 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
  {
  	struct intel_encoder *encoder;
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
  	struct intel_bw_state *bw_state =
-		to_intel_bw_state(dev_priv->bw_obj.state);
+		to_intel_bw_state(i915->bw_obj.state);
  	enum intel_display_power_domain domain;
  	struct intel_plane *plane;
  	u64 domains;
@@ -6901,7 +6901,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
  	if (!intel_crtc->active)
  		return;
- for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+	for_each_intel_plane_on_crtc(&i915->drm, intel_crtc, plane) {
  		const struct intel_plane_state *plane_state =
  			to_intel_plane_state(plane->base.state);
@@ -6924,7 +6924,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
  	WARN_ON(IS_ERR(crtc_state) || ret);
-	dev_priv->display.crtc_disable(crtc_state, state);
+	i915->display.crtc_disable(crtc_state, state);
  	drm_atomic_state_put(state);
@@ -6947,12 +6947,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
  	domains = intel_crtc->enabled_power_domains;
  	for_each_power_domain(domain, domains)
-		intel_display_power_put_unchecked(dev_priv, domain);
+		intel_display_power_put_unchecked(i915, domain);
  	intel_crtc->enabled_power_domains = 0;
- dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
-	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
-	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
+	i915->active_crtcs &= ~(1 << intel_crtc->pipe);
+	i915->min_cdclk[intel_crtc->pipe] = 0;
+	i915->min_voltage_level[intel_crtc->pipe] = 0;
bw_state->data_rate[intel_crtc->pipe] = 0;
  	bw_state->num_active_planes[intel_crtc->pipe] = 0;
@@ -6964,7 +6964,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
   */
  int intel_display_suspend(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_atomic_state *state;
  	int ret;
@@ -6973,7 +6973,7 @@ int intel_display_suspend(struct drm_device *dev)
  	if (ret)
  		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
  	else
-		dev_priv->modeset_restore_state = state;
+		i915->modeset_restore_state = state;
  	return ret;
  }
@@ -7035,7 +7035,7 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
  static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
  				     struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_atomic_state *state = pipe_config->base.state;
  	struct intel_crtc *other_crtc;
  	struct intel_crtc_state *other_crtc_state;
@@ -7048,7 +7048,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
  		return -EINVAL;
  	}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
  		if (pipe_config->fdi_lanes > 2) {
  			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
  				      pipe_config->fdi_lanes);
@@ -7058,7 +7058,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
  		}
  	}
- if (INTEL_INFO(dev_priv)->num_pipes == 2)
+	if (INTEL_INFO(i915)->num_pipes == 2)
  		return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -7069,7 +7069,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
  		if (pipe_config->fdi_lanes <= 2)
  			return 0;
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
+		other_crtc = intel_get_crtc_for_pipe(i915, PIPE_C);
  		other_crtc_state =
  			intel_atomic_get_crtc_state(state, other_crtc);
  		if (IS_ERR(other_crtc_state))
@@ -7088,7 +7088,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
  			return -EINVAL;
  		}
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
+		other_crtc = intel_get_crtc_for_pipe(i915, PIPE_B);
  		other_crtc_state =
  			intel_atomic_get_crtc_state(state, other_crtc);
  		if (IS_ERR(other_crtc_state))
@@ -7156,7 +7156,7 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
  bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* IPS only exists on ULT machines and is tied to pipe A. */
  	if (!hsw_crtc_supports_ips(crtc))
@@ -7175,8 +7175,8 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
  	 *
  	 * Should measure whether using a lower cdclk w/o IPS
  	 */
-	if (IS_BROADWELL(dev_priv) &&
-	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
+	if (IS_BROADWELL(i915) &&
+	    crtc_state->pixel_rate > i915->max_cdclk_freq * 95 / 100)
  		return false;
return true;
@@ -7184,7 +7184,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(crtc_state->base.crtc->dev);
  	struct intel_atomic_state *intel_state =
  		to_intel_atomic_state(crtc_state->base.state);
@@ -7206,7 +7206,7 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
  		return false;
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-	if (IS_BROADWELL(dev_priv) &&
+	if (IS_BROADWELL(i915) &&
  	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
  		return false;
@@ -7215,11 +7215,11 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
  static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
  {
-	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* GDG double wide on either pipe, otherwise pipe A only */
-	return INTEL_GEN(dev_priv) < 4 &&
-		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
+	return INTEL_GEN(i915) < 4 &&
+		(crtc->pipe == PIPE_A || IS_I915G(i915));
  }
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
@@ -7259,9 +7259,9 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
- if (HAS_GMCH(dev_priv))
+	if (HAS_GMCH(i915))
  		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
  		crtc_state->pixel_rate =
  			crtc_state->base.adjusted_mode.crtc_clock;
@@ -7273,12 +7273,12 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
  static int intel_crtc_compute_config(struct intel_crtc *crtc,
  				     struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	int clock_limit = dev_priv->max_dotclk_freq;
+	int clock_limit = i915->max_dotclk_freq;
- if (INTEL_GEN(dev_priv) < 4) {
-		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
+	if (INTEL_GEN(i915) < 4) {
+		clock_limit = i915->max_cdclk_freq * 9 / 10;
/*
  		 * Enable double wide mode when the dot clock
@@ -7286,7 +7286,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
  		 */
  		if (intel_crtc_supports_double_wide(crtc) &&
  		    adjusted_mode->crtc_clock > clock_limit) {
-			clock_limit = dev_priv->max_dotclk_freq;
+			clock_limit = i915->max_dotclk_freq;
  			pipe_config->double_wide = true;
  		}
  	}
@@ -7323,7 +7323,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
  		}
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
-		    intel_is_dual_link_lvds(dev_priv)) {
+		    intel_is_dual_link_lvds(i915)) {
  			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
  			return -EINVAL;
  		}
@@ -7332,7 +7332,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
  	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
  	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
  	 */
-	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+	if ((INTEL_GEN(i915) > 4 || IS_G4X(i915)) &&
  		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
  		return -EINVAL;
@@ -7392,12 +7392,12 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
  		    constant_n);
  }
-static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+static inline bool intel_panel_use_ssc(struct drm_i915_private *i915)
  {
  	if (i915_modparams.panel_use_ssc >= 0)
  		return i915_modparams.panel_use_ssc != 0;
-	return dev_priv->vbt.lvds_use_ssc
-		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+	return i915->vbt.lvds_use_ssc
+		&& !(i915->quirks & QUIRK_LVDS_SSC_DISABLE);
  }
static u32 pnv_dpll_compute_fp(struct dpll *dpll)
@@ -7414,10 +7414,10 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
  				     struct intel_crtc_state *crtc_state,
  				     struct dpll *reduced_clock)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	u32 fp, fp2 = 0;
- if (IS_PINEVIEW(dev_priv)) {
+	if (IS_PINEVIEW(i915)) {
  		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
  		if (reduced_clock)
  			fp2 = pnv_dpll_compute_fp(reduced_clock);
@@ -7437,7 +7437,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
  	}
  }
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
+static void vlv_pllb_recal_opamp(struct drm_i915_private *i915, enum pipe
  		pipe)
  {
  	u32 reg_val;
@@ -7446,31 +7446,31 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
  	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
  	 * and set it to a reasonable value instead.
  	 */
-	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+	reg_val = vlv_dpio_read(i915, pipe, VLV_PLL_DW9(1));
  	reg_val &= 0xffffff00;
  	reg_val |= 0x00000030;
-	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+	vlv_dpio_write(i915, pipe, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+	reg_val = vlv_dpio_read(i915, pipe, VLV_REF_DW13);
  	reg_val &= 0x00ffffff;
  	reg_val |= 0x8c000000;
-	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+	vlv_dpio_write(i915, pipe, VLV_REF_DW13, reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+	reg_val = vlv_dpio_read(i915, pipe, VLV_PLL_DW9(1));
  	reg_val &= 0xffffff00;
-	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+	vlv_dpio_write(i915, pipe, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+	reg_val = vlv_dpio_read(i915, pipe, VLV_REF_DW13);
  	reg_val &= 0x00ffffff;
  	reg_val |= 0xb0000000;
-	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+	vlv_dpio_write(i915, pipe, VLV_REF_DW13, reg_val);
  }
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
  					 const struct intel_link_m_n *m_n)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
@@ -7479,17 +7479,17 @@ static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
  	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
  }
-static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+static bool transcoder_has_m2_n2(struct drm_i915_private *i915,
  				 enum transcoder transcoder)
  {
-	if (IS_HASWELL(dev_priv))
+	if (IS_HASWELL(i915))
  		return transcoder == TRANSCODER_EDP;
/*
  	 * Strictly speaking some registers are available before
  	 * gen7, but we only support DRRS on gen7+
  	 */
-	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
+	return IS_GEN(i915, 7) || IS_CHERRYVIEW(i915);
  }
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -7497,11 +7497,11 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
  					 const struct intel_link_m_n *m2_n2)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	enum transcoder transcoder = crtc_state->cpu_transcoder;
- if (INTEL_GEN(dev_priv) >= 5) {
+	if (INTEL_GEN(i915) >= 5) {
  		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
  		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
  		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
@@ -7511,7 +7511,7 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
  		 * (to make sure the registers are not unnecessarily accessed).
  		 */
  		if (m2_n2 && crtc_state->has_drrs &&
-		    transcoder_has_m2_n2(dev_priv, transcoder)) {
+		    transcoder_has_m2_n2(i915, transcoder)) {
  			I915_WRITE(PIPE_DATA_M2(transcoder),
  					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
  			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -7588,7 +7588,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
  			    const struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum pipe pipe = crtc->pipe;
  	u32 mdiv;
  	u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -7603,7 +7603,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
  	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  		return;
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
bestn = pipe_config->dpll.n;
  	bestm1 = pipe_config->dpll.m1;
@@ -7615,18 +7615,18 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
/* PLL B needs special handling */
  	if (pipe == PIPE_B)
-		vlv_pllb_recal_opamp(dev_priv, pipe);
+		vlv_pllb_recal_opamp(i915, pipe);
/* Set up Tx target for periodic Rcomp update */
-	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
+	vlv_dpio_write(i915, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
-	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
+	reg_val = vlv_dpio_read(i915, pipe, VLV_PLL_DW8(pipe));
  	reg_val &= 0x00ffffff;
-	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
+	vlv_dpio_write(i915, pipe, VLV_PLL_DW8(pipe), reg_val);
/* Disable fast lock */
-	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
+	vlv_dpio_write(i915, pipe, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
  	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -7640,55 +7640,55 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
  	 * Note: don't use the DAC post divider as it seems unstable.
  	 */
  	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
-	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+	vlv_dpio_write(i915, pipe, VLV_PLL_DW3(pipe), mdiv);
mdiv |= DPIO_ENABLE_CALIBRATION;
-	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+	vlv_dpio_write(i915, pipe, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
  	if (pipe_config->port_clock == 162000 ||
  	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
  	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
-		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+		vlv_dpio_write(i915, pipe, VLV_PLL_DW10(pipe),
  				 0x009f0003);
  	else
-		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+		vlv_dpio_write(i915, pipe, VLV_PLL_DW10(pipe),
  				 0x00d0000f);
if (intel_crtc_has_dp_encoder(pipe_config)) {
  		/* Use SSC source */
  		if (pipe == PIPE_A)
-			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+			vlv_dpio_write(i915, pipe, VLV_PLL_DW5(pipe),
  					 0x0df40000);
  		else
-			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+			vlv_dpio_write(i915, pipe, VLV_PLL_DW5(pipe),
  					 0x0df70000);
  	} else { /* HDMI or VGA */
  		/* Use bend source */
  		if (pipe == PIPE_A)
-			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+			vlv_dpio_write(i915, pipe, VLV_PLL_DW5(pipe),
  					 0x0df70000);
  		else
-			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+			vlv_dpio_write(i915, pipe, VLV_PLL_DW5(pipe),
  					 0x0df40000);
  	}
- coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
+	coreclk = vlv_dpio_read(i915, pipe, VLV_PLL_DW7(pipe));
  	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
  	if (intel_crtc_has_dp_encoder(pipe_config))
  		coreclk |= 0x01000000;
-	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
+	vlv_dpio_write(i915, pipe, VLV_PLL_DW7(pipe), coreclk);
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+	vlv_dpio_write(i915, pipe, VLV_PLL_DW11(pipe), 0x87871000);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
static void chv_prepare_pll(struct intel_crtc *crtc,
  			    const struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum pipe pipe = crtc->pipe;
  	enum dpio_channel port = vlv_pipe_to_channel(pipe);
  	u32 loopfilter, tribuf_calcntr;
@@ -7714,42 +7714,42 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
  	dpio_val = 0;
  	loopfilter = 0;
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* p1 and p2 divider */
-	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+	vlv_dpio_write(i915, pipe, CHV_CMN_DW13(port),
  			5 << DPIO_CHV_S1_DIV_SHIFT |
  			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
  			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
  			1 << DPIO_CHV_K_DIV_SHIFT);
/* Feedback post-divider - m2 */
-	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+	vlv_dpio_write(i915, pipe, CHV_PLL_DW0(port), bestm2);
/* Feedback refclk divider - n and m1 */
-	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+	vlv_dpio_write(i915, pipe, CHV_PLL_DW1(port),
  			DPIO_CHV_M1_DIV_BY_2 |
  			1 << DPIO_CHV_N_DIV_SHIFT);
/* M2 fraction division */
-	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+	vlv_dpio_write(i915, pipe, CHV_PLL_DW2(port), bestm2_frac);
/* M2 fraction division enable */
-	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+	dpio_val = vlv_dpio_read(i915, pipe, CHV_PLL_DW3(port));
  	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
  	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
  	if (bestm2_frac)
  		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
-	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+	vlv_dpio_write(i915, pipe, CHV_PLL_DW3(port), dpio_val);
/* Program digital lock detect threshold */
-	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+	dpio_val = vlv_dpio_read(i915, pipe, CHV_PLL_DW9(port));
  	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
  					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
  	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
  	if (!bestm2_frac)
  		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
-	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
+	vlv_dpio_write(i915, pipe, CHV_PLL_DW9(port), dpio_val);
/* Loop filter */
  	if (vco == 5400000) {
@@ -7774,24 +7774,24 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
  		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  		tribuf_calcntr = 0;
  	}
-	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+	vlv_dpio_write(i915, pipe, CHV_PLL_DW6(port), loopfilter);
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+	dpio_val = vlv_dpio_read(i915, pipe, CHV_PLL_DW8(port));
  	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
  	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
-	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+	vlv_dpio_write(i915, pipe, CHV_PLL_DW8(port), dpio_val);
/* AFC Recal */
-	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
-			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+	vlv_dpio_write(i915, pipe, CHV_CMN_DW14(port),
+			vlv_dpio_read(i915, pipe, CHV_CMN_DW14(port)) |
  			DPIO_AFC_RECAL);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
/**
   * vlv_force_pll_on - forcibly enable just the PLL
- * @dev_priv: i915 private structure
+ * @i915: i915 private structure
   * @pipe: pipe PLL to enable
   * @dpll: PLL configuration
   *
@@ -7799,10 +7799,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
   * in cases where we need the PLL enabled even when @pipe is not going to
   * be enabled.
   */
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *i915, enum pipe pipe,
  		     const struct dpll *dpll)
  {
-	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
  	struct intel_crtc_state *pipe_config;
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
@@ -7813,7 +7813,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
  	pipe_config->pixel_multiplier = 1;
  	pipe_config->dpll = *dpll;
- if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		chv_compute_dpll(crtc, pipe_config);
  		chv_prepare_pll(crtc, pipe_config);
  		chv_enable_pll(crtc, pipe_config);
@@ -7830,25 +7830,25 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
/**
   * vlv_force_pll_off - forcibly disable just the PLL
- * @dev_priv: i915 private structure
+ * @i915: i915 private structure
   * @pipe: pipe PLL to disable
   *
   * Disable the PLL for @pipe. To be used in cases where we need
   * the PLL enabled even when @pipe is not going to be enabled.
   */
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_force_pll_off(struct drm_i915_private *i915, enum pipe pipe)
  {
-	if (IS_CHERRYVIEW(dev_priv))
-		chv_disable_pll(dev_priv, pipe);
+	if (IS_CHERRYVIEW(i915))
+		chv_disable_pll(i915, pipe);
  	else
-		vlv_disable_pll(dev_priv, pipe);
+		vlv_disable_pll(i915, pipe);
  }
static void i9xx_compute_dpll(struct intel_crtc *crtc,
  			      struct intel_crtc_state *crtc_state,
  			      struct dpll *reduced_clock)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	u32 dpll;
  	struct dpll *clock = &crtc_state->dpll;
@@ -7861,8 +7861,8 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
  	else
  		dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
-	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+	if (IS_I945G(i915) || IS_I945GM(i915) ||
+	    IS_G33(i915) || IS_PINEVIEW(i915)) {
  		dpll |= (crtc_state->pixel_multiplier - 1)
  			<< SDVO_MULTIPLIER_SHIFT_HIRES;
  	}
@@ -7875,11 +7875,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
  		dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
-	if (IS_PINEVIEW(dev_priv))
+	if (IS_PINEVIEW(i915))
  		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
  	else {
  		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-		if (IS_G4X(dev_priv) && reduced_clock)
+		if (IS_G4X(i915) && reduced_clock)
  			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
  	}
  	switch (clock->p2) {
@@ -7896,13 +7896,13 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
  		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
  		break;
  	}
-	if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
  		dpll |= PLL_REF_INPUT_TVCLKINBC;
  	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-		 intel_panel_use_ssc(dev_priv))
+		 intel_panel_use_ssc(i915))
  		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  	else
  		dpll |= PLL_REF_INPUT_DREFCLK;
@@ -7910,7 +7910,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
  	dpll |= DPLL_VCO_ENABLE;
  	crtc_state->dpll_hw_state.dpll = dpll;
- if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
  			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
  		crtc_state->dpll_hw_state.dpll_md = dpll_md;
@@ -7922,7 +7922,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
  			      struct dpll *reduced_clock)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 dpll;
  	struct dpll *clock = &crtc_state->dpll;
@@ -7953,12 +7953,12 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
  	 * both DPLLS. The spec says we should disable the DVO 2X clock
  	 * when not needed, but this seems to work fine in practice.
  	 */
-	if (IS_I830(dev_priv) ||
+	if (IS_I830(i915) ||
  	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
  		dpll |= DPLL_DVO_2X_MODE;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-	    intel_panel_use_ssc(dev_priv))
+	    intel_panel_use_ssc(i915))
  		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  	else
  		dpll |= PLL_REF_INPUT_DREFCLK;
@@ -7970,7 +7970,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
  static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
@@ -7996,7 +7996,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
  			vsyncshift += adjusted_mode->crtc_htotal;
  	}
- if (INTEL_GEN(dev_priv) > 3)
+	if (INTEL_GEN(i915) > 3)
  		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
I915_WRITE(HTOTAL(cpu_transcoder),
@@ -8023,7 +8023,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
  	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
  	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
  	 * bits. */
-	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
+	if (IS_HASWELL(i915) && cpu_transcoder == TRANSCODER_EDP &&
  	    (pipe == PIPE_B || pipe == PIPE_C))
  		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
@@ -8032,7 +8032,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
  static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
/* pipesrc controls the size that is scaled from, which should
@@ -8047,7 +8047,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
  				   struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
  	u32 tmp;
@@ -8092,7 +8092,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
  				    struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 tmp;
tmp = I915_READ(PIPESRC(crtc->pipe));
@@ -8129,21 +8129,21 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
  static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	u32 pipeconf;
pipeconf = 0; /* we keep both pipes enabled on 830 */
-	if (IS_I830(dev_priv))
+	if (IS_I830(i915))
  		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
if (crtc_state->double_wide)
  		pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
-	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
-	    IS_CHERRYVIEW(dev_priv)) {
+	if (IS_G4X(i915) || IS_VALLEYVIEW(i915) ||
+	    IS_CHERRYVIEW(i915)) {
  		/* Bspec claims that we can't use dithering for 30bpp pipes. */
  		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
  			pipeconf |= PIPECONF_DITHER_EN |
@@ -8166,7 +8166,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
  	}
if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
-		if (INTEL_GEN(dev_priv) < 4 ||
+		if (INTEL_GEN(i915) < 4 ||
  		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
  			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
  		else
@@ -8175,7 +8175,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
  		pipeconf |= PIPECONF_PROGRESSIVE;
  	}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
  	     crtc_state->limited_color_range)
  		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
@@ -8189,7 +8189,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
  				   struct intel_crtc_state *crtc_state)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	const struct intel_limit *limit;
  	int refclk = 48000;
@@ -8197,8 +8197,8 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
  	       sizeof(crtc_state->dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv)) {
-			refclk = dev_priv->vbt.lvds_ssc_freq;
+		if (intel_panel_use_ssc(i915)) {
+			refclk = i915->vbt.lvds_ssc_freq;
  			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  		}
@@ -8224,7 +8224,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
  static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
  				  struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct intel_limit *limit;
  	int refclk = 96000;
@@ -8232,12 +8232,12 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
  	       sizeof(crtc_state->dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv)) {
-			refclk = dev_priv->vbt.lvds_ssc_freq;
+		if (intel_panel_use_ssc(i915)) {
+			refclk = i915->vbt.lvds_ssc_freq;
  			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  		}
- if (intel_is_dual_link_lvds(dev_priv))
+		if (intel_is_dual_link_lvds(i915))
  			limit = &intel_limits_g4x_dual_channel_lvds;
  		else
  			limit = &intel_limits_g4x_single_channel_lvds;
@@ -8267,7 +8267,7 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
  				  struct intel_crtc_state *crtc_state)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	const struct intel_limit *limit;
  	int refclk = 96000;
@@ -8275,8 +8275,8 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
  	       sizeof(crtc_state->dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv)) {
-			refclk = dev_priv->vbt.lvds_ssc_freq;
+		if (intel_panel_use_ssc(i915)) {
+			refclk = i915->vbt.lvds_ssc_freq;
  			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  		}
@@ -8301,7 +8301,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
  				   struct intel_crtc_state *crtc_state)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	const struct intel_limit *limit;
  	int refclk = 96000;
@@ -8309,8 +8309,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
  	       sizeof(crtc_state->dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv)) {
-			refclk = dev_priv->vbt.lvds_ssc_freq;
+		if (intel_panel_use_ssc(i915)) {
+			refclk = i915->vbt.lvds_ssc_freq;
  			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  		}
@@ -8373,22 +8373,22 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
  	return 0;
  }
-static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
+static bool i9xx_has_pfit(struct drm_i915_private *i915)
  {
-	if (IS_I830(dev_priv))
+	if (IS_I830(i915))
  		return false;
- return INTEL_GEN(dev_priv) >= 4 ||
-		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+	return INTEL_GEN(i915) >= 4 ||
+		IS_PINEVIEW(i915) || IS_MOBILE(i915);
  }
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
  				 struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	u32 tmp;
- if (!i9xx_has_pfit(dev_priv))
+	if (!i9xx_has_pfit(i915))
  		return;
tmp = I915_READ(PFIT_CONTROL);
@@ -8396,7 +8396,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
  		return;
/* Check whether the pfit is attached to our pipe. */
-	if (INTEL_GEN(dev_priv) < 4) {
+	if (INTEL_GEN(i915) < 4) {
  		if (crtc->pipe != PIPE_B)
  			return;
  	} else {
@@ -8412,7 +8412,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
  			       struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int pipe = pipe_config->cpu_transcoder;
  	struct dpll clock;
  	u32 mdiv;
@@ -8422,9 +8422,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
  	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  		return;
- vlv_dpio_get(dev_priv);
-	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
-	vlv_dpio_put(dev_priv);
+	vlv_dpio_get(i915);
+	mdiv = vlv_dpio_read(i915, pipe, VLV_PLL_DW3(pipe));
+	vlv_dpio_put(i915);
clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
  	clock.m2 = mdiv & DPIO_M2DIV_MASK;
@@ -8440,7 +8440,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
  			      struct intel_initial_plane_config *plane_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
  	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
  	enum pipe pipe;
@@ -8467,7 +8467,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
  	val = I915_READ(DSPCNTR(i9xx_plane));

-	if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		if (val & DISPPLANE_TILED) {
  			plane_config->tiling = I915_TILING_X;
  			fb->modifier = I915_FORMAT_MOD_X_TILED;
@@ -8477,7 +8477,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
  			plane_config->rotation = DRM_MODE_ROTATE_180;
  	}
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
+	if (IS_CHERRYVIEW(i915) && pipe == PIPE_B &&
  	    val & DISPPLANE_MIRROR)
  		plane_config->rotation |= DRM_MODE_REFLECT_X;
@@ -8485,10 +8485,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
  	fourcc = i9xx_format_to_fourcc(pixel_format);
  	fb->format = drm_format_info(fourcc);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
  		offset = I915_READ(DSPOFFSET(i9xx_plane));
  		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
-	} else if (INTEL_GEN(dev_priv) >= 4) {
+	} else if (INTEL_GEN(i915) >= 4) {
  		if (plane_config->tiling)
  			offset = I915_READ(DSPTILEOFF(i9xx_plane));
  		else
@@ -8522,7 +8522,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
  			       struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int pipe = pipe_config->cpu_transcoder;
  	enum dpio_channel port = vlv_pipe_to_channel(pipe);
  	struct dpll clock;
@@ -8533,13 +8533,13 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
  	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  		return;
- vlv_dpio_get(dev_priv);
-	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
-	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
-	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
-	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
-	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
-	vlv_dpio_put(dev_priv);
+	vlv_dpio_get(i915);
+	cmn_dw13 = vlv_dpio_read(i915, pipe, CHV_CMN_DW13(port));
+	pll_dw0 = vlv_dpio_read(i915, pipe, CHV_PLL_DW0(port));
+	pll_dw1 = vlv_dpio_read(i915, pipe, CHV_PLL_DW1(port));
+	pll_dw2 = vlv_dpio_read(i915, pipe, CHV_PLL_DW2(port));
+	pll_dw3 = vlv_dpio_read(i915, pipe, CHV_PLL_DW3(port));
+	vlv_dpio_put(i915);
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
  	clock.m2 = (pll_dw0 & 0xff) << 22;
@@ -8555,12 +8555,12 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
  static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
  					struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
  	pipe_config->lspcon_downsampling = false;

-	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
+	if (IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9) {
  		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
@@ -8571,8 +8571,8 @@ static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
  				/* We support 4:2:0 in full blend mode only */
  				if (!blend)
  					output = INTEL_OUTPUT_FORMAT_INVALID;
-				else if (!(IS_GEMINILAKE(dev_priv) ||
-					   INTEL_GEN(dev_priv) >= 10))
+				else if (!(IS_GEMINILAKE(i915) ||
+					   INTEL_GEN(i915) >= 10))
  					output = INTEL_OUTPUT_FORMAT_INVALID;
  				else
  					output = INTEL_OUTPUT_FORMAT_YCBCR420;
@@ -8599,7 +8599,7 @@ static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
  	u32 tmp;
@@ -8608,7 +8608,7 @@ static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
  	if (tmp & DISPPLANE_GAMMA_ENABLE)
  		crtc_state->gamma_enable = true;
- if (!HAS_GMCH(dev_priv) &&
+	if (!HAS_GMCH(i915) &&
  	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
  		crtc_state->csc_enable = true;
  }
@@ -8616,14 +8616,14 @@ static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
  static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
  				 struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum intel_display_power_domain power_domain;
  	intel_wakeref_t wakeref;
  	u32 tmp;
  	bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return false;
@@ -8637,8 +8637,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
  	if (!(tmp & PIPECONF_ENABLE))
  		goto out;
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
-	    IS_CHERRYVIEW(dev_priv)) {
+	if (IS_G4X(i915) || IS_VALLEYVIEW(i915) ||
+	    IS_CHERRYVIEW(i915)) {
  		switch (tmp & PIPECONF_BPC_MASK) {
  		case PIPECONF_6BPC:
  			pipe_config->pipe_bpp = 18;
@@ -8654,20 +8654,20 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
  		}
  	}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
  	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
  		pipe_config->limited_color_range = true;
pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
  		PIPECONF_GAMMA_MODE_SHIFT;
- if (IS_CHERRYVIEW(dev_priv))
+	if (IS_CHERRYVIEW(i915))
  		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
  	intel_color_get_config(pipe_config);
- if (INTEL_GEN(dev_priv) < 4)
+	if (INTEL_GEN(i915) < 4)
  		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
intel_get_pipe_timings(crtc, pipe_config);
@@ -8675,18 +8675,18 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
  	i9xx_get_pfit_config(crtc, pipe_config);

-	if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		/* No way to read it out on pipes B and C */
-		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
-			tmp = dev_priv->chv_dpll_md[crtc->pipe];
+		if (IS_CHERRYVIEW(i915) && crtc->pipe != PIPE_A)
+			tmp = i915->chv_dpll_md[crtc->pipe];
  		else
  			tmp = I915_READ(DPLL_MD(crtc->pipe));
  		pipe_config->pixel_multiplier =
  			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
  			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
  		pipe_config->dpll_hw_state.dpll_md = tmp;
-	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
-		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+	} else if (IS_I945G(i915) || IS_I945GM(i915) ||
+		   IS_G33(i915) || IS_PINEVIEW(i915)) {
  		tmp = I915_READ(DPLL(crtc->pipe));
  		pipe_config->pixel_multiplier =
  			((tmp & SDVO_MULTIPLIER_MASK)
@@ -8698,7 +8698,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
  		pipe_config->pixel_multiplier = 1;
  	}
  	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
-	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+	if (!IS_VALLEYVIEW(i915) && !IS_CHERRYVIEW(i915)) {
  		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
  		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
  	} else {
@@ -8708,9 +8708,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
  						     DPLL_PORTB_READY_MASK);
  	}
- if (IS_CHERRYVIEW(dev_priv))
+	if (IS_CHERRYVIEW(i915))
  		chv_crtc_clock_get(crtc, pipe_config);
-	else if (IS_VALLEYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915))
  		vlv_crtc_clock_get(crtc, pipe_config);
  	else
  		i9xx_crtc_clock_get(crtc, pipe_config);
@@ -8726,12 +8726,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
  	ret = true;
out:
-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return ret;
  }
-static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void ironlake_init_pch_refclk(struct drm_i915_private *i915)
  {
  	struct intel_encoder *encoder;
  	int i;
@@ -8744,7 +8744,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
  	bool using_ssc_source = false;
/* We need to take the global config into account */
-	for_each_intel_encoder(&dev_priv->drm, encoder) {
+	for_each_intel_encoder(&i915->drm, encoder) {
  		switch (encoder->type) {
  		case INTEL_OUTPUT_LVDS:
  			has_panel = true;
@@ -8760,8 +8760,8 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
  		}
  	}
- if (HAS_PCH_IBX(dev_priv)) {
-		has_ck505 = dev_priv->vbt.display_clock_mode;
+	if (HAS_PCH_IBX(i915)) {
+		has_ck505 = i915->vbt.display_clock_mode;
  		can_ssc = has_ck505;
  	} else {
  		has_ck505 = false;
@@ -8769,7 +8769,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
  	}
/* Check if any DPLLs are using the SSC source */
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < i915->num_shared_dpll; i++) {
  		u32 temp = I915_READ(PCH_DPLL(i));
if (!(temp & DPLL_VCO_ENABLE))
@@ -8810,11 +8810,11 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
  	if (has_panel) {
  		final |= DREF_SSC_SOURCE_ENABLE;
- if (intel_panel_use_ssc(dev_priv) && can_ssc)
+		if (intel_panel_use_ssc(i915) && can_ssc)
  			final |= DREF_SSC1_ENABLE;
if (has_cpu_edp) {
-			if (intel_panel_use_ssc(dev_priv) && can_ssc)
+			if (intel_panel_use_ssc(i915) && can_ssc)
  				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
  			else
  				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
@@ -8841,7 +8841,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
  		val |= DREF_SSC_SOURCE_ENABLE;
/* SSC must be turned on before enabling the CPU output */
-		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+		if (intel_panel_use_ssc(i915) && can_ssc) {
  			DRM_DEBUG_KMS("Using SSC on panel\n");
  			val |= DREF_SSC1_ENABLE;
  		} else
@@ -8856,7 +8856,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Enable CPU source on CPU attached eDP */
  		if (has_cpu_edp) {
-			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+			if (intel_panel_use_ssc(i915) && can_ssc) {
  				DRM_DEBUG_KMS("Using SSC on eDP\n");
  				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
  			} else
@@ -8898,7 +8898,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
  	BUG_ON(val != final);
  }
-static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
+static void lpt_reset_fdi_mphy(struct drm_i915_private *i915)
  {
  	u32 tmp;
@@ -8920,78 +8920,78 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
  }
/* WaMPhyProgramming:hsw */
-static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
+static void lpt_program_fdi_mphy(struct drm_i915_private *i915)
  {
  	u32 tmp;
- tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x8008, SBI_MPHY);
  	tmp &= ~(0xFF << 24);
  	tmp |= (0x12 << 24);
-	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x8008, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x2008, SBI_MPHY);
  	tmp |= (1 << 11);
-	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x2008, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x2108, SBI_MPHY);
  	tmp |= (1 << 11);
-	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x2108, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x206C, SBI_MPHY);
  	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
-	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x206C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x216C, SBI_MPHY);
  	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
-	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x216C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x2080, SBI_MPHY);
  	tmp &= ~(7 << 13);
  	tmp |= (5 << 13);
-	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x2080, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x2180, SBI_MPHY);
  	tmp &= ~(7 << 13);
  	tmp |= (5 << 13);
-	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x2180, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x208C, SBI_MPHY);
  	tmp &= ~0xFF;
  	tmp |= 0x1C;
-	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x208C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x218C, SBI_MPHY);
  	tmp &= ~0xFF;
  	tmp |= 0x1C;
-	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x218C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x2098, SBI_MPHY);
  	tmp &= ~(0xFF << 16);
  	tmp |= (0x1C << 16);
-	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x2098, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x2198, SBI_MPHY);
  	tmp &= ~(0xFF << 16);
  	tmp |= (0x1C << 16);
-	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x2198, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x20C4, SBI_MPHY);
  	tmp |= (1 << 27);
-	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x20C4, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x21C4, SBI_MPHY);
  	tmp |= (1 << 27);
-	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x21C4, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x20EC, SBI_MPHY);
  	tmp &= ~(0xF << 28);
  	tmp |= (4 << 28);
-	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x20EC, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+	tmp = intel_sbi_read(i915, 0x21EC, SBI_MPHY);
  	tmp &= ~(0xF << 28);
  	tmp |= (4 << 28);
-	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+	intel_sbi_write(i915, 0x21EC, tmp, SBI_MPHY);
  }
/* Implements 3 different sequences from BSpec chapter "Display iCLK
@@ -9000,69 +9000,69 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
   * - Sequence to enable CLKOUT_DP without spread
   * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
   */
-static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+static void lpt_enable_clkout_dp(struct drm_i915_private *i915,
  				 bool with_spread, bool with_fdi)
  {
  	u32 reg, tmp;
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
  		with_spread = true;
-	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
+	if (WARN(HAS_PCH_LPT_LP(i915) &&
  	    with_fdi, "LP PCH doesn't have FDI\n"))
  		with_fdi = false;
- mutex_lock(&dev_priv->sb_lock);
+	mutex_lock(&i915->sb_lock);
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp = intel_sbi_read(i915, SBI_SSCCTL, SBI_ICLK);
  	tmp &= ~SBI_SSCCTL_DISABLE;
  	tmp |= SBI_SSCCTL_PATHALT;
-	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+	intel_sbi_write(i915, SBI_SSCCTL, tmp, SBI_ICLK);
  	udelay(24);

  	if (with_spread) {
-		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+		tmp = intel_sbi_read(i915, SBI_SSCCTL, SBI_ICLK);
  		tmp &= ~SBI_SSCCTL_PATHALT;
-		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+		intel_sbi_write(i915, SBI_SSCCTL, tmp, SBI_ICLK);
if (with_fdi) {
-			lpt_reset_fdi_mphy(dev_priv);
-			lpt_program_fdi_mphy(dev_priv);
+			lpt_reset_fdi_mphy(i915);
+			lpt_program_fdi_mphy(i915);
  		}
  	}
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
-	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+	reg = HAS_PCH_LPT_LP(i915) ? SBI_GEN0 : SBI_DBUFF0;
+	tmp = intel_sbi_read(i915, reg, SBI_ICLK);
  	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
-	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+	intel_sbi_write(i915, reg, tmp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+	mutex_unlock(&i915->sb_lock);
  }
/* Sequence to disable CLKOUT_DP */
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+void lpt_disable_clkout_dp(struct drm_i915_private *i915)
  {
  	u32 reg, tmp;
- mutex_lock(&dev_priv->sb_lock);
+	mutex_lock(&i915->sb_lock);
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
-	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+	reg = HAS_PCH_LPT_LP(i915) ? SBI_GEN0 : SBI_DBUFF0;
+	tmp = intel_sbi_read(i915, reg, SBI_ICLK);
  	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
-	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+	intel_sbi_write(i915, reg, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp = intel_sbi_read(i915, SBI_SSCCTL, SBI_ICLK);
  	if (!(tmp & SBI_SSCCTL_DISABLE)) {
  		if (!(tmp & SBI_SSCCTL_PATHALT)) {
  			tmp |= SBI_SSCCTL_PATHALT;
-			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+			intel_sbi_write(i915, SBI_SSCCTL, tmp, SBI_ICLK);
  			udelay(32);
  		}
  		tmp |= SBI_SSCCTL_DISABLE;
-		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+		intel_sbi_write(i915, SBI_SSCCTL, tmp, SBI_ICLK);
  	}
- mutex_unlock(&dev_priv->sb_lock);
+	mutex_unlock(&i915->sb_lock);
  }
#define BEND_IDX(steps) ((50 + (steps)) / 5)
@@ -9097,7 +9097,7 @@ static const u16 sscdivintphase[] = {
   * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
   * change in clock period = -(steps / 10) * 5.787 ps
   */
-static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
+static void lpt_bend_clkout_dp(struct drm_i915_private *i915, int steps)
  {
  	u32 tmp;
  	int idx = BEND_IDX(steps);
@@ -9108,25 +9108,25 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
  	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
  		return;
- mutex_lock(&dev_priv->sb_lock);
+	mutex_lock(&i915->sb_lock);
if (steps % 10 != 0)
  		tmp = 0xAAAAAAAB;
  	else
  		tmp = 0x00000000;
-	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
+	intel_sbi_write(i915, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
+	tmp = intel_sbi_read(i915, SBI_SSCDIVINTPHASE, SBI_ICLK);
  	tmp &= 0xffff0000;
  	tmp |= sscdivintphase[idx];
-	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
+	intel_sbi_write(i915, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+	mutex_unlock(&i915->sb_lock);
  }
  #undef BEND_IDX

-static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+static bool spll_uses_pch_ssc(struct drm_i915_private *i915)
  {
  	u32 fuse_strap = I915_READ(FUSE_STRAP);
  	u32 ctl = I915_READ(SPLL_CTL);
@@ -9138,14 +9138,14 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
  	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
  		return true;
- if (IS_BROADWELL(dev_priv) &&
+	if (IS_BROADWELL(i915) &&
  	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
  		return true;
return false;
  }
-static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
+static bool wrpll_uses_pch_ssc(struct drm_i915_private *i915,
  			       enum intel_dpll_id id)
  {
  	u32 fuse_strap = I915_READ(FUSE_STRAP);
@@ -9157,7 +9157,7 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
  	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
  		return true;
- if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
+	if ((IS_BROADWELL(i915) || IS_HSW_ULT(i915)) &&
  	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
  	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
  		return true;
@@ -9165,13 +9165,13 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
  	return false;
  }
-static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void lpt_init_pch_refclk(struct drm_i915_private *i915)
  {
  	struct intel_encoder *encoder;
  	bool pch_ssc_in_use = false;
  	bool has_fdi = false;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+	for_each_intel_encoder(&i915->drm, encoder) {
  		switch (encoder->type) {
  		case INTEL_OUTPUT_ANALOG:
  			has_fdi = true;
@@ -9196,17 +9196,17 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
  	 * clock hierarchy. That would also allow us to do
  	 * clock bending finally.
  	 */
-	if (spll_uses_pch_ssc(dev_priv)) {
+	if (spll_uses_pch_ssc(i915)) {
  		DRM_DEBUG_KMS("SPLL using PCH SSC\n");
  		pch_ssc_in_use = true;
  	}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
+	if (wrpll_uses_pch_ssc(i915, DPLL_ID_WRPLL1)) {
  		DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
  		pch_ssc_in_use = true;
  	}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
+	if (wrpll_uses_pch_ssc(i915, DPLL_ID_WRPLL2)) {
  		DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
  		pch_ssc_in_use = true;
  	}
@@ -9215,28 +9215,28 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
  		return;
if (has_fdi) {
-		lpt_bend_clkout_dp(dev_priv, 0);
-		lpt_enable_clkout_dp(dev_priv, true, true);
+		lpt_bend_clkout_dp(i915, 0);
+		lpt_enable_clkout_dp(i915, true, true);
  	} else {
-		lpt_disable_clkout_dp(dev_priv);
+		lpt_disable_clkout_dp(i915);
  	}
  }
/*
   * Initialize reference clocks when the driver loads
   */
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+void intel_init_pch_refclk(struct drm_i915_private *i915)
  {
-	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
-		ironlake_init_pch_refclk(dev_priv);
-	else if (HAS_PCH_LPT(dev_priv))
-		lpt_init_pch_refclk(dev_priv);
+	if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
+		ironlake_init_pch_refclk(i915);
+	else if (HAS_PCH_LPT(i915))
+		lpt_init_pch_refclk(i915);
  }
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	u32 val;
@@ -9280,11 +9280,11 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
  static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	u32 val = 0;
- if (IS_HASWELL(dev_priv) && crtc_state->dither)
+	if (IS_HASWELL(i915) && crtc_state->dither)
  		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -9299,7 +9299,7 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
  static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	u32 val = 0;
switch (crtc_state->pipe_bpp) {
@@ -9331,7 +9331,7 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
  		val |= PIPEMISC_YUV420_ENABLE |
  			PIPEMISC_YUV420_MODE_FULL_BLEND;
- if (INTEL_GEN(dev_priv) >= 11 &&
+	if (INTEL_GEN(i915) >= 11 &&
  	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
  					   BIT(PLANE_CURSOR))) == 0)
  		val |= PIPEMISC_HDR_MODE_PRECISION;
@@ -9341,7 +9341,7 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	u32 tmp;
tmp = I915_READ(PIPEMISC(crtc->pipe));
@@ -9381,17 +9381,17 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
  				  struct intel_crtc_state *crtc_state,
  				  struct dpll *reduced_clock)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	u32 dpll, fp, fp2;
  	int factor;
/* Enable autotuning of the PLL clock (if permissible) */
  	factor = 21;
  	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if ((intel_panel_use_ssc(dev_priv) &&
-		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
-		    (HAS_PCH_IBX(dev_priv) &&
-		     intel_is_dual_link_lvds(dev_priv)))
+		if ((intel_panel_use_ssc(i915) &&
+		     i915->vbt.lvds_ssc_freq == 100000) ||
+		    (HAS_PCH_IBX(i915) &&
+		     intel_is_dual_link_lvds(i915)))
  			factor = 25;
  	} else if (crtc_state->sdvo_tv_clock) {
  		factor = 20;
@@ -9442,7 +9442,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
  	 * clear if it's a win or loss power wise. No point in doing
  	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
  	 */
-	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+	if (INTEL_INFO(i915)->num_pipes == 3 &&
  	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
  		dpll |= DPLL_SDVO_HIGH_SPEED;
@@ -9467,7 +9467,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
  	}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-	    intel_panel_use_ssc(dev_priv))
+	    intel_panel_use_ssc(i915))
  		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  	else
  		dpll |= PLL_REF_INPUT_DREFCLK;
@@ -9482,7 +9482,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
  static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
  				       struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct intel_limit *limit;
  	int refclk = 120000;
@@ -9494,13 +9494,13 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
  		return 0;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv)) {
+		if (intel_panel_use_ssc(i915)) {
  			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
-				      dev_priv->vbt.lvds_ssc_freq);
-			refclk = dev_priv->vbt.lvds_ssc_freq;
+				      i915->vbt.lvds_ssc_freq);
+			refclk = i915->vbt.lvds_ssc_freq;
  		}
- if (intel_is_dual_link_lvds(dev_priv)) {
+		if (intel_is_dual_link_lvds(i915)) {
  			if (refclk == 100000)
  				limit = &intel_limits_ironlake_dual_lvds_100m;
  			else
@@ -9537,7 +9537,7 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
  					 struct intel_link_m_n *m_n)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum pipe pipe = crtc->pipe;
m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
@@ -9554,10 +9554,10 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
  					 struct intel_link_m_n *m_n,
  					 struct intel_link_m_n *m2_n2)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
- if (INTEL_GEN(dev_priv) >= 5) {
+	if (INTEL_GEN(i915) >= 5) {
  		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
  		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
  		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
@@ -9566,7 +9566,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
  		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
  			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
- if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
+		if (m2_n2 && transcoder_has_m2_n2(i915, transcoder)) {
  			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
  			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
  			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
@@ -9608,7 +9608,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
  				    struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
  	u32 ps_ctrl = 0;
  	int id = -1;
@@ -9640,7 +9640,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
  				 struct intel_initial_plane_config *plane_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
  	enum plane_id plane_id = plane->id;
  	enum pipe pipe;
@@ -9667,12 +9667,12 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
  	val = I915_READ(PLANE_CTL(pipe, plane_id));

-	if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
  	else
  		pixel_format = val & PLANE_CTL_FORMAT_MASK;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915)) {
  		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
  		alpha &= PLANE_COLOR_ALPHA_MASK;
  	} else {
@@ -9729,7 +9729,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
  		break;
  	}
- if (INTEL_GEN(dev_priv) >= 10 &&
+	if (INTEL_GEN(i915) >= 10 &&
  	    val & PLANE_CTL_FLIP_HORIZONTAL)
  		plane_config->rotation |= DRM_MODE_REFLECT_X;
@@ -9766,7 +9766,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
  				     struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 tmp;
tmp = I915_READ(PF_CTL(crtc->pipe));
@@ -9779,7 +9779,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
  		/* We currently do not free assignements of panel fitters on
  		 * ivb/hsw (since we don't use the higher upscaling modes which
  		 * differentiates them) so just WARN about this case for now. */
-		if (IS_GEN(dev_priv, 7)) {
+		if (IS_GEN(i915, 7)) {
  			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
  				PF_PIPE_SEL_IVB(crtc->pipe));
  		}
@@ -9790,14 +9790,14 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
  				     struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum intel_display_power_domain power_domain;
  	intel_wakeref_t wakeref;
  	u32 tmp;
  	bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return false;
@@ -9850,7 +9850,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,

  		ironlake_get_fdi_m_n_config(crtc, pipe_config);

-		if (HAS_PCH_IBX(dev_priv)) {
+		if (HAS_PCH_IBX(i915)) {
  			/*
  			 * The pipe->pch transcoder and pch transcoder->pll
  			 * mapping is fixed.
@@ -9865,10 +9865,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
  		}
pipe_config->shared_dpll =
-			intel_get_shared_dpll_by_id(dev_priv, pll_id);
+			intel_get_shared_dpll_by_id(i915, pll_id);
  		pll = pipe_config->shared_dpll;
- WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+		WARN_ON(!pll->info->funcs->get_hw_state(i915, pll,
  						&pipe_config->dpll_hw_state));
tmp = pipe_config->dpll_hw_state.dpll;
@@ -9889,19 +9889,19 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
  	ret = true;
out:
-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return ret;
  }
  static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
  				      struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_atomic_state *state =
  		to_intel_atomic_state(crtc_state->base.state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
-	    INTEL_GEN(dev_priv) >= 11) {
+	    INTEL_GEN(i915) >= 11) {
  		struct intel_encoder *encoder =
  			intel_get_crtc_new_encoder(state, crtc_state);
@@ -9915,7 +9915,7 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
  	return 0;
  }
-static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
+static void cannonlake_get_ddi_pll(struct drm_i915_private *i915,
  				   enum port port,
  				   struct intel_crtc_state *pipe_config)
  {
@@ -9928,10 +9928,10 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
  	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
  		return;
- pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(i915, id);
  }
-static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
+static void icelake_get_ddi_pll(struct drm_i915_private *i915,
  				enum port port,
  				struct intel_crtc_state *pipe_config)
  {
@@ -9939,21 +9939,21 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
  	u32 temp;
/* TODO: TBT pll not implemented. */
-	if (intel_port_is_combophy(dev_priv, port)) {
+	if (intel_port_is_combophy(i915, port)) {
  		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
  		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
  		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
-	} else if (intel_port_is_tc(dev_priv, port)) {
-		id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
+	} else if (intel_port_is_tc(i915, port)) {
+		id = icl_tc_port_to_pll_id(intel_port_to_tc(i915, port));
  	} else {
  		WARN(1, "Invalid port %x\n", port);
  		return;
  	}
- pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(i915, id);
  }
-static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
+static void bxt_get_ddi_pll(struct drm_i915_private *i915,
  				enum port port,
  				struct intel_crtc_state *pipe_config)
  {
@@ -9974,10 +9974,10 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
  		return;
  	}
- pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(i915, id);
  }
-static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
+static void skylake_get_ddi_pll(struct drm_i915_private *i915,
  				enum port port,
  				struct intel_crtc_state *pipe_config)
  {
@@ -9990,10 +9990,10 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
  	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
  		return;
- pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(i915, id);
  }
-static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
+static void haswell_get_ddi_pll(struct drm_i915_private *i915,
  				enum port port,
  				struct intel_crtc_state *pipe_config)
  {
@@ -10026,7 +10026,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
  		return;
  	}
- pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(i915, id);
  }
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
@@ -10035,7 +10035,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
  				     intel_wakeref_t *wakerefs)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum intel_display_power_domain power_domain;
  	unsigned long panel_transcoder_mask = 0;
  	unsigned long enabled_panel_transcoders = 0;
@@ -10043,11 +10043,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
  	intel_wakeref_t wf;
  	u32 tmp;
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		panel_transcoder_mask |=
  			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
- if (HAS_TRANSCODER_EDP(dev_priv))
+	if (HAS_TRANSCODER_EDP(i915))
  		panel_transcoder_mask |= BIT(TRANSCODER_EDP);
/*
@@ -10062,7 +10062,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
  	 */
  	for_each_set_bit(panel_transcoder,
  			 &panel_transcoder_mask,
-			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
+			 ARRAY_SIZE(INTEL_INFO(i915)->trans_offsets)) {
  		bool force_thru = false;
  		enum pipe trans_pipe;
@@ -10113,7 +10113,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
  	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
  	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wf = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wf)
  		return false;
@@ -10131,7 +10131,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
  					 intel_wakeref_t *wakerefs)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum intel_display_power_domain power_domain;
  	enum transcoder cpu_transcoder;
  	intel_wakeref_t wf;
@@ -10147,7 +10147,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
  		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
  		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+		wf = intel_display_power_get_if_enabled(i915, power_domain);
  		if (!wf)
  			continue;
@@ -10161,7 +10161,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
  		 * registers/MIPI[BXT]. We can break out here early, since we
  		 * need the same DSI PLL to be enabled for both DSI ports.
  		 */
-		if (!bxt_dsi_pll_is_enabled(dev_priv))
+		if (!bxt_dsi_pll_is_enabled(i915))
  			break;
/* XXX: this works for video mode only */
@@ -10183,7 +10183,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
  static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
  				       struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_shared_dpll *pll;
  	enum port port;
  	u32 tmp;
@@ -10192,20 +10192,20 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
  	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

-	if (INTEL_GEN(dev_priv) >= 11)
-		icelake_get_ddi_pll(dev_priv, port, pipe_config);
-	else if (IS_CANNONLAKE(dev_priv))
-		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
-	else if (IS_GEN9_BC(dev_priv))
-		skylake_get_ddi_pll(dev_priv, port, pipe_config);
-	else if (IS_GEN9_LP(dev_priv))
-		bxt_get_ddi_pll(dev_priv, port, pipe_config);
+	if (INTEL_GEN(i915) >= 11)
+		icelake_get_ddi_pll(i915, port, pipe_config);
+	else if (IS_CANNONLAKE(i915))
+		cannonlake_get_ddi_pll(i915, port, pipe_config);
+	else if (IS_GEN9_BC(i915))
+		skylake_get_ddi_pll(i915, port, pipe_config);
+	else if (IS_GEN9_LP(i915))
+		bxt_get_ddi_pll(i915, port, pipe_config);
  	else
-		haswell_get_ddi_pll(dev_priv, port, pipe_config);
+		haswell_get_ddi_pll(i915, port, pipe_config);
pll = pipe_config->shared_dpll;
  	if (pll) {
-		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+		WARN_ON(!pll->info->funcs->get_hw_state(i915, pll,
  						&pipe_config->dpll_hw_state));
  	}
@@ -10214,7 +10214,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
  	 * DDI E. So just check whether this pipe is wired to DDI E and whether
  	 * the PCH transcoder is on.
  	 */
-	if (INTEL_GEN(dev_priv) < 9 &&
+	if (INTEL_GEN(i915) < 9 &&
  	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
  		pipe_config->has_pch_encoder = true;
@@ -10229,7 +10229,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
  static bool haswell_get_pipe_config(struct intel_crtc *crtc,
  				    struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
  	enum intel_display_power_domain power_domain;
  	u64 power_domain_mask;
@@ -10238,7 +10238,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
  	intel_crtc_init_scalers(crtc, pipe_config);
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
-	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wf = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wf)
  		return false;
@@ -10250,7 +10250,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
  	active = hsw_get_transcoder_state(crtc, pipe_config,
  					  &power_domain_mask, wakerefs);
- if (IS_GEN9_LP(dev_priv) &&
+	if (IS_GEN9_LP(i915) &&
  	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
  					 &power_domain_mask, wakerefs)) {
  		WARN_ON(active);
@@ -10261,7 +10261,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
  		goto out;
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
-	    INTEL_GEN(dev_priv) >= 11) {
+	    INTEL_GEN(i915) >= 11) {
  		haswell_get_ddi_port_state(crtc, pipe_config);
  		intel_get_pipe_timings(crtc, pipe_config);
  	}
@@ -10273,7 +10273,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
  	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
@@ -10290,19 +10290,19 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
  	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
  	WARN_ON(power_domain_mask & BIT_ULL(power_domain));
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wf = intel_display_power_get_if_enabled(i915, power_domain);
  	if (wf) {
  		wakerefs[power_domain] = wf;
  		power_domain_mask |= BIT_ULL(power_domain);
- if (INTEL_GEN(dev_priv) >= 9)
+		if (INTEL_GEN(i915) >= 9)
  			skylake_get_pfit_config(crtc, pipe_config);
  		else
  			ironlake_get_pfit_config(crtc, pipe_config);
  	}
if (hsw_crtc_supports_ips(crtc)) {
-		if (IS_HASWELL(dev_priv))
+		if (IS_HASWELL(i915))
  			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
  		else {
  			/*
@@ -10324,7 +10324,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
out:
  	for_each_power_domain(power_domain, power_domain_mask)
-		intel_display_power_put(dev_priv,
+		intel_display_power_put(i915,
  					power_domain, wakerefs[power_domain]);
return active;
@@ -10332,13 +10332,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  	u32 base;
- if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+	if (INTEL_INFO(i915)->display.cursor_needs_physical)
  		base = obj->phys_handle->busaddr;
  	else
  		base = intel_plane_ggtt_offset(plane_state);
@@ -10346,7 +10346,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
  	base += plane_state->color_plane[0].offset;
/* ILK+ do this automagically */
-	if (HAS_GMCH(dev_priv) &&
+	if (HAS_GMCH(i915) &&
  	    plane_state->base.rotation & DRM_MODE_ROTATE_180)
  		base += (plane_state->base.crtc_h *
  			 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
@@ -10532,7 +10532,7 @@ static void i845_update_cursor(struct intel_plane *plane,
  			       const struct intel_crtc_state *crtc_state,
  			       const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	u32 cntl = 0, base = 0, pos = 0, size = 0;
  	unsigned long irqflags;
@@ -10549,7 +10549,7 @@ static void i845_update_cursor(struct intel_plane *plane,
  		pos = intel_cursor_position(plane_state);
  	}
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
/* On these chipsets we can only modify the base/size/stride
  	 * whilst the cursor is disabled.
@@ -10570,7 +10570,7 @@ static void i845_update_cursor(struct intel_plane *plane,
  		I915_WRITE_FW(CURPOS(PIPE_A), pos);
  	}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static void i845_disable_cursor(struct intel_plane *plane,
@@ -10582,13 +10582,13 @@ static void i845_disable_cursor(struct intel_plane *plane,
  static bool i845_cursor_get_hw_state(struct intel_plane *plane,
  				     enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum intel_display_power_domain power_domain;
  	intel_wakeref_t wakeref;
  	bool ret;
power_domain = POWER_DOMAIN_PIPE(PIPE_A);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return false;
@@ -10596,7 +10596,7 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,

  	*pipe = PIPE_A;

-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return ret;
  }
@@ -10612,10 +10612,10 @@ i9xx_cursor_max_stride(struct intel_plane *plane,
  static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	u32 cntl = 0;
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		return cntl;
if (crtc_state->gamma_enable)
@@ -10624,7 +10624,7 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
  	if (crtc_state->csc_enable)
  		cntl |= MCURSOR_PIPE_CSC_ENABLE;
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+	if (INTEL_GEN(i915) < 5 && !IS_G4X(i915))
  		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
return cntl;
@@ -10633,11 +10633,11 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
  static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
  			   const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	u32 cntl = 0;
- if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+	if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915))
  		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
switch (plane_state->base.crtc_w) {
@@ -10663,7 +10663,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	int width = plane_state->base.crtc_w;
  	int height = plane_state->base.crtc_h;
@@ -10687,7 +10687,7 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
  	 * cursor is not rotated. Everything else requires square
  	 * cursors.
  	 */
-	if (HAS_CUR_FBC(dev_priv) &&
+	if (HAS_CUR_FBC(i915) &&
  	    plane_state->base.rotation & DRM_MODE_ROTATE_0) {
  		if (height < 8 || height > width)
  			return false;
@@ -10703,7 +10703,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
  			     struct intel_plane_state *plane_state)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	enum pipe pipe = plane->pipe;
  	int ret;
@@ -10743,7 +10743,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
  	 * display power well must be turned off and on again.
  	 * Refuse the put the cursor into that compromised position.
  	 */
-	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+	if (IS_CHERRYVIEW(i915) && pipe == PIPE_C &&
  	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
  		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
  		return -EINVAL;
@@ -10758,7 +10758,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
  			       const struct intel_crtc_state *crtc_state,
  			       const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum pipe pipe = plane->pipe;
  	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
  	unsigned long irqflags;
@@ -10774,7 +10774,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
  		pos = intel_cursor_position(plane_state);
  	}
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
/*
  	 * On some platforms writing CURCNTR first will also
@@ -10796,13 +10796,13 @@ static void i9xx_update_cursor(struct intel_plane *plane,
  	 * the CURCNTR write arms the update.
  	 */
- if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		skl_write_cursor_wm(plane, crtc_state);
if (plane->cursor.base != base ||
  	    plane->cursor.size != fbc_ctl ||
  	    plane->cursor.cntl != cntl) {
-		if (HAS_CUR_FBC(dev_priv))
+		if (HAS_CUR_FBC(i915))
  			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
  		I915_WRITE_FW(CURCNTR(pipe), cntl);
  		I915_WRITE_FW(CURPOS(pipe), pos);
@@ -10816,7 +10816,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
  		I915_WRITE_FW(CURBASE(pipe), base);
  	}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static void i9xx_disable_cursor(struct intel_plane *plane,
@@ -10828,7 +10828,7 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
  static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
  				     enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum intel_display_power_domain power_domain;
  	intel_wakeref_t wakeref;
  	bool ret;
@@ -10840,7 +10840,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
  	 * display power wells.
  	 */
  	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return false;
@@ -10848,13 +10848,13 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
  	ret = val & MCURSOR_MODE;
-	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+	if (INTEL_GEN(i915) >= 5 || IS_G4X(i915))
  		*pipe = plane->pipe;
  	else
  		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
  			MCURSOR_PIPE_SELECT_SHIFT;
- intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return ret;
  }
@@ -10924,7 +10924,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
  	struct drm_encoder *encoder = &intel_encoder->base;
  	struct drm_crtc *crtc = NULL;
  	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_mode_config *config = &dev->mode_config;
  	struct drm_atomic_state *state = NULL, *restore_state = NULL;
  	struct drm_connector_state *connector_state;
@@ -11051,7 +11051,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
  	drm_atomic_state_put(state);
/* let the connector get through one full cycle before testing */
-	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+	intel_wait_for_vblank(i915, intel_crtc->pipe);
  	return true;
fail:
@@ -11096,14 +11096,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
  static int i9xx_pll_refclk(struct drm_device *dev,
  			   const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 dpll = pipe_config->dpll_hw_state.dpll;
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
-		return dev_priv->vbt.lvds_ssc_freq;
-	else if (HAS_PCH_SPLIT(dev_priv))
+		return i915->vbt.lvds_ssc_freq;
+	else if (HAS_PCH_SPLIT(i915))
  		return 120000;
-	else if (!IS_GEN(dev_priv, 2))
+	else if (!IS_GEN(i915, 2))
  		return 96000;
  	else
  		return 48000;
@@ -11114,7 +11114,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  				struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int pipe = pipe_config->cpu_transcoder;
  	u32 dpll = pipe_config->dpll_hw_state.dpll;
  	u32 fp;
@@ -11128,7 +11128,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  		fp = pipe_config->dpll_hw_state.fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
-	if (IS_PINEVIEW(dev_priv)) {
+	if (IS_PINEVIEW(i915)) {
  		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
  		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
  	} else {
@@ -11136,8 +11136,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
  	}
- if (!IS_GEN(dev_priv, 2)) {
-		if (IS_PINEVIEW(dev_priv))
+	if (!IS_GEN(i915, 2)) {
+		if (IS_PINEVIEW(i915))
  			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
  				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
  		else
@@ -11159,12 +11159,12 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  			return;
  		}
- if (IS_PINEVIEW(dev_priv))
+		if (IS_PINEVIEW(i915))
  			port_clock = pnv_calc_dpll_params(refclk, &clock);
  		else
  			port_clock = i9xx_calc_dpll_params(refclk, &clock);
  	} else {
-		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
+		u32 lvds = IS_I830(i915) ? 0 : I915_READ(LVDS);
  		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
@@ -11221,7 +11221,7 @@ int intel_dotclock_calculate(int link_freq,
  static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  				   struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* read out port_clock from the DPLL */
  	i9xx_crtc_clock_get(crtc, pipe_config);
@@ -11232,7 +11232,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  	 * Calculate one based on the FDI configuration.
  	 */
  	pipe_config->base.adjusted_mode.crtc_clock =
-		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+		intel_dotclock_calculate(intel_fdi_link_freq(i915, pipe_config),
  					 &pipe_config->fdi_m_n);
  }
@@ -11240,7 +11240,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  struct drm_display_mode *
  intel_encoder_current_mode(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc_state *crtc_state;
  	struct drm_display_mode *mode;
  	struct intel_crtc *crtc;
@@ -11249,7 +11249,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
  	if (!encoder->get_hw_state(encoder, &pipe))
  		return NULL;
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	crtc = intel_get_crtc_for_pipe(i915, pipe);
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
  	if (!mode)
@@ -11263,7 +11263,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
  	crtc_state->base.crtc = &crtc->base;
-	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
+	if (!i915->display.get_pipe_config(crtc, crtc_state)) {
  		kfree(crtc_state);
  		kfree(mode);
  		return NULL;
@@ -11337,7 +11337,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	struct intel_plane *plane = to_intel_plane(plane_state->plane);
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	bool mode_changed = needs_modeset(crtc_state);
  	bool was_crtc_enabled = old_crtc_state->base.active;
  	bool is_crtc_enabled = crtc_state->active;
@@ -11345,7 +11345,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
  	struct drm_framebuffer *fb = plane_state->fb;
  	int ret;
- if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
+	if (INTEL_GEN(i915) >= 9 && plane->id != PLANE_CURSOR) {
  		ret = skl_update_scaler_plane(
  			to_intel_crtc_state(crtc_state),
  			to_intel_plane_state(plane_state));
@@ -11395,14 +11395,14 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
  			 turn_off, turn_on, mode_changed);
if (turn_on) {
-		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+		if (INTEL_GEN(i915) < 5 && !IS_G4X(i915))
  			pipe_config->update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
  		if (plane->id != PLANE_CURSOR)
  			pipe_config->disable_cxsr = true;
  	} else if (turn_off) {
-		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+		if (INTEL_GEN(i915) < 5 && !IS_G4X(i915))
  			pipe_config->update_wm_post = true;
/* must disable cxsr around plane enable/disable */
@@ -11410,7 +11410,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
  			pipe_config->disable_cxsr = true;
  	} else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
  					to_intel_plane_state(plane_state))) {
-		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
+		if (INTEL_GEN(i915) < 5 && !IS_G4X(i915)) {
  			/* FIXME bollocks */
  			pipe_config->update_wm_pre = true;
  			pipe_config->update_wm_post = true;
@@ -11454,8 +11454,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
  	 * plane, not only sprite plane.
  	 */
  	if (plane->id != PLANE_CURSOR &&
-	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
-	     IS_IVYBRIDGE(dev_priv)) &&
+	    (IS_GEN_RANGE(i915, 5, 6) ||
+	     IS_IVYBRIDGE(i915)) &&
  	    (turn_on || (!needs_scaling(old_plane_state) &&
  			 needs_scaling(to_intel_plane_state(plane_state)))))
  		pipe_config->disable_lp_wm = true;
@@ -11519,13 +11519,13 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
  static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
  	struct intel_plane *plane, *linked;
  	struct intel_plane_state *plane_state;
  	int i;
- if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(i915) < 11)
  		return 0;
/*
@@ -11555,7 +11555,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
  		    !(crtc_state->nv12_planes & BIT(plane->id)))
  			continue;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
+		for_each_intel_plane_on_crtc(&i915->drm, crtc, linked) {
  			if (!icl_is_nv12_y_plane(linked->id))
  				continue;
@@ -11602,21 +11602,21 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
  static int intel_crtc_atomic_check(struct drm_crtc *crtc,
  				   struct drm_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	struct intel_crtc_state *pipe_config =
  		to_intel_crtc_state(crtc_state);
  	int ret;
  	bool mode_changed = needs_modeset(crtc_state);
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
+	if (INTEL_GEN(i915) < 5 && !IS_G4X(i915) &&
  	    mode_changed && !crtc_state->active)
  		pipe_config->update_wm_post = true;
if (mode_changed && crtc_state->enable &&
-	    dev_priv->display.crtc_compute_clock &&
+	    i915->display.crtc_compute_clock &&
  	    !WARN_ON(pipe_config->shared_dpll)) {
-		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+		ret = i915->display.crtc_compute_clock(intel_crtc,
  							   pipe_config);
  		if (ret)
  			return ret;
@@ -11637,16 +11637,16 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
  	}
ret = 0;
-	if (dev_priv->display.compute_pipe_wm) {
-		ret = dev_priv->display.compute_pipe_wm(pipe_config);
+	if (i915->display.compute_pipe_wm) {
+		ret = i915->display.compute_pipe_wm(pipe_config);
  		if (ret) {
  			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
  			return ret;
  		}
  	}
- if (dev_priv->display.compute_intermediate_wm) {
-		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
+	if (i915->display.compute_intermediate_wm) {
+		if (WARN_ON(!i915->display.compute_pipe_wm))
  			return 0;
/*
@@ -11654,14 +11654,14 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
  		 * old state and the new state.  We can program these
  		 * immediately.
  		 */
-		ret = dev_priv->display.compute_intermediate_wm(pipe_config);
+		ret = i915->display.compute_intermediate_wm(pipe_config);
  		if (ret) {
  			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
  			return ret;
  		}
  	}
- if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		if (mode_changed || pipe_config->update_pipe)
  			ret = skl_update_scaler_crtc(pipe_config);
@@ -11671,11 +11671,11 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
  			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
  							    pipe_config);
  		if (!ret)
-			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
+			ret = intel_atomic_setup_scalers(i915, intel_crtc,
  							 pipe_config);
  	}
- if (HAS_IPS(dev_priv))
+	if (HAS_IPS(i915))
  		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
return ret;
@@ -11752,16 +11752,16 @@ static int
  compute_baseline_pipe_bpp(struct intel_crtc *crtc,
  			  struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct drm_atomic_state *state = pipe_config->base.state;
  	struct drm_connector *connector;
  	struct drm_connector_state *connector_state;
  	int bpp, i;
- if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
-	    IS_CHERRYVIEW(dev_priv)))
+	if ((IS_G4X(i915) || IS_VALLEYVIEW(i915) ||
+	    IS_CHERRYVIEW(i915)))
  		bpp = 10*3;
-	else if (INTEL_GEN(dev_priv) >= 5)
+	else if (INTEL_GEN(i915) >= 5)
  		bpp = 12*3;
  	else
  		bpp = 8*3;
@@ -11807,13 +11807,13 @@ intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
  }
static void
-intel_dump_infoframe(struct drm_i915_private *dev_priv,
+intel_dump_infoframe(struct drm_i915_private *i915,
  		     const union hdmi_infoframe *frame)
  {
  	if ((drm_debug & DRM_UT_KMS) == 0)
  		return;
- hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
+	hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
  }
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
@@ -11907,7 +11907,7 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
  				   const char *context)
  {
  	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct intel_plane_state *plane_state;
  	struct intel_plane *plane;
  	char buf[64];
@@ -11953,13 +11953,13 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
  		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
  	if (pipe_config->infoframes.enable &
  	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
-		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
+		intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
  	if (pipe_config->infoframes.enable &
  	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
-		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
+		intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
  	if (pipe_config->infoframes.enable &
  	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
-		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
+		intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
DRM_DEBUG_KMS("requested mode:\n");
  	drm_mode_debug_printmodeline(&pipe_config->base.mode);
@@ -11971,13 +11971,13 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
  		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
  		      pipe_config->pixel_rate);
- if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
  			      crtc->num_scalers,
  			      pipe_config->scaler_state.scaler_users,
  		              pipe_config->scaler_state.scaler_id);
- if (HAS_GMCH(dev_priv))
+	if (HAS_GMCH(i915))
  		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
  			      pipe_config->gmch_pfit.control,
  			      pipe_config->gmch_pfit.pgm_ratios,
@@ -11992,7 +11992,7 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
  	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
  		      pipe_config->ips_enabled, pipe_config->double_wide);
- intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
+	intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state);
dump_planes:
  	if (!state)
@@ -12073,7 +12073,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
  static int
  clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(crtc_state->base.crtc->dev);
  	struct intel_crtc_state *saved_state;
@@ -12090,8 +12090,8 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
  	saved_state->shared_dpll = crtc_state->shared_dpll;
  	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
  	saved_state->crc_enabled = crtc_state->crc_enabled;
-	if (IS_G4X(dev_priv) ||
-	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (IS_G4X(i915) ||
+	    IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		saved_state->wm = crtc_state->wm;
/* Keep base drm_crtc_state intact, only clear our extended struct */
@@ -12316,7 +12316,7 @@ intel_compare_infoframe(const union hdmi_infoframe *a,
  }
static void
-pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
+pipe_config_infoframe_err(struct drm_i915_private *i915,
  			  bool adjust, const char *name,
  			  const union hdmi_infoframe *a,
  			  const union hdmi_infoframe *b)
@@ -12327,15 +12327,15 @@ pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
  		drm_dbg(DRM_UT_KMS, "expected:");
-		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
+		hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, a);
  		drm_dbg(DRM_UT_KMS, "found");
-		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
+		hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, b);
  	} else {
  		drm_err("mismatch in %s infoframe", name);
  		drm_err("expected:");
-		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
+		hdmi_infoframe_log(KERN_ERR, i915->drm.dev, a);
  		drm_err("found");
-		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
+		hdmi_infoframe_log(KERN_ERR, i915->drm.dev, b);
  	}
  }
@@ -12357,17 +12357,17 @@ pipe_config_err(bool adjust, const char *name, const char *format, ...)
  	va_end(args);
  }
-static bool fastboot_enabled(struct drm_i915_private *dev_priv)
+static bool fastboot_enabled(struct drm_i915_private *i915)
  {
  	if (i915_modparams.fastboot != -1)
  		return i915_modparams.fastboot;
/* Enable fastboot by default on Skylake and newer */
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		return true;
/* Enable fastboot by default on VLV and CHV */
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		return true;
/* Disabled by default on all others */
@@ -12375,7 +12375,7 @@ static bool fastboot_enabled(struct drm_i915_private *dev_priv)
  }
static bool
-intel_pipe_config_compare(struct drm_i915_private *dev_priv,
+intel_pipe_config_compare(struct drm_i915_private *i915,
  			  struct intel_crtc_state *current_config,
  			  struct intel_crtc_state *pipe_config,
  			  bool adjust)
@@ -12385,7 +12385,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
  		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
  		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
- if (fixup_inherited && !fastboot_enabled(dev_priv)) {
+	if (fixup_inherited && !fastboot_enabled(i915)) {
  		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
  		ret = false;
  	}
@@ -12525,7 +12525,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
  #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
  	if (!intel_compare_infoframe(&current_config->infoframes.name, \
  				     &pipe_config->infoframes.name)) { \
-		pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
+		pipe_config_infoframe_err(i915, adjust, __stringify(name), \
  					  &current_config->infoframes.name, \
  					  &pipe_config->infoframes.name); \
  		ret = false; \
@@ -12544,7 +12544,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
  	PIPE_CONF_CHECK_I(lane_count);
  	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (INTEL_GEN(dev_priv) < 8) {
+	if (INTEL_GEN(i915) < 8) {
  		PIPE_CONF_CHECK_M_N(dp_m_n);
if (current_config->has_drrs)
@@ -12571,8 +12571,8 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
  	PIPE_CONF_CHECK_I(pixel_multiplier);
  	PIPE_CONF_CHECK_I(output_format);
  	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
-	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
-	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if ((INTEL_GEN(i915) < 8 && !IS_HASWELL(i915)) ||
+	    IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		PIPE_CONF_CHECK_BOOL(limited_color_range);
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
@@ -12597,7 +12597,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_X(gmch_pfit.control);
  	/* pfit ratios are autocomputed by the hw on gen4+ */
-	if (INTEL_GEN(dev_priv) < 4)
+	if (INTEL_GEN(i915) < 4)
  		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
  	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
@@ -12621,7 +12621,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
  		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
PIPE_CONF_CHECK_X(gamma_mode);
-		if (IS_CHERRYVIEW(dev_priv))
+		if (IS_CHERRYVIEW(i915))
  			PIPE_CONF_CHECK_X(cgm_mode);
  		else
  			PIPE_CONF_CHECK_X(csc_mode);
@@ -12667,7 +12667,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
  	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
  	PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
+	if (IS_G4X(i915) || INTEL_GEN(i915) >= 5)
  		PIPE_CONF_CHECK_I(pipe_bpp);
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
@@ -12694,11 +12694,11 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
  	return ret;
  }
-static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
+static void intel_pipe_config_sanity_check(struct drm_i915_private *i915,
  					   const struct intel_crtc_state *pipe_config)
  {
  	if (pipe_config->has_pch_encoder) {
-		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(i915, pipe_config),
  							    &pipe_config->fdi_m_n);
  		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
@@ -12715,7 +12715,7 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
  static void verify_wm_state(struct drm_crtc *crtc,
  			    struct drm_crtc_state *new_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
  	struct skl_hw_state {
  		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
  		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
@@ -12727,9 +12727,9 @@ static void verify_wm_state(struct drm_crtc *crtc,
  	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	const enum pipe pipe = intel_crtc->pipe;
-	int plane, level, max_level = ilk_wm_max_level(dev_priv);
+	int plane, level, max_level = ilk_wm_max_level(i915);
- if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
+	if (INTEL_GEN(i915) < 9 || !new_state->active)
  		return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
@@ -12741,17 +12741,17 @@ static void verify_wm_state(struct drm_crtc *crtc,
  	skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
-	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
-	sw_ddb = &dev_priv->wm.skl_hw.ddb;
+	skl_ddb_get_hw_state(i915, &hw->ddb);
+	sw_ddb = &i915->wm.skl_hw.ddb;
- if (INTEL_GEN(dev_priv) >= 11 &&
+	if (INTEL_GEN(i915) >= 11 &&
  	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
  		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
  			  sw_ddb->enabled_slices,
  			  hw->ddb.enabled_slices);
/* planes */
-	for_each_universal_plane(dev_priv, pipe, plane) {
+	for_each_universal_plane(i915, pipe, plane) {
  		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
hw_plane_wm = &hw->wm.planes[plane];
@@ -12933,7 +12933,7 @@ verify_crtc_state(struct drm_crtc *crtc,
  		  struct drm_crtc_state *new_crtc_state)
  {
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_encoder *encoder;
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	struct intel_crtc_state *pipe_config, *sw_config;
@@ -12949,10 +12949,10 @@ verify_crtc_state(struct drm_crtc *crtc,
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); - active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
+	active = i915->display.get_pipe_config(intel_crtc, pipe_config);
/* we keep both pipes enabled on 830 */
-	if (IS_I830(dev_priv))
+	if (IS_I830(i915))
  		active = new_crtc_state->active;
I915_STATE_WARN(new_crtc_state->active != active,
@@ -12984,10 +12984,10 @@ verify_crtc_state(struct drm_crtc *crtc,
  	if (!new_crtc_state->active)
  		return;
- intel_pipe_config_sanity_check(dev_priv, pipe_config);
+	intel_pipe_config_sanity_check(i915, pipe_config);
sw_config = to_intel_crtc_state(new_crtc_state);
-	if (!intel_pipe_config_compare(dev_priv, sw_config,
+	if (!intel_pipe_config_compare(i915, sw_config,
  				       pipe_config, false)) {
  		I915_STATE_WARN(1, "pipe state doesn't match!\n");
  		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
@@ -13009,7 +13009,7 @@ intel_verify_planes(struct intel_atomic_state *state)
  }
static void
-verify_single_dpll_state(struct drm_i915_private *dev_priv,
+verify_single_dpll_state(struct drm_i915_private *i915,
  			 struct intel_shared_dpll *pll,
  			 struct drm_crtc *crtc,
  			 struct drm_crtc_state *new_state)
@@ -13022,7 +13022,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("%s\n", pll->info->name); - active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
+	active = pll->info->funcs->get_hw_state(i915, pll, &dpll_hw_state);
if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
  		I915_STATE_WARN(!pll->on && pll->active_mask,
@@ -13068,12 +13068,12 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
  			 struct drm_crtc_state *old_crtc_state,
  			 struct drm_crtc_state *new_crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
  	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
if (new_state->shared_dpll)
-		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
+		verify_single_dpll_state(i915, new_state->shared_dpll, crtc, new_crtc_state);
if (old_state->shared_dpll &&
  	    old_state->shared_dpll != new_state->shared_dpll) {
@@ -13108,11 +13108,11 @@ intel_modeset_verify_crtc(struct drm_crtc *crtc,
  static void
  verify_disabled_dpll_state(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int i;
- for (i = 0; i < dev_priv->num_shared_dpll; i++)
-		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+	for (i = 0; i < i915->num_shared_dpll; i++)
+		verify_single_dpll_state(i915, &i915->shared_dplls[i], NULL, NULL);
  }
static void
@@ -13127,7 +13127,7 @@ intel_modeset_verify_disabled(struct drm_device *dev,
  static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/*
  	 * The scanline counter increments at the leading edge of hsync.
@@ -13156,7 +13156,7 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
  	 * However if queried just before the start of vblank we'll get an
  	 * answer that's slightly in the future.
  	 */
-	if (IS_GEN(dev_priv, 2)) {
+	if (IS_GEN(i915, 2)) {
  		const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
  		int vtotal;
@@ -13165,7 +13165,7 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
  			vtotal /= 2;
crtc->scanline_offset = vtotal - 1;
-	} else if (HAS_DDI(dev_priv) &&
+	} else if (HAS_DDI(i915) &&
  		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
  		crtc->scanline_offset = 2;
  	} else
@@ -13174,12 +13174,12 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
  	struct intel_crtc *crtc;
  	int i;
- if (!dev_priv->display.crtc_compute_clock)
+	if (!i915->display.crtc_compute_clock)
  		return;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -13311,7 +13311,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
static int intel_modeset_checks(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
  	struct intel_crtc *crtc;
  	int ret = 0, i;
@@ -13323,12 +13323,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
/* keep the current setting */
  	if (!state->cdclk.force_min_cdclk_changed)
-		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
+		state->cdclk.force_min_cdclk = i915->cdclk.force_min_cdclk;
state->modeset = true;
-	state->active_crtcs = dev_priv->active_crtcs;
-	state->cdclk.logical = dev_priv->cdclk.logical;
-	state->cdclk.actual = dev_priv->cdclk.actual;
+	state->active_crtcs = i915->active_crtcs;
+	state->cdclk.logical = i915->cdclk.logical;
+	state->cdclk.actual = i915->cdclk.actual;
  	state->cdclk.pipe = INVALID_PIPE;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -13349,19 +13349,19 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
  	 * mode set on this crtc.  For other crtcs we need to use the
  	 * adjusted_mode bits in the crtc directly.
  	 */
-	if (dev_priv->display.modeset_calc_cdclk) {
+	if (i915->display.modeset_calc_cdclk) {
  		enum pipe pipe;
- ret = dev_priv->display.modeset_calc_cdclk(state);
+		ret = i915->display.modeset_calc_cdclk(state);
  		if (ret < 0)
  			return ret;
/*
-		 * Writes to dev_priv->cdclk.logical must protected by
+		 * Writes to i915->cdclk.logical must protected by
  		 * holding all the crtc locks, even if we don't end up
  		 * touching the hardware
  		 */
-		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
+		if (intel_cdclk_changed(&i915->cdclk.logical,
  					&state->cdclk.logical)) {
  			ret = intel_lock_all_pipes(&state->base);
  			if (ret < 0)
@@ -13373,7 +13373,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
  			struct drm_crtc_state *crtc_state;
pipe = ilog2(state->active_crtcs);
-			crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
+			crtc = &intel_get_crtc_for_pipe(i915, pipe)->base;
  			crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
  			if (crtc_state && needs_modeset(crtc_state))
  				pipe = INVALID_PIPE;
@@ -13383,15 +13383,15 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
/* All pipes must be switched off while we change the cdclk. */
  		if (pipe != INVALID_PIPE &&
-		    intel_cdclk_needs_cd2x_update(dev_priv,
-						  &dev_priv->cdclk.actual,
+		    intel_cdclk_needs_cd2x_update(i915,
+						  &i915->cdclk.actual,
  						  &state->cdclk.actual)) {
  			ret = intel_lock_all_pipes(&state->base);
  			if (ret < 0)
  				return ret;
state->cdclk.pipe = pipe;
-		} else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+		} else if (intel_cdclk_needs_modeset(&i915->cdclk.actual,
  						     &state->cdclk.actual)) {
  			ret = intel_modeset_all_pipes(&state->base);
  			if (ret < 0)
@@ -13410,7 +13410,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
  	intel_modeset_clear_plls(state);
-	if (IS_HASWELL(dev_priv))
+	if (IS_HASWELL(i915))
  		return haswell_mode_set_planes_workaround(state);
return 0;
@@ -13424,11 +13424,11 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
  static int calc_watermark_data(struct intel_atomic_state *state)
  {
  	struct drm_device *dev = state->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
/* Is there platform-specific watermark information to calculate? */
-	if (dev_priv->display.compute_global_watermarks)
-		return dev_priv->display.compute_global_watermarks(state);
+	if (i915->display.compute_global_watermarks)
+		return i915->display.compute_global_watermarks(state);
return 0;
  }
@@ -13441,7 +13441,7 @@ static int calc_watermark_data(struct intel_atomic_state *state)
  static int intel_atomic_check(struct drm_device *dev,
  			      struct drm_atomic_state *_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_atomic_state *state = to_intel_atomic_state(_state);
  	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
  	struct intel_crtc *crtc;
@@ -13474,7 +13474,7 @@ static int intel_atomic_check(struct drm_device *dev,
  		if (ret)
  			goto fail;
- if (intel_pipe_config_compare(dev_priv, old_crtc_state,
+		if (intel_pipe_config_compare(i915, old_crtc_state,
  					      new_crtc_state, true)) {
  			new_crtc_state->base.mode_changed = false;
  			new_crtc_state->update_pipe = true;
@@ -13493,7 +13493,7 @@ static int intel_atomic_check(struct drm_device *dev,
  		if (ret)
  			goto fail;
  	} else {
-		state->cdclk.logical = dev_priv->cdclk.logical;
+		state->cdclk.logical = i915->cdclk.logical;
  	}
ret = icl_add_linked_planes(state);
@@ -13504,7 +13504,7 @@ static int intel_atomic_check(struct drm_device *dev,
  	if (ret)
  		goto fail;
- intel_fbc_choose_crtc(dev_priv, state);
+	intel_fbc_choose_crtc(i915, state);
  	ret = calc_watermark_data(state);
  	if (ret)
  		goto fail;
@@ -13564,7 +13564,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
  			      struct drm_crtc_state *new_crtc_state)
  {
  	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
  	bool modeset = needs_modeset(new_crtc_state);
@@ -13574,7 +13574,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
if (modeset) {
  		update_scanline_offset(pipe_config);
-		dev_priv->display.crtc_enable(pipe_config, state);
+		i915->display.crtc_enable(pipe_config, state);
/* vblanks work again, re-enable pipe CRC. */
  		intel_crtc_enable_pipe_crc(intel_crtc);
@@ -13593,7 +13593,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
  	intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
  	else
  		i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
@@ -13618,7 +13618,7 @@ static void intel_update_crtcs(struct drm_atomic_state *state)
static void skl_update_crtcs(struct drm_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	struct drm_i915_private *i915 = to_i915(state->dev);
  	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  	struct drm_crtc *crtc;
  	struct intel_crtc *intel_crtc;
@@ -13628,7 +13628,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
  	bool progress;
  	enum pipe pipe;
  	int i;
-	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+	u8 hw_enabled_slices = i915->wm.skl_hw.ddb.enabled_slices;
  	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
  	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
@@ -13638,8 +13638,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
  			entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/* If 2nd DBuf slice required, enable it here */
-	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
-		icl_dbuf_slices_update(dev_priv, required_slices);
+	if (INTEL_GEN(i915) >= 11 && required_slices > hw_enabled_slices)
+		icl_dbuf_slices_update(i915, required_slices);
/*
  	 * Whenever the number of active pipes changes, we need to make sure we
@@ -13663,7 +13663,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
  							entries,
-							INTEL_INFO(dev_priv)->num_pipes, i))
+							INTEL_INFO(i915)->num_pipes, i))
  				continue;
updated |= cmask;
@@ -13685,57 +13685,57 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
  					  new_crtc_state);
if (vbl_wait)
-				intel_wait_for_vblank(dev_priv, pipe);
+				intel_wait_for_vblank(i915, pipe);
progress = true;
  		}
  	} while (progress);
/* If 2nd DBuf slice is no more required disable it */
-	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
-		icl_dbuf_slices_update(dev_priv, required_slices);
+	if (INTEL_GEN(i915) >= 11 && required_slices < hw_enabled_slices)
+		icl_dbuf_slices_update(i915, required_slices);
  }
-static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+static void intel_atomic_helper_free_state(struct drm_i915_private *i915)
  {
  	struct intel_atomic_state *state, *next;
  	struct llist_node *freed;
- freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+	freed = llist_del_all(&i915->atomic_helper.free_list);
  	llist_for_each_entry_safe(state, next, freed, freed)
  		drm_atomic_state_put(&state->base);
  }
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915), atomic_helper.free_work);
- intel_atomic_helper_free_state(dev_priv);
+	intel_atomic_helper_free_state(i915);
  }
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
  {
  	struct wait_queue_entry wait_fence, wait_reset;
-	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
init_wait_entry(&wait_fence, 0);
  	init_wait_entry(&wait_reset, 0);
  	for (;;) {
  		prepare_to_wait(&intel_state->commit_ready.wait,
  				&wait_fence, TASK_UNINTERRUPTIBLE);
-		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
+		prepare_to_wait(&i915->gpu_error.wait_queue,
  				&wait_reset, TASK_UNINTERRUPTIBLE);
if (i915_sw_fence_done(&intel_state->commit_ready)
-		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+		    || test_bit(I915_RESET_MODESET, &i915->gpu_error.flags))
  			break;
schedule();
  	}
  	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
-	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
+	finish_wait(&i915->gpu_error.wait_queue, &wait_reset);
  }
static void intel_atomic_cleanup_work(struct work_struct *work)
@@ -13755,7 +13755,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  {
  	struct drm_device *dev = state->dev;
  	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  	struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
  	struct drm_crtc *crtc;
@@ -13769,7 +13769,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  	drm_atomic_helper_wait_for_dependencies(state);
if (intel_state->modeset)
-		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+		wakeref = intel_display_power_get(i915, POWER_DOMAIN_MODESET);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  		old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
@@ -13798,7 +13798,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  			 */
  			intel_crtc_disable_pipe_crc(intel_crtc);
- dev_priv->display.crtc_disable(old_intel_crtc_state, state);
+			i915->display.crtc_disable(old_intel_crtc_state, state);
  			intel_crtc->active = false;
  			intel_fbc_disable(intel_crtc);
  			intel_disable_shared_dpll(old_intel_crtc_state);
@@ -13807,14 +13807,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  			 * Underruns don't always raise
  			 * interrupts, so check manually.
  			 */
-			intel_check_cpu_fifo_underruns(dev_priv);
-			intel_check_pch_fifo_underruns(dev_priv);
+			intel_check_cpu_fifo_underruns(i915);
+			intel_check_pch_fifo_underruns(i915);
/* FIXME unify this for all platforms */
  			if (!new_crtc_state->active &&
-			    !HAS_GMCH(dev_priv) &&
-			    dev_priv->display.initial_watermarks)
-				dev_priv->display.initial_watermarks(intel_state,
+			    !HAS_GMCH(i915) &&
+			    i915->display.initial_watermarks)
+				i915->display.initial_watermarks(intel_state,
  								     new_intel_crtc_state);
  		}
  	}
@@ -13826,9 +13826,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  	if (intel_state->modeset) {
  		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
- intel_set_cdclk_pre_plane_update(dev_priv,
+		intel_set_cdclk_pre_plane_update(i915,
  						 &intel_state->cdclk.actual,
-						 &dev_priv->cdclk.actual,
+						 &i915->cdclk.actual,
  						 intel_state->cdclk.pipe);
/*
@@ -13836,7 +13836,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  		 * have more then one pipe enabled
  		 */
  		if (!intel_can_enable_sagv(state))
-			intel_disable_sagv(dev_priv);
+			intel_disable_sagv(i915);
intel_modeset_verify_disabled(dev, state);
  	}
@@ -13856,12 +13856,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  	}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
-	dev_priv->display.update_crtcs(state);
+	i915->display.update_crtcs(state);
if (intel_state->modeset)
-		intel_set_cdclk_post_plane_update(dev_priv,
+		intel_set_cdclk_post_plane_update(i915,
  						  &intel_state->cdclk.actual,
-						  &dev_priv->cdclk.actual,
+						  &i915->cdclk.actual,
  						  intel_state->cdclk.pipe);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -13895,8 +13895,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
  		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
- if (dev_priv->display.optimize_watermarks)
-			dev_priv->display.optimize_watermarks(intel_state,
+		if (i915->display.optimize_watermarks)
+			i915->display.optimize_watermarks(intel_state,
  							      new_intel_crtc_state);
  	}
@@ -13904,7 +13904,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
if (put_domains[i])
-			modeset_put_power_domains(dev_priv, put_domains[i]);
+			modeset_put_power_domains(i915, put_domains[i]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
  	}
@@ -13913,7 +13913,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  		intel_verify_planes(intel_state);
if (intel_state->modeset && intel_can_enable_sagv(state))
-		intel_enable_sagv(dev_priv);
+		intel_enable_sagv(i915);
  	drm_atomic_helper_commit_hw_done(state);
@@ -13924,10 +13924,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  		 * so enable debugging for the next modeset - and hope we catch
  		 * the culprit.
  		 */
-		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
-		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
+		intel_uncore_arm_unclaimed_mmio_detection(&i915->uncore);
+		intel_display_power_put(i915, POWER_DOMAIN_MODESET, wakeref);
  	}
-	intel_runtime_pm_put(dev_priv, intel_state->wakeref);
+	intel_runtime_pm_put(i915, intel_state->wakeref);
/*
  	 * Defer the cleanup of the old state to a separate worker to not
@@ -14003,10 +14003,10 @@ static int intel_atomic_commit(struct drm_device *dev,
  			       bool nonblock)
  {
  	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int ret = 0;
- intel_state->wakeref = intel_runtime_pm_get(dev_priv);
+	intel_state->wakeref = intel_runtime_pm_get(i915);
drm_atomic_state_get(state);
  	i915_sw_fence_init(&intel_state->commit_ready,
@@ -14029,7 +14029,7 @@ static int intel_atomic_commit(struct drm_device *dev,
  	 * FIXME doing watermarks and fb cleanup from a vblank worker
  	 * (assuming we had any) would solve these problems.
  	 */
-	if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
+	if (INTEL_GEN(i915) < 9 && state->legacy_cursor_update) {
  		struct intel_crtc_state *new_crtc_state;
  		struct intel_crtc *crtc;
  		int i;
@@ -14044,7 +14044,7 @@ static int intel_atomic_commit(struct drm_device *dev,
  	if (ret) {
  		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
  		i915_sw_fence_commit(&intel_state->commit_ready);
-		intel_runtime_pm_put(dev_priv, intel_state->wakeref);
+		intel_runtime_pm_put(i915, intel_state->wakeref);
  		return ret;
  	}
@@ -14056,21 +14056,21 @@ static int intel_atomic_commit(struct drm_device *dev,
  		i915_sw_fence_commit(&intel_state->commit_ready);
drm_atomic_helper_cleanup_planes(dev, state);
-		intel_runtime_pm_put(dev_priv, intel_state->wakeref);
+		intel_runtime_pm_put(i915, intel_state->wakeref);
  		return ret;
  	}
-	dev_priv->wm.distrust_bios_wm = false;
+	i915->wm.distrust_bios_wm = false;
  	intel_shared_dpll_swap_state(state);
  	intel_atomic_track_fbs(state);
if (intel_state->modeset) {
-		memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
+		memcpy(i915->min_cdclk, intel_state->min_cdclk,
  		       sizeof(intel_state->min_cdclk));
-		memcpy(dev_priv->min_voltage_level,
+		memcpy(i915->min_voltage_level,
  		       intel_state->min_voltage_level,
  		       sizeof(intel_state->min_voltage_level));
-		dev_priv->active_crtcs = intel_state->active_crtcs;
-		dev_priv->cdclk.force_min_cdclk =
+		i915->active_crtcs = intel_state->active_crtcs;
+		i915->cdclk.force_min_cdclk =
  			intel_state->cdclk.force_min_cdclk;
intel_cdclk_swap_state(intel_state);
@@ -14081,12 +14081,12 @@ static int intel_atomic_commit(struct drm_device *dev,
i915_sw_fence_commit(&intel_state->commit_ready);
  	if (nonblock && intel_state->modeset) {
-		queue_work(dev_priv->modeset_wq, &state->commit_work);
+		queue_work(i915->modeset_wq, &state->commit_work);
  	} else if (nonblock) {
  		queue_work(system_unbound_wq, &state->commit_work);
  	} else {
  		if (intel_state->modeset)
-			flush_workqueue(dev_priv->modeset_wq);
+			flush_workqueue(i915->modeset_wq);
  		intel_atomic_commit_tail(state);
  	}
@@ -14166,14 +14166,14 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
  static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	struct drm_framebuffer *fb = plane_state->base.fb;
  	struct i915_vma *vma;
if (plane->id == PLANE_CURSOR &&
-	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
+	    INTEL_INFO(i915)->display.cursor_needs_physical) {
  		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-		const int align = intel_cursor_alignment(dev_priv);
+		const int align = intel_cursor_alignment(i915);
  		int err;
err = i915_gem_object_attach_phys(obj, align);
@@ -14231,7 +14231,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
  {
  	struct intel_atomic_state *intel_state =
  		to_intel_atomic_state(new_state->state);
-	struct drm_i915_private *dev_priv = to_i915(plane->dev);
+	struct drm_i915_private *i915 = to_i915(plane->dev);
  	struct drm_framebuffer *fb = new_state->fb;
  	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
@@ -14279,7 +14279,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
  	if (ret)
  		return ret;
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
  	if (ret) {
  		i915_gem_object_unpin_pages(obj);
  		return ret;
@@ -14287,7 +14287,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
  	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
  	i915_gem_object_unpin_pages(obj);
  	if (ret)
  		return ret;
@@ -14323,7 +14323,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
  	 * maximum clocks following a vblank miss (see do_rps_boost()).
  	 */
  	if (!intel_state->rps_interactive) {
-		intel_rps_mark_interactive(dev_priv, true);
+		intel_rps_mark_interactive(i915, true);
  		intel_state->rps_interactive = true;
  	}
@@ -14345,17 +14345,17 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
  {
  	struct intel_atomic_state *intel_state =
  		to_intel_atomic_state(old_state->state);
-	struct drm_i915_private *dev_priv = to_i915(plane->dev);
+	struct drm_i915_private *i915 = to_i915(plane->dev);
if (intel_state->rps_interactive) {
-		intel_rps_mark_interactive(dev_priv, false);
+		intel_rps_mark_interactive(i915, false);
  		intel_state->rps_interactive = false;
  	}
/* Should only be called after a successful intel_prepare_plane_fb()! */
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
  	intel_plane_unpin_fb(to_intel_plane_state(old_state));
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
  }
int
@@ -14363,7 +14363,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
  	      u32 pixel_format)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	int max_scale, mult;
  	int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
@@ -14373,7 +14373,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
  	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
  	max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
+	if (IS_GEMINILAKE(i915) || INTEL_GEN(i915) >= 10)
  		max_dotclk *= 2;
if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
@@ -14396,7 +14396,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
  static void intel_begin_crtc_commit(struct intel_atomic_state *state,
  				    struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_crtc_state *old_crtc_state =
  		intel_atomic_get_old_crtc_state(state, crtc);
  	struct intel_crtc_state *new_crtc_state =
@@ -14415,31 +14415,31 @@ static void intel_begin_crtc_commit(struct intel_atomic_state *state,
if (new_crtc_state->update_pipe)
  		intel_update_pipe_config(old_crtc_state, new_crtc_state);
-	else if (INTEL_GEN(dev_priv) >= 9)
+	else if (INTEL_GEN(i915) >= 9)
  		skl_detach_scalers(new_crtc_state);
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+	if (INTEL_GEN(i915) >= 9 || IS_BROADWELL(i915))
  		bdw_set_pipemisc(new_crtc_state);
out:
-	if (dev_priv->display.atomic_update_watermarks)
-		dev_priv->display.atomic_update_watermarks(state,
+	if (i915->display.atomic_update_watermarks)
+		i915->display.atomic_update_watermarks(state,
  							   new_crtc_state);
  }
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
  				  struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!IS_GEN(dev_priv, 2))
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+	if (!IS_GEN(i915, 2))
+		intel_set_cpu_fifo_underrun_reporting(i915, crtc->pipe, true);
if (crtc_state->has_pch_encoder) {
  		enum pipe pch_transcoder =
  			intel_crtc_pch_transcoder(crtc);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+		intel_set_pch_fifo_underrun_reporting(i915, pch_transcoder, true);
  	}
  }
@@ -14559,7 +14559,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
  			   u32 src_w, u32 src_h,
  			   struct drm_modeset_acquire_ctx *ctx)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
  	int ret;
  	struct drm_plane_state *old_plane_state, *new_plane_state;
  	struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -14626,7 +14626,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
  	if (ret)
  		goto out_free;
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
  	if (ret)
  		goto out_free;
@@ -14664,7 +14664,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
  	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
  out_free:
  	if (new_crtc_state)
  		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
@@ -14691,25 +14691,25 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
  	.format_mod_supported = intel_cursor_format_mod_supported,
  };
-static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+static bool i9xx_plane_has_fbc(struct drm_i915_private *i915,
  			       enum i9xx_plane_id i9xx_plane)
  {
-	if (!HAS_FBC(dev_priv))
+	if (!HAS_FBC(i915))
  		return false;
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+	if (IS_BROADWELL(i915) || IS_HASWELL(i915))
  		return i9xx_plane == PLANE_A; /* tied to pipe A */
-	else if (IS_IVYBRIDGE(dev_priv))
+	else if (IS_IVYBRIDGE(i915))
  		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
  			i9xx_plane == PLANE_C;
-	else if (INTEL_GEN(dev_priv) >= 4)
+	else if (INTEL_GEN(i915) >= 4)
  		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
  	else
  		return i9xx_plane == PLANE_A;
  }
static struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_primary_plane_create(struct drm_i915_private *i915, enum pipe pipe)
  {
  	struct intel_plane *plane;
  	const struct drm_plane_funcs *plane_funcs;
@@ -14720,8 +14720,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
  	int num_formats;
  	int ret;
- if (INTEL_GEN(dev_priv) >= 9)
-		return skl_universal_plane_create(dev_priv, pipe,
+	if (INTEL_GEN(i915) >= 9)
+		return skl_universal_plane_create(i915, pipe,
  						  PLANE_PRIMARY);
plane = intel_plane_alloc();
@@ -14733,21 +14733,21 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
  	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
  	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
  	 */
-	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+	if (HAS_FBC(i915) && INTEL_GEN(i915) < 4)
  		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
  	else
  		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
  	plane->id = PLANE_PRIMARY;
  	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+	plane->has_fbc = i9xx_plane_has_fbc(i915, plane->i9xx_plane);
  	if (plane->has_fbc) {
-		struct intel_fbc *fbc = &dev_priv->fbc;
+		struct intel_fbc *fbc = &i915->fbc;
fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
  	}
- if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		formats = i965_primary_formats;
  		num_formats = ARRAY_SIZE(i965_primary_formats);
  		modifiers = i9xx_format_modifiers;
@@ -14775,14 +14775,14 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
  	possible_crtcs = BIT(pipe);
-	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+	if (INTEL_GEN(i915) >= 5 || IS_G4X(i915))
+		ret = drm_universal_plane_init(&i915->drm, &plane->base,
  					       possible_crtcs, plane_funcs,
  					       formats, num_formats, modifiers,
  					       DRM_PLANE_TYPE_PRIMARY,
  					       "primary %c", pipe_name(pipe));
  	else
-		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+		ret = drm_universal_plane_init(&i915->drm, &plane->base,
  					       possible_crtcs, plane_funcs,
  					       formats, num_formats, modifiers,
  					       DRM_PLANE_TYPE_PRIMARY,
@@ -14791,18 +14791,18 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
  	if (ret)
  		goto fail;
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+	if (IS_CHERRYVIEW(i915) && pipe == PIPE_B) {
  		supported_rotations =
  			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
  			DRM_MODE_REFLECT_X;
-	} else if (INTEL_GEN(dev_priv) >= 4) {
+	} else if (INTEL_GEN(i915) >= 4) {
  		supported_rotations =
  			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
  	} else {
  		supported_rotations = DRM_MODE_ROTATE_0;
  	}
- if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		drm_plane_create_rotation_property(&plane->base,
  						   DRM_MODE_ROTATE_0,
  						   supported_rotations);
@@ -14818,7 +14818,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
  }
static struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+intel_cursor_plane_create(struct drm_i915_private *i915,
  			  enum pipe pipe)
  {
  	unsigned int possible_crtcs;
@@ -14834,7 +14834,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
  	cursor->id = PLANE_CURSOR;
  	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+	if (IS_I845G(i915) || IS_I865G(i915)) {
  		cursor->max_stride = i845_cursor_max_stride;
  		cursor->update_plane = i845_update_cursor;
  		cursor->disable_plane = i845_disable_cursor;
@@ -14851,12 +14851,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
  	cursor->cursor.base = ~0;
  	cursor->cursor.cntl = ~0;
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+	if (IS_I845G(i915) || IS_I865G(i915) || HAS_CUR_FBC(i915))
  		cursor->cursor.size = ~0;
  	possible_crtcs = BIT(pipe);
-	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+	ret = drm_universal_plane_init(&i915->drm, &cursor->base,
  				       possible_crtcs, &intel_cursor_plane_funcs,
  				       intel_cursor_formats,
  				       ARRAY_SIZE(intel_cursor_formats),
@@ -14866,7 +14866,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
  	if (ret)
  		goto fail;
- if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		drm_plane_create_rotation_property(&cursor->base,
  						   DRM_MODE_ROTATE_0,
  						   DRM_MODE_ROTATE_0 |
@@ -14887,10 +14887,10 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
  {
  	struct intel_crtc_scaler_state *scaler_state =
  		&crtc_state->scaler_state;
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	int i;
- crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
+	crtc->num_scalers = RUNTIME_INFO(i915)->num_scalers[crtc->pipe];
  	if (!crtc->num_scalers)
  		return;
@@ -14904,7 +14904,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
  	scaler_state->scaler_id = -1;
  }
-static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
+static int intel_crtc_init(struct drm_i915_private *i915, enum pipe pipe)
  {
  	struct intel_crtc *intel_crtc;
  	struct intel_crtc_state *crtc_state = NULL;
@@ -14924,17 +14924,17 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
  	__drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
  	intel_crtc->config = crtc_state;
- primary = intel_primary_plane_create(dev_priv, pipe);
+	primary = intel_primary_plane_create(i915, pipe);
  	if (IS_ERR(primary)) {
  		ret = PTR_ERR(primary);
  		goto fail;
  	}
  	intel_crtc->plane_ids_mask |= BIT(primary->id);
- for_each_sprite(dev_priv, pipe, sprite) {
+	for_each_sprite(i915, pipe, sprite) {
  		struct intel_plane *plane;
- plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+		plane = intel_sprite_plane_create(i915, pipe, sprite);
  		if (IS_ERR(plane)) {
  			ret = PTR_ERR(plane);
  			goto fail;
@@ -14942,14 +14942,14 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
  		intel_crtc->plane_ids_mask |= BIT(plane->id);
  	}
- cursor = intel_cursor_plane_create(dev_priv, pipe);
+	cursor = intel_cursor_plane_create(i915, pipe);
  	if (IS_ERR(cursor)) {
  		ret = PTR_ERR(cursor);
  		goto fail;
  	}
  	intel_crtc->plane_ids_mask |= BIT(cursor->id);
- ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+	ret = drm_crtc_init_with_planes(&i915->drm, &intel_crtc->base,
  					&primary->base, &cursor->base,
  					&intel_crtc_funcs,
  					"pipe %c", pipe_name(pipe));
@@ -14961,16 +14961,16 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
  	/* initialize shared scalers */
  	intel_crtc_init_scalers(intel_crtc, crtc_state);
- BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
-	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
-	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
+	BUG_ON(pipe >= ARRAY_SIZE(i915->pipe_to_crtc_mapping) ||
+	       i915->pipe_to_crtc_mapping[pipe] != NULL);
+	i915->pipe_to_crtc_mapping[pipe] = intel_crtc;
- if (INTEL_GEN(dev_priv) < 9) {
+	if (INTEL_GEN(i915) < 9) {
  		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
- BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
-		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
-		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
+		BUG_ON(i9xx_plane >= ARRAY_SIZE(i915->plane_to_crtc_mapping) ||
+		       i915->plane_to_crtc_mapping[i9xx_plane] != NULL);
+		i915->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
  	}
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -15026,29 +15026,29 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
  	return index_mask;
  }
-static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
+static bool ilk_has_edp_a(struct drm_i915_private *i915)
  {
-	if (!IS_MOBILE(dev_priv))
+	if (!IS_MOBILE(i915))
  		return false;
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
  		return false;
- if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+	if (IS_GEN(i915, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
  		return false;
return true;
  }
-static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
+static bool intel_ddi_crt_present(struct drm_i915_private *i915)
  {
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		return false;
- if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+	if (IS_HSW_ULT(i915) || IS_BDW_ULT(i915))
  		return false;
- if (HAS_PCH_LPT_H(dev_priv) &&
+	if (HAS_PCH_LPT_H(i915) &&
  	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
  		return false;
@@ -15056,24 +15056,24 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
  	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
  		return false;
- if (!dev_priv->vbt.int_crt_support)
+	if (!i915->vbt.int_crt_support)
  		return false;
return true;
  }
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+void intel_pps_unlock_regs_wa(struct drm_i915_private *i915)
  {
  	int pps_num;
  	int pps_idx;
- if (HAS_DDI(dev_priv))
+	if (HAS_DDI(i915))
  		return;
  	/*
  	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
  	 * everywhere where registers can be write protected.
  	 */
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		pps_num = 2;
  	else
  		pps_num = 1;
@@ -15086,65 +15086,65 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
  	}
  }
-static void intel_pps_init(struct drm_i915_private *dev_priv)
+static void intel_pps_init(struct drm_i915_private *i915)
  {
-	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
-		dev_priv->pps_mmio_base = PCH_PPS_BASE;
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		dev_priv->pps_mmio_base = VLV_PPS_BASE;
+	if (HAS_PCH_SPLIT(i915) || IS_GEN9_LP(i915))
+		i915->pps_mmio_base = PCH_PPS_BASE;
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		i915->pps_mmio_base = VLV_PPS_BASE;
  	else
-		dev_priv->pps_mmio_base = PPS_BASE;
+		i915->pps_mmio_base = PPS_BASE;
- intel_pps_unlock_regs_wa(dev_priv);
+	intel_pps_unlock_regs_wa(i915);
  }
-static void intel_setup_outputs(struct drm_i915_private *dev_priv)
+static void intel_setup_outputs(struct drm_i915_private *i915)
  {
  	struct intel_encoder *encoder;
  	bool dpd_is_edp = false;
- intel_pps_init(dev_priv);
+	intel_pps_init(i915);
- if (!HAS_DISPLAY(dev_priv))
+	if (!HAS_DISPLAY(i915))
  		return;
- if (IS_ELKHARTLAKE(dev_priv)) {
-		intel_ddi_init(dev_priv, PORT_A);
-		intel_ddi_init(dev_priv, PORT_B);
-		intel_ddi_init(dev_priv, PORT_C);
-		icl_dsi_init(dev_priv);
-	} else if (INTEL_GEN(dev_priv) >= 11) {
-		intel_ddi_init(dev_priv, PORT_A);
-		intel_ddi_init(dev_priv, PORT_B);
-		intel_ddi_init(dev_priv, PORT_C);
-		intel_ddi_init(dev_priv, PORT_D);
-		intel_ddi_init(dev_priv, PORT_E);
+	if (IS_ELKHARTLAKE(i915)) {
+		intel_ddi_init(i915, PORT_A);
+		intel_ddi_init(i915, PORT_B);
+		intel_ddi_init(i915, PORT_C);
+		icl_dsi_init(i915);
+	} else if (INTEL_GEN(i915) >= 11) {
+		intel_ddi_init(i915, PORT_A);
+		intel_ddi_init(i915, PORT_B);
+		intel_ddi_init(i915, PORT_C);
+		intel_ddi_init(i915, PORT_D);
+		intel_ddi_init(i915, PORT_E);
  		/*
  		 * On some ICL SKUs port F is not present. No strap bits for
  		 * this, so rely on VBT.
  		 * Work around broken VBTs on SKUs known to have no port F.
  		 */
-		if (IS_ICL_WITH_PORT_F(dev_priv) &&
-		    intel_bios_is_port_present(dev_priv, PORT_F))
-			intel_ddi_init(dev_priv, PORT_F);
+		if (IS_ICL_WITH_PORT_F(i915) &&
+		    intel_bios_is_port_present(i915, PORT_F))
+			intel_ddi_init(i915, PORT_F);
- icl_dsi_init(dev_priv);
-	} else if (IS_GEN9_LP(dev_priv)) {
+		icl_dsi_init(i915);
+	} else if (IS_GEN9_LP(i915)) {
  		/*
  		 * FIXME: Broxton doesn't support port detection via the
  		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
  		 * detect the ports.
  		 */
-		intel_ddi_init(dev_priv, PORT_A);
-		intel_ddi_init(dev_priv, PORT_B);
-		intel_ddi_init(dev_priv, PORT_C);
+		intel_ddi_init(i915, PORT_A);
+		intel_ddi_init(i915, PORT_B);
+		intel_ddi_init(i915, PORT_C);
- vlv_dsi_init(dev_priv);
-	} else if (HAS_DDI(dev_priv)) {
+		vlv_dsi_init(i915);
+	} else if (HAS_DDI(i915)) {
  		int found;
- if (intel_ddi_crt_present(dev_priv))
-			intel_crt_init(dev_priv);
+		if (intel_ddi_crt_present(i915))
+			intel_crt_init(i915);
/*
  		 * Haswell uses DDI functions to detect digital outputs.
@@ -15153,29 +15153,29 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
  		 */
  		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
  		/* WaIgnoreDDIAStrap: skl */
-		if (found || IS_GEN9_BC(dev_priv))
-			intel_ddi_init(dev_priv, PORT_A);
+		if (found || IS_GEN9_BC(i915))
+			intel_ddi_init(i915, PORT_A);
/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
  		 * register */
  		found = I915_READ(SFUSE_STRAP);
if (found & SFUSE_STRAP_DDIB_DETECTED)
-			intel_ddi_init(dev_priv, PORT_B);
+			intel_ddi_init(i915, PORT_B);
  		if (found & SFUSE_STRAP_DDIC_DETECTED)
-			intel_ddi_init(dev_priv, PORT_C);
+			intel_ddi_init(i915, PORT_C);
  		if (found & SFUSE_STRAP_DDID_DETECTED)
-			intel_ddi_init(dev_priv, PORT_D);
+			intel_ddi_init(i915, PORT_D);
  		if (found & SFUSE_STRAP_DDIF_DETECTED)
-			intel_ddi_init(dev_priv, PORT_F);
+			intel_ddi_init(i915, PORT_F);
  		/*
  		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
  		 */
-		if (IS_GEN9_BC(dev_priv) &&
-		    intel_bios_is_port_present(dev_priv, PORT_E))
-			intel_ddi_init(dev_priv, PORT_E);
+		if (IS_GEN9_BC(i915) &&
+		    intel_bios_is_port_present(i915, PORT_E))
+			intel_ddi_init(i915, PORT_E);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+	} else if (HAS_PCH_SPLIT(i915)) {
  		int found;
/*
@@ -15183,39 +15183,39 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
  		 * to prevent the registration of both eDP and LVDS and the
  		 * incorrect sharing of the PPS.
  		 */
-		intel_lvds_init(dev_priv);
-		intel_crt_init(dev_priv);
+		intel_lvds_init(i915);
+		intel_crt_init(i915);
- dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
+		dpd_is_edp = intel_dp_is_port_edp(i915, PORT_D);
- if (ilk_has_edp_a(dev_priv))
-			intel_dp_init(dev_priv, DP_A, PORT_A);
+		if (ilk_has_edp_a(i915))
+			intel_dp_init(i915, DP_A, PORT_A);
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
  			/* PCH SDVOB multiplex with HDMIB */
-			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
+			found = intel_sdvo_init(i915, PCH_SDVOB, PORT_B);
  			if (!found)
-				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
+				intel_hdmi_init(i915, PCH_HDMIB, PORT_B);
  			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
-				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
+				intel_dp_init(i915, PCH_DP_B, PORT_B);
  		}
if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
-			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
+			intel_hdmi_init(i915, PCH_HDMIC, PORT_C);
if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
-			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
+			intel_hdmi_init(i915, PCH_HDMID, PORT_D);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
-			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
+			intel_dp_init(i915, PCH_DP_C, PORT_C);
if (I915_READ(PCH_DP_D) & DP_DETECTED)
-			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+			intel_dp_init(i915, PCH_DP_D, PORT_D);
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		bool has_edp, has_port;
- if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
-			intel_crt_init(dev_priv);
+		if (IS_VALLEYVIEW(i915) && i915->vbt.int_crt_support)
+			intel_crt_init(i915);
/*
  		 * The DP_DETECTED bit is the latched state of the DDC
@@ -15232,97 +15232,97 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
  		 * trust the port type the VBT declares as we've seen at least
  		 * HDMI ports that the VBT claim are DP or eDP.
  		 */
-		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
-		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+		has_edp = intel_dp_is_port_edp(i915, PORT_B);
+		has_port = intel_bios_is_port_present(i915, PORT_B);
  		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
-			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
+			has_edp &= intel_dp_init(i915, VLV_DP_B, PORT_B);
  		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
-			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
+			intel_hdmi_init(i915, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
-		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+		has_edp = intel_dp_is_port_edp(i915, PORT_C);
+		has_port = intel_bios_is_port_present(i915, PORT_C);
  		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
-			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
+			has_edp &= intel_dp_init(i915, VLV_DP_C, PORT_C);
  		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
-			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
+			intel_hdmi_init(i915, VLV_HDMIC, PORT_C);
- if (IS_CHERRYVIEW(dev_priv)) {
+		if (IS_CHERRYVIEW(i915)) {
  			/*
  			 * eDP not supported on port D,
  			 * so no need to worry about it
  			 */
-			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+			has_port = intel_bios_is_port_present(i915, PORT_D);
  			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
-				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
+				intel_dp_init(i915, CHV_DP_D, PORT_D);
  			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
-				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
+				intel_hdmi_init(i915, CHV_HDMID, PORT_D);
  		}
- vlv_dsi_init(dev_priv);
-	} else if (IS_PINEVIEW(dev_priv)) {
-		intel_lvds_init(dev_priv);
-		intel_crt_init(dev_priv);
-	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
+		vlv_dsi_init(i915);
+	} else if (IS_PINEVIEW(i915)) {
+		intel_lvds_init(i915);
+		intel_crt_init(i915);
+	} else if (IS_GEN_RANGE(i915, 3, 4)) {
  		bool found = false;
- if (IS_MOBILE(dev_priv))
-			intel_lvds_init(dev_priv);
+		if (IS_MOBILE(i915))
+			intel_lvds_init(i915);
- intel_crt_init(dev_priv);
+		intel_crt_init(i915);
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
  			DRM_DEBUG_KMS("probing SDVOB\n");
-			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
-			if (!found && IS_G4X(dev_priv)) {
+			found = intel_sdvo_init(i915, GEN3_SDVOB, PORT_B);
+			if (!found && IS_G4X(i915)) {
  				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
-				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
+				intel_hdmi_init(i915, GEN4_HDMIB, PORT_B);
  			}
- if (!found && IS_G4X(dev_priv))
-				intel_dp_init(dev_priv, DP_B, PORT_B);
+			if (!found && IS_G4X(i915))
+				intel_dp_init(i915, DP_B, PORT_B);
  		}
  		/* Before G4X SDVOC doesn't have its own detect register */
  		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
  			DRM_DEBUG_KMS("probing SDVOC\n");
-			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
+			found = intel_sdvo_init(i915, GEN3_SDVOC, PORT_C);
  		}
  		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
-			if (IS_G4X(dev_priv)) {
+			if (IS_G4X(i915)) {
  				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
-				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
+				intel_hdmi_init(i915, GEN4_HDMIC, PORT_C);
  			}
-			if (IS_G4X(dev_priv))
-				intel_dp_init(dev_priv, DP_C, PORT_C);
+			if (IS_G4X(i915))
+				intel_dp_init(i915, DP_C, PORT_C);
  		}
- if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
-			intel_dp_init(dev_priv, DP_D, PORT_D);
+		if (IS_G4X(i915) && (I915_READ(DP_D) & DP_DETECTED))
+			intel_dp_init(i915, DP_D, PORT_D);
- if (SUPPORTS_TV(dev_priv))
-			intel_tv_init(dev_priv);
-	} else if (IS_GEN(dev_priv, 2)) {
-		if (IS_I85X(dev_priv))
-			intel_lvds_init(dev_priv);
+		if (SUPPORTS_TV(i915))
+			intel_tv_init(i915);
+	} else if (IS_GEN(i915, 2)) {
+		if (IS_I85X(i915))
+			intel_lvds_init(i915);
- intel_crt_init(dev_priv);
-		intel_dvo_init(dev_priv);
+		intel_crt_init(i915);
+		intel_dvo_init(i915);
  	}
- intel_psr_init(dev_priv);
+	intel_psr_init(i915);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+	for_each_intel_encoder(&i915->drm, encoder) {
  		encoder->base.possible_crtcs = encoder->crtc_mask;
  		encoder->base.possible_clones =
  			intel_encoder_clones(encoder);
  	}
- intel_init_pch_refclk(dev_priv);
+	intel_init_pch_refclk(i915);
- drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
+	drm_helper_move_panel_connectors_to_head(&i915->drm);
  }
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -15379,7 +15379,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
  				  struct drm_i915_gem_object *obj,
  				  struct drm_mode_fb_cmd2 *mode_cmd)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
  	struct drm_framebuffer *fb = &intel_fb->base;
  	u32 max_stride;
  	unsigned int tiling, stride;
@@ -15411,7 +15411,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
  		}
  	}
- if (!drm_any_plane_has_format(&dev_priv->drm,
+	if (!drm_any_plane_has_format(&i915->drm,
  				      mode_cmd->pixel_format,
  				      mode_cmd->modifier[0])) {
  		struct drm_format_name_buf format_name;
@@ -15427,13 +15427,13 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
  	 * gen2/3 display engine uses the fence if present,
  	 * so the tiling mode must match the fb modifier exactly.
  	 */
-	if (INTEL_GEN(dev_priv) < 4 &&
+	if (INTEL_GEN(i915) < 4 &&
  	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
  		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
  		goto err;
  	}
- max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
+	max_stride = intel_fb_max_stride(i915, mode_cmd->pixel_format,
  					 mode_cmd->modifier[0]);
  	if (mode_cmd->pitches[0] > max_stride) {
  		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
@@ -15457,7 +15457,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
  	if (mode_cmd->offsets[0] != 0)
  		goto err;
- drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
+	drm_helper_mode_fill_fb_struct(&i915->drm, fb, mode_cmd);
for (i = 0; i < fb->format->num_planes; i++) {
  		u32 stride_alignment;
@@ -15478,7 +15478,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
  		 * require the entire fb to accommodate that to avoid
  		 * potential runtime errors at plane configuration time.
  		 */
-		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
+		if (IS_GEN(i915, 9) && i == 0 && fb->width > 3840 &&
  		    is_ccs_modifier(fb->modifier))
  			stride_alignment *= 4;
@@ -15491,11 +15491,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
  		fb->obj[i] = &obj->base;
  	}
- ret = intel_fill_fb_info(dev_priv, fb);
+	ret = intel_fill_fb_info(i915, fb);
  	if (ret)
  		goto err;
- ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
+	ret = drm_framebuffer_init(&i915->drm, fb, &intel_fb_funcs);
  	if (ret) {
  		DRM_ERROR("framebuffer init failed %d\n", ret);
  		goto err;
@@ -15545,7 +15545,7 @@ static enum drm_mode_status
  intel_mode_valid(struct drm_device *dev,
  		 const struct drm_display_mode *mode)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	int hdisplay_max, htotal_max;
  	int vdisplay_max, vtotal_max;
@@ -15578,13 +15578,13 @@ intel_mode_valid(struct drm_device *dev,
  			   DRM_MODE_FLAG_CLKDIV2))
  		return MODE_BAD;
- if (INTEL_GEN(dev_priv) >= 9 ||
-	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+	if (INTEL_GEN(i915) >= 9 ||
+	    IS_BROADWELL(i915) || IS_HASWELL(i915)) {
  		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
  		vdisplay_max = 4096;
  		htotal_max = 8192;
  		vtotal_max = 8192;
-	} else if (INTEL_GEN(dev_priv) >= 3) {
+	} else if (INTEL_GEN(i915) >= 3) {
  		hdisplay_max = 4096;
  		vdisplay_max = 4096;
  		htotal_max = 8192;
@@ -15625,113 +15625,113 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
/**
   * intel_init_display_hooks - initialize the display modesetting hooks
- * @dev_priv: device private
+ * @i915: device private
   */
-void intel_init_display_hooks(struct drm_i915_private *dev_priv)
+void intel_init_display_hooks(struct drm_i915_private *i915)
  {
-	intel_init_cdclk_hooks(dev_priv);
+	intel_init_cdclk_hooks(i915);
- if (INTEL_GEN(dev_priv) >= 9) {
-		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
+	if (INTEL_GEN(i915) >= 9) {
+		i915->display.get_pipe_config = haswell_get_pipe_config;
+		i915->display.get_initial_plane_config =
  			skylake_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock =
+		i915->display.crtc_compute_clock =
  			haswell_crtc_compute_clock;
-		dev_priv->display.crtc_enable = haswell_crtc_enable;
-		dev_priv->display.crtc_disable = haswell_crtc_disable;
-	} else if (HAS_DDI(dev_priv)) {
-		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
+		i915->display.crtc_enable = haswell_crtc_enable;
+		i915->display.crtc_disable = haswell_crtc_disable;
+	} else if (HAS_DDI(i915)) {
+		i915->display.get_pipe_config = haswell_get_pipe_config;
+		i915->display.get_initial_plane_config =
  			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock =
+		i915->display.crtc_compute_clock =
  			haswell_crtc_compute_clock;
-		dev_priv->display.crtc_enable = haswell_crtc_enable;
-		dev_priv->display.crtc_disable = haswell_crtc_disable;
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
-		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
+		i915->display.crtc_enable = haswell_crtc_enable;
+		i915->display.crtc_disable = haswell_crtc_disable;
+	} else if (HAS_PCH_SPLIT(i915)) {
+		i915->display.get_pipe_config = ironlake_get_pipe_config;
+		i915->display.get_initial_plane_config =
  			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock =
+		i915->display.crtc_compute_clock =
  			ironlake_crtc_compute_clock;
-		dev_priv->display.crtc_enable = ironlake_crtc_enable;
-		dev_priv->display.crtc_disable = ironlake_crtc_disable;
-	} else if (IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
+		i915->display.crtc_enable = ironlake_crtc_enable;
+		i915->display.crtc_disable = ironlake_crtc_disable;
+	} else if (IS_CHERRYVIEW(i915)) {
+		i915->display.get_pipe_config = i9xx_get_pipe_config;
+		i915->display.get_initial_plane_config =
  			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
-		dev_priv->display.crtc_enable = valleyview_crtc_enable;
-		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
+		i915->display.crtc_compute_clock = chv_crtc_compute_clock;
+		i915->display.crtc_enable = valleyview_crtc_enable;
+		i915->display.crtc_disable = i9xx_crtc_disable;
+	} else if (IS_VALLEYVIEW(i915)) {
+		i915->display.get_pipe_config = i9xx_get_pipe_config;
+		i915->display.get_initial_plane_config =
  			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
-		dev_priv->display.crtc_enable = valleyview_crtc_enable;
-		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-	} else if (IS_G4X(dev_priv)) {
-		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
+		i915->display.crtc_compute_clock = vlv_crtc_compute_clock;
+		i915->display.crtc_enable = valleyview_crtc_enable;
+		i915->display.crtc_disable = i9xx_crtc_disable;
+	} else if (IS_G4X(i915)) {
+		i915->display.get_pipe_config = i9xx_get_pipe_config;
+		i915->display.get_initial_plane_config =
  			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
-		dev_priv->display.crtc_enable = i9xx_crtc_enable;
-		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-	} else if (IS_PINEVIEW(dev_priv)) {
-		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
+		i915->display.crtc_compute_clock = g4x_crtc_compute_clock;
+		i915->display.crtc_enable = i9xx_crtc_enable;
+		i915->display.crtc_disable = i9xx_crtc_disable;
+	} else if (IS_PINEVIEW(i915)) {
+		i915->display.get_pipe_config = i9xx_get_pipe_config;
+		i915->display.get_initial_plane_config =
  			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
-		dev_priv->display.crtc_enable = i9xx_crtc_enable;
-		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-	} else if (!IS_GEN(dev_priv, 2)) {
-		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
+		i915->display.crtc_compute_clock = pnv_crtc_compute_clock;
+		i915->display.crtc_enable = i9xx_crtc_enable;
+		i915->display.crtc_disable = i9xx_crtc_disable;
+	} else if (!IS_GEN(i915, 2)) {
+		i915->display.get_pipe_config = i9xx_get_pipe_config;
+		i915->display.get_initial_plane_config =
  			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
-		dev_priv->display.crtc_enable = i9xx_crtc_enable;
-		dev_priv->display.crtc_disable = i9xx_crtc_disable;
+		i915->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+		i915->display.crtc_enable = i9xx_crtc_enable;
+		i915->display.crtc_disable = i9xx_crtc_disable;
  	} else {
-		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_initial_plane_config =
+		i915->display.get_pipe_config = i9xx_get_pipe_config;
+		i915->display.get_initial_plane_config =
  			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
-		dev_priv->display.crtc_enable = i9xx_crtc_enable;
-		dev_priv->display.crtc_disable = i9xx_crtc_disable;
+		i915->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+		i915->display.crtc_enable = i9xx_crtc_enable;
+		i915->display.crtc_disable = i9xx_crtc_disable;
  	}
- if (IS_GEN(dev_priv, 5)) {
-		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
-	} else if (IS_GEN(dev_priv, 6)) {
-		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
-	} else if (IS_IVYBRIDGE(dev_priv)) {
+	if (IS_GEN(i915, 5)) {
+		i915->display.fdi_link_train = ironlake_fdi_link_train;
+	} else if (IS_GEN(i915, 6)) {
+		i915->display.fdi_link_train = gen6_fdi_link_train;
+	} else if (IS_IVYBRIDGE(i915)) {
  		/* FIXME: detect B0+ stepping and use auto training */
-		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+		i915->display.fdi_link_train = ivb_manual_fdi_link_train;
+	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+		i915->display.fdi_link_train = hsw_fdi_link_train;
  	}
- if (INTEL_GEN(dev_priv) >= 9)
-		dev_priv->display.update_crtcs = skl_update_crtcs;
+	if (INTEL_GEN(i915) >= 9)
+		i915->display.update_crtcs = skl_update_crtcs;
  	else
-		dev_priv->display.update_crtcs = intel_update_crtcs;
+		i915->display.update_crtcs = intel_update_crtcs;
  }
-static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
+static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *i915)
  {
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		return VLV_VGACNTRL;
-	else if (INTEL_GEN(dev_priv) >= 5)
+	else if (INTEL_GEN(i915) >= 5)
  		return CPU_VGACNTRL;
  	else
  		return VGACNTRL;
  }
/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_i915_private *dev_priv)
+static void i915_disable_vga(struct drm_i915_private *i915)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	u8 sr1;
-	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
+	i915_reg_t vga_reg = i915_vgacntrl_reg(i915);
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
  	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
@@ -15747,11 +15747,11 @@ static void i915_disable_vga(struct drm_i915_private *dev_priv)
void intel_modeset_init_hw(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
- intel_update_cdclk(dev_priv);
-	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
+	intel_update_cdclk(i915);
+	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
+	i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
  }
/*
@@ -15766,7 +15766,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
   */
  static void sanitize_watermarks(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_atomic_state *state;
  	struct intel_atomic_state *intel_state;
  	struct drm_crtc *crtc;
@@ -15776,7 +15776,7 @@ static void sanitize_watermarks(struct drm_device *dev)
  	int i;
/* Only supported on platforms that use atomic watermark design */
-	if (!dev_priv->display.optimize_watermarks)
+	if (!i915->display.optimize_watermarks)
  		return;
/*
@@ -15804,7 +15804,7 @@ static void sanitize_watermarks(struct drm_device *dev)
  	 * intermediate watermarks (since we don't trust the current
  	 * watermarks).
  	 */
-	if (!HAS_GMCH(dev_priv))
+	if (!HAS_GMCH(i915))
  		intel_state->skip_intermediate_wm = true;
ret = intel_atomic_check(dev, state);
@@ -15829,7 +15829,7 @@ static void sanitize_watermarks(struct drm_device *dev)
  		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
cs->wm.need_postvbl_update = true;
-		dev_priv->display.optimize_watermarks(intel_state, cs);
+		i915->display.optimize_watermarks(intel_state, cs);
to_intel_crtc_state(crtc->state)->wm = cs->wm;
  	}
@@ -15841,20 +15841,20 @@ static void sanitize_watermarks(struct drm_device *dev)
  	drm_modeset_acquire_fini(&ctx);
  }
-static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
+static void intel_update_fdi_pll_freq(struct drm_i915_private *i915)
  {
-	if (IS_GEN(dev_priv, 5)) {
+	if (IS_GEN(i915, 5)) {
  		u32 fdi_pll_clk =
  			I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
- dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
-	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
-		dev_priv->fdi_pll_freq = 270000;
+		i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
+	} else if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915)) {
+		i915->fdi_pll_freq = 270000;
  	} else {
  		return;
  	}
- DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
+	DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", i915->fdi_pll_freq);
  }
static int intel_initial_commit(struct drm_device *dev)
@@ -15915,17 +15915,17 @@ static int intel_initial_commit(struct drm_device *dev)
int intel_modeset_init(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	enum pipe pipe;
  	struct intel_crtc *crtc;
  	int ret;
- dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
  	drm_mode_config_init(dev);
-	ret = intel_bw_init(dev_priv);
+	ret = intel_bw_init(i915);
  	if (ret)
  		return ret;
@@ -15939,15 +15939,15 @@ int intel_modeset_init(struct drm_device *dev)
  	dev->mode_config.funcs = &intel_mode_funcs;
-	init_llist_head(&dev_priv->atomic_helper.free_list);
-	INIT_WORK(&dev_priv->atomic_helper.free_work,
+	init_llist_head(&i915->atomic_helper.free_list);
+	INIT_WORK(&i915->atomic_helper.free_work,
  		  intel_atomic_helper_free_state_worker);
- intel_init_quirks(dev_priv);
+	intel_init_quirks(i915);
- intel_fbc_init(dev_priv);
+	intel_fbc_init(i915);
- intel_init_pm(dev_priv);
+	intel_init_pm(i915);
/*
  	 * There may be no VBT; and if the BIOS enabled SSC we can
@@ -15955,15 +15955,15 @@ int intel_modeset_init(struct drm_device *dev)
  	 * BIOS isn't using it, don't assume it will work even if the VBT
  	 * indicates as much.
  	 */
-	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+	if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915)) {
  		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
  					    DREF_SSC1_ENABLE);
- if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+		if (i915->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
  			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
  				     bios_lvds_use_ssc ? "en" : "dis",
-				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
-			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+				     i915->vbt.lvds_use_ssc ? "en" : "dis");
+			i915->vbt.lvds_use_ssc = bios_lvds_use_ssc;
  		}
  	}
@@ -15971,13 +15971,13 @@ int intel_modeset_init(struct drm_device *dev)
  	 * Maximum framebuffer dimensions, chosen to match
  	 * the maximum render engine surface size on gen4+.
  	 */
-	if (INTEL_GEN(dev_priv) >= 7) {
+	if (INTEL_GEN(i915) >= 7) {
  		dev->mode_config.max_width = 16384;
  		dev->mode_config.max_height = 16384;
-	} else if (INTEL_GEN(dev_priv) >= 4) {
+	} else if (INTEL_GEN(i915) >= 4) {
  		dev->mode_config.max_width = 8192;
  		dev->mode_config.max_height = 8192;
-	} else if (IS_GEN(dev_priv, 3)) {
+	} else if (IS_GEN(i915, 3)) {
  		dev->mode_config.max_width = 4096;
  		dev->mode_config.max_height = 4096;
  	} else {
@@ -15985,10 +15985,10 @@ int intel_modeset_init(struct drm_device *dev)
  		dev->mode_config.max_height = 2048;
  	}
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
-		dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
+	if (IS_I845G(i915) || IS_I865G(i915)) {
+		dev->mode_config.cursor_width = IS_I845G(i915) ? 64 : 512;
  		dev->mode_config.cursor_height = 1023;
-	} else if (IS_GEN(dev_priv, 2)) {
+	} else if (IS_GEN(i915, 2)) {
  		dev->mode_config.cursor_width = 64;
  		dev->mode_config.cursor_height = 64;
  	} else {
@@ -15999,11 +15999,11 @@ int intel_modeset_init(struct drm_device *dev)
  	dev->mode_config.fb_base = ggtt->gmadr.start;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
-		      INTEL_INFO(dev_priv)->num_pipes,
-		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
+		      INTEL_INFO(i915)->num_pipes,
+		      INTEL_INFO(i915)->num_pipes > 1 ? "s" : "");
- for_each_pipe(dev_priv, pipe) {
-		ret = intel_crtc_init(dev_priv, pipe);
+	for_each_pipe(i915, pipe) {
+		ret = intel_crtc_init(i915, pipe);
  		if (ret) {
  			drm_mode_config_cleanup(dev);
  			return ret;
@@ -16011,19 +16011,19 @@ int intel_modeset_init(struct drm_device *dev)
  	}
intel_shared_dpll_init(dev);
-	intel_update_fdi_pll_freq(dev_priv);
+	intel_update_fdi_pll_freq(i915);
- intel_update_czclk(dev_priv);
+	intel_update_czclk(i915);
  	intel_modeset_init_hw(dev);
- intel_hdcp_component_init(dev_priv);
+	intel_hdcp_component_init(i915);
- if (dev_priv->max_cdclk_freq == 0)
-		intel_update_max_cdclk(dev_priv);
+	if (i915->max_cdclk_freq == 0)
+		intel_update_max_cdclk(i915);
/* Just disable it once at startup */
-	i915_disable_vga(dev_priv);
-	intel_setup_outputs(dev_priv);
+	i915_disable_vga(i915);
+	intel_setup_outputs(i915);
drm_modeset_lock_all(dev);
  	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
@@ -16042,7 +16042,7 @@ int intel_modeset_init(struct drm_device *dev)
  		 * can even allow for smooth boot transitions if the BIOS
  		 * fb is large enough for the active pipe configuration.
  		 */
-		dev_priv->display.get_initial_plane_config(crtc,
+		i915->display.get_initial_plane_config(crtc,
  							   &plane_config);
/*
@@ -16057,7 +16057,7 @@ int intel_modeset_init(struct drm_device *dev)
  	 * Note that we need to do this after reconstructing the BIOS fb's
  	 * since the watermark calculation done here will use pstate->fb.
  	 */
-	if (!HAS_GMCH(dev_priv))
+	if (!HAS_GMCH(i915))
  		sanitize_watermarks(dev);
/*
@@ -16073,9 +16073,9 @@ int intel_modeset_init(struct drm_device *dev)
  	return 0;
  }
-void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+void i830_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
  {
-	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
  	/* 640x480@60Hz, ~25175 kHz */
  	struct dpll clock = {
  		.m1 = 18,
@@ -16143,9 +16143,9 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
  	intel_wait_for_pipe_scanline_moving(crtc);
  }
-void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+void i830_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
  {
-	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
  		      pipe_name(pipe));
@@ -16166,14 +16166,14 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
  }
static void
-intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
+intel_sanitize_plane_mapping(struct drm_i915_private *i915)
  {
  	struct intel_crtc *crtc;
- if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		return;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		struct intel_plane *plane =
  			to_intel_plane(crtc->base.primary);
  		struct intel_crtc *plane_crtc;
@@ -16188,7 +16188,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
  		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
  			      plane->base.base.id, plane->base.name);
- plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+		plane_crtc = intel_get_crtc_for_pipe(i915, pipe);
  		intel_plane_disable_noatomic(plane_crtc, plane);
  	}
  }
@@ -16215,18 +16215,18 @@ static struct intel_connector *intel_encoder_find_connector(struct intel_encoder
  	return NULL;
  }
-static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
+static bool has_pch_trancoder(struct drm_i915_private *i915,
  			      enum pipe pch_transcoder)
  {
-	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
-		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
+	return HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) ||
+		(HAS_PCH_LPT_H(i915) && pch_transcoder == PIPE_A);
  }
static void intel_sanitize_crtc(struct intel_crtc *crtc,
  				struct drm_modeset_acquire_ctx *ctx)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -16255,7 +16255,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
  		 * Disable any background color set by the BIOS, but enable the
  		 * gamma and CSC to match how we program our planes.
  		 */
-		if (INTEL_GEN(dev_priv) >= 9)
+		if (INTEL_GEN(i915) >= 9)
  			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
  				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
  				   SKL_BOTTOM_COLOR_CSC_ENABLE);
@@ -16266,7 +16266,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
  	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
  		intel_crtc_disable_noatomic(&crtc->base, ctx);
- if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
+	if (crtc_state->base.active || HAS_GMCH(i915)) {
  		/*
  		 * We start out with underrun reporting disabled to avoid races.
  		 * For correct bookkeeping mark this on active crtcs.
@@ -16290,14 +16290,14 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
  		 * PCH transcoders B and C would prevent enabling the south
  		 * error interrupt (see cpt_can_enable_serr_int()).
  		 */
-		if (has_pch_trancoder(dev_priv, crtc->pipe))
+		if (has_pch_trancoder(i915, crtc->pipe))
  			crtc->pch_fifo_underrun_disabled = true;
  	}
  }
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
/*
  	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
@@ -16309,7 +16309,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
  	 * without several WARNs, but for now let's take the easy
  	 * road.
  	 */
-	return IS_GEN(dev_priv, 6) &&
+	return IS_GEN(i915, 6) &&
  		crtc_state->base.active &&
  		crtc_state->shared_dpll &&
  		crtc_state->port_clock == 0;
@@ -16317,7 +16317,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
static void intel_sanitize_encoder(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_connector *connector;
  	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
  	struct intel_crtc_state *crtc_state = crtc ?
@@ -16378,21 +16378,21 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
  	/* notify opregion of the sanitized encoder state */
  	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		icl_sanitize_encoder_pll_mapping(encoder);
  }
-void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
+void i915_redisable_vga_power_on(struct drm_i915_private *i915)
  {
-	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
+	i915_reg_t vga_reg = i915_vgacntrl_reg(i915);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
  		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
-		i915_disable_vga(dev_priv);
+		i915_disable_vga(i915);
  	}
  }
-void i915_redisable_vga(struct drm_i915_private *dev_priv)
+void i915_redisable_vga(struct drm_i915_private *i915)
  {
  	intel_wakeref_t wakeref;
@@ -16405,23 +16405,23 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv)
  	 * follow the "don't touch the power well if we don't need it" policy
  	 * the rest of the driver uses.
  	 */
-	wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     POWER_DOMAIN_VGA);
  	if (!wakeref)
  		return;
- i915_redisable_vga_power_on(dev_priv);
+	i915_redisable_vga_power_on(i915);
- intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref);
  }
/* FIXME read out full plane state for all planes */
-static void readout_plane_state(struct drm_i915_private *dev_priv)
+static void readout_plane_state(struct drm_i915_private *i915)
  {
  	struct intel_plane *plane;
  	struct intel_crtc *crtc;
- for_each_intel_plane(&dev_priv->drm, plane) {
+	for_each_intel_plane(&i915->drm, plane) {
  		struct intel_plane_state *plane_state =
  			to_intel_plane_state(plane->base.state);
  		struct intel_crtc_state *crtc_state;
@@ -16430,7 +16430,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
  		visible = plane->get_hw_state(plane, &pipe);
-		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+		crtc = intel_get_crtc_for_pipe(i915, pipe);
  		crtc_state = to_intel_crtc_state(crtc->base.state);
intel_set_plane_visible(crtc_state, plane_state, visible);
@@ -16440,7 +16440,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
  			      enableddisabled(visible), pipe_name(pipe));
  	}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		struct intel_crtc_state *crtc_state =
  			to_intel_crtc_state(crtc->base.state);
@@ -16450,7 +16450,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
static void intel_modeset_readout_hw_state(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum pipe pipe;
  	struct intel_crtc *crtc;
  	struct intel_encoder *encoder;
@@ -16458,7 +16458,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
  	struct drm_connector_list_iter conn_iter;
  	int i;
- dev_priv->active_crtcs = 0;
+	i915->active_crtcs = 0;
for_each_intel_crtc(dev, crtc) {
  		struct intel_crtc_state *crtc_state =
@@ -16469,25 +16469,25 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
  		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
crtc_state->base.active = crtc_state->base.enable =
-			dev_priv->display.get_pipe_config(crtc, crtc_state);
+			i915->display.get_pipe_config(crtc, crtc_state);
crtc->base.enabled = crtc_state->base.enable;
  		crtc->active = crtc_state->base.active;
if (crtc_state->base.active)
-			dev_priv->active_crtcs |= 1 << crtc->pipe;
+			i915->active_crtcs |= 1 << crtc->pipe;
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
  			      crtc->base.base.id, crtc->base.name,
  			      enableddisabled(crtc_state->base.active));
  	}
- readout_plane_state(dev_priv);
+	readout_plane_state(i915);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+	for (i = 0; i < i915->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &i915->shared_dplls[i];
- pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
+		pll->on = pll->info->funcs->get_hw_state(i915, pll,
  							&pll->state.hw_state);
  		pll->state.crtc_mask = 0;
  		for_each_intel_crtc(dev, crtc) {
@@ -16510,7 +16510,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
  		if (encoder->get_hw_state(encoder, &pipe)) {
  			struct intel_crtc_state *crtc_state;
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+			crtc = intel_get_crtc_for_pipe(i915, pipe);
  			crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
@@ -16558,7 +16558,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
  		struct intel_bw_state *bw_state =
-			to_intel_bw_state(dev_priv->bw_obj.state);
+			to_intel_bw_state(i915->bw_obj.state);
  		struct intel_crtc_state *crtc_state =
  			to_intel_crtc_state(crtc->base.state);
  		struct intel_plane *plane;
@@ -16585,7 +16585,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
  			intel_crtc_compute_pixel_rate(crtc_state);
-			if (dev_priv->display.modeset_calc_cdclk) {
+			if (i915->display.modeset_calc_cdclk) {
  				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
  				if (WARN_ON(min_cdclk < 0))
  					min_cdclk = 0;
@@ -16596,11 +16596,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
  			update_scanline_offset(crtc_state);
  		}
- dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
-		dev_priv->min_voltage_level[crtc->pipe] =
+		i915->min_cdclk[crtc->pipe] = min_cdclk;
+		i915->min_voltage_level[crtc->pipe] =
  			crtc_state->min_voltage_level;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
  			const struct intel_plane_state *plane_state =
  				to_intel_plane_state(plane->base.state);
@@ -16615,16 +16615,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
  		intel_bw_crtc_update(bw_state, crtc_state);
-		intel_pipe_config_sanity_check(dev_priv, crtc_state);
+		intel_pipe_config_sanity_check(i915, crtc_state);
  	}
  }
static void
-get_encoder_power_domains(struct drm_i915_private *dev_priv)
+get_encoder_power_domains(struct drm_i915_private *i915)
  {
  	struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+	for_each_intel_encoder(&i915->drm, encoder) {
  		struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
@@ -16642,14 +16642,14 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
  	}
  }
-static void intel_early_display_was(struct drm_i915_private *dev_priv)
+static void intel_early_display_was(struct drm_i915_private *i915)
  {
  	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
-	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+	if (IS_CANNONLAKE(i915) || IS_GEMINILAKE(i915))
  		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
  			   DARBF_GATING_DIS);
- if (IS_HASWELL(dev_priv)) {
+	if (IS_HASWELL(i915)) {
  		/*
  		 * WaRsPkgCStateDisplayPMReq:hsw
  		 * System hang if this isn't done before disabling all planes!
@@ -16659,7 +16659,7 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
  	}
  }
-static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *i915,
  				       enum port port, i915_reg_t hdmi_reg)
  {
  	u32 val = I915_READ(hdmi_reg);
@@ -16677,7 +16677,7 @@ static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
  	I915_WRITE(hdmi_reg, val);
  }
-static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_dp_port(struct drm_i915_private *i915,
  				     enum port port, i915_reg_t dp_reg)
  {
  	u32 val = I915_READ(dp_reg);
@@ -16695,7 +16695,7 @@ static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
  	I915_WRITE(dp_reg, val);
  }
-static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+static void ibx_sanitize_pch_ports(struct drm_i915_private *i915)
  {
  	/*
  	 * The BIOS may select transcoder B on some of the PCH
@@ -16708,14 +16708,14 @@ static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
  	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
  	 * intel_disable_sdvo()).
  	 */
-	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
-	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
-	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+	ibx_sanitize_pch_dp_port(i915, PORT_B, PCH_DP_B);
+	ibx_sanitize_pch_dp_port(i915, PORT_C, PCH_DP_C);
+	ibx_sanitize_pch_dp_port(i915, PORT_D, PCH_DP_D);
/* PCH SDVOB multiplex with HDMIB */
-	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
-	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
-	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+	ibx_sanitize_pch_hdmi_port(i915, PORT_B, PCH_HDMIB);
+	ibx_sanitize_pch_hdmi_port(i915, PORT_C, PCH_HDMIC);
+	ibx_sanitize_pch_hdmi_port(i915, PORT_D, PCH_HDMID);
  }
/* Scan out the current hw modeset state,
@@ -16725,29 +16725,29 @@ static void
  intel_modeset_setup_hw_state(struct drm_device *dev,
  			     struct drm_modeset_acquire_ctx *ctx)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc_state *crtc_state;
  	struct intel_encoder *encoder;
  	struct intel_crtc *crtc;
  	intel_wakeref_t wakeref;
  	int i;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+	wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
- intel_early_display_was(dev_priv);
+	intel_early_display_was(i915);
  	intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
-	get_encoder_power_domains(dev_priv);
+	get_encoder_power_domains(i915);
- if (HAS_PCH_IBX(dev_priv))
-		ibx_sanitize_pch_ports(dev_priv);
+	if (HAS_PCH_IBX(i915))
+		ibx_sanitize_pch_ports(i915);
/*
  	 * intel_sanitize_plane_mapping() may need to do vblank
  	 * waits, so we need vblank interrupts restored beforehand.
  	 */
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		crtc_state = to_intel_crtc_state(crtc->base.state);
drm_crtc_vblank_reset(&crtc->base);
@@ -16756,12 +16756,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
  			intel_crtc_vblank_on(crtc_state);
  	}
- intel_sanitize_plane_mapping(dev_priv);
+	intel_sanitize_plane_mapping(i915);
for_each_intel_encoder(dev, encoder)
  		intel_sanitize_encoder(encoder);
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		crtc_state = to_intel_crtc_state(crtc->base.state);
  		intel_sanitize_crtc(crtc, ctx);
  		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
@@ -16769,8 +16769,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
  	intel_modeset_update_connector_atomic_state(dev);
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+	for (i = 0; i < i915->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &i915->shared_dplls[i];
if (!pll->on || pll->active_mask)
  			continue;
@@ -16778,20 +16778,20 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
  		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
  			      pll->info->name);
- pll->info->funcs->disable(dev_priv, pll);
+		pll->info->funcs->disable(i915, pll);
  		pll->on = false;
  	}
- if (IS_G4X(dev_priv)) {
-		g4x_wm_get_hw_state(dev_priv);
-		g4x_wm_sanitize(dev_priv);
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		vlv_wm_get_hw_state(dev_priv);
-		vlv_wm_sanitize(dev_priv);
-	} else if (INTEL_GEN(dev_priv) >= 9) {
-		skl_wm_get_hw_state(dev_priv);
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
-		ilk_wm_get_hw_state(dev_priv);
+	if (IS_G4X(i915)) {
+		g4x_wm_get_hw_state(i915);
+		g4x_wm_sanitize(i915);
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		vlv_wm_get_hw_state(i915);
+		vlv_wm_sanitize(i915);
+	} else if (INTEL_GEN(i915) >= 9) {
+		skl_wm_get_hw_state(i915);
+	} else if (HAS_PCH_SPLIT(i915)) {
+		ilk_wm_get_hw_state(i915);
  	}
for_each_intel_crtc(dev, crtc) {
@@ -16800,22 +16800,22 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
  		crtc_state = to_intel_crtc_state(crtc->base.state);
  		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
  		if (WARN_ON(put_domains))
-			modeset_put_power_domains(dev_priv, put_domains);
+			modeset_put_power_domains(i915, put_domains);
  	}
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
- intel_fbc_init_pipe_state(dev_priv);
+	intel_fbc_init_pipe_state(i915);
  }
void intel_display_resume(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_atomic_state *state = i915->modeset_restore_state;
  	struct drm_modeset_acquire_ctx ctx;
  	int ret;
- dev_priv->modeset_restore_state = NULL;
+	i915->modeset_restore_state = NULL;
  	if (state)
  		state->acquire_ctx = &ctx;
@@ -16832,7 +16832,7 @@ void intel_display_resume(struct drm_device *dev)
  	if (!ret)
  		ret = __intel_display_resume(dev, state, &ctx);
- intel_enable_ipc(dev_priv);
+	intel_enable_ipc(i915);
  	drm_modeset_drop_locks(&ctx);
  	drm_modeset_acquire_fini(&ctx);
@@ -16862,19 +16862,19 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
 
  void intel_modeset_cleanup(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
- flush_workqueue(dev_priv->modeset_wq);
+	flush_workqueue(i915->modeset_wq);
- flush_work(&dev_priv->atomic_helper.free_work);
-	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+	flush_work(&i915->atomic_helper.free_work);
+	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
/*
  	 * Interrupts and polling as the first thing to avoid creating havoc.
  	 * Too much stuff here (turning of connectors, ...) would
  	 * experience fancy races otherwise.
  	 */
-	intel_irq_uninstall(dev_priv);
+	intel_irq_uninstall(i915);
/*
  	 * Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -16883,37 +16883,37 @@ void intel_modeset_cleanup(struct drm_device *dev)
  	intel_hpd_poll_fini(dev);
/* poll work can call into fbdev, hence clean that up afterwards */
-	intel_fbdev_fini(dev_priv);
+	intel_fbdev_fini(i915);
  	intel_unregister_dsm_handler();
 
-	intel_fbc_global_disable(dev_priv);
+	intel_fbc_global_disable(i915);
/* flush any delayed tasks or pending work */
  	flush_scheduled_work();
- intel_hdcp_component_fini(dev_priv);
+	intel_hdcp_component_fini(i915);
  	drm_mode_config_cleanup(dev);
 
-	intel_overlay_cleanup(dev_priv);
+	intel_overlay_cleanup(i915);
- intel_gmbus_teardown(dev_priv);
+	intel_gmbus_teardown(i915);
- destroy_workqueue(dev_priv->modeset_wq);
+	destroy_workqueue(i915->modeset_wq);
- intel_fbc_cleanup_cfb(dev_priv);
+	intel_fbc_cleanup_cfb(i915);
  }
/*
   * set vga decode state - true == enable VGA decode
   */
-int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
+int intel_modeset_vga_set_state(struct drm_i915_private *i915, bool state)
  {
-	unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+	unsigned reg = INTEL_GEN(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
  	u16 gmch_ctrl;
- if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
+	if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
  		DRM_ERROR("failed to read control word\n");
  		return -EIO;
  	}
@@ -16926,7 +16926,7 @@ int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
  	else
  		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
- if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
+	if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
  		DRM_ERROR("failed to write control word\n");
  		return -EIO;
  	}
@@ -16980,7 +16980,7 @@ struct intel_display_error_state {
  };
struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_i915_private *dev_priv)
+intel_display_capture_error_state(struct drm_i915_private *i915)
  {
  	struct intel_display_error_state *error;
  	int transcoders[] = {
@@ -16993,19 +16993,19 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
  	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
 
-	if (!HAS_DISPLAY(dev_priv))
+	if (!HAS_DISPLAY(i915))
  		return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
  	if (error == NULL)
  		return NULL;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
- for_each_pipe(dev_priv, i) {
+	for_each_pipe(i915, i) {
  		error->pipe[i].power_domain_on =
-			__intel_display_power_is_enabled(dev_priv,
+			__intel_display_power_is_enabled(i915,
  							 POWER_DOMAIN_PIPE(i));
  		if (!error->pipe[i].power_domain_on)
  			continue;
@@ -17016,32 +17016,32 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
error->plane[i].control = I915_READ(DSPCNTR(i));
  		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
-		if (INTEL_GEN(dev_priv) <= 3) {
+		if (INTEL_GEN(i915) <= 3) {
  			error->plane[i].size = I915_READ(DSPSIZE(i));
  			error->plane[i].pos = I915_READ(DSPPOS(i));
  		}
-		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
+		if (INTEL_GEN(i915) <= 7 && !IS_HASWELL(i915))
  			error->plane[i].addr = I915_READ(DSPADDR(i));
-		if (INTEL_GEN(dev_priv) >= 4) {
+		if (INTEL_GEN(i915) >= 4) {
  			error->plane[i].surface = I915_READ(DSPSURF(i));
  			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
  		}
  		error->pipe[i].source = I915_READ(PIPESRC(i));
 
-		if (HAS_GMCH(dev_priv))
+		if (HAS_GMCH(i915))
  			error->pipe[i].stat = I915_READ(PIPESTAT(i));
  	}
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
  		enum transcoder cpu_transcoder = transcoders[i];
- if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+		if (!INTEL_INFO(i915)->trans_offsets[cpu_transcoder])
  			continue;
error->transcoder[i].available = true;
  		error->transcoder[i].power_domain_on =
-			__intel_display_power_is_enabled(dev_priv,
+			__intel_display_power_is_enabled(i915,
  				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
  		if (!error->transcoder[i].power_domain_on)
  			continue;
@@ -17066,17 +17066,17 @@ void
  intel_display_print_error_state(struct drm_i915_error_state_buf *m,
  				struct intel_display_error_state *error)
  {
-	struct drm_i915_private *dev_priv = m->i915;
+	struct drm_i915_private *i915 = m->i915;
  	int i;
if (!error)
  		return;
- err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(i915)->num_pipes);
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		err_printf(m, "PWR_WELL_CTL2: %08x\n",
  			   error->power_well_driver);
-	for_each_pipe(dev_priv, i) {
+	for_each_pipe(i915, i) {
  		err_printf(m, "Pipe [%d]:\n", i);
  		err_printf(m, "  Power: %s\n",
  			   onoff(error->pipe[i].power_domain_on));
@@ -17086,13 +17086,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
  		err_printf(m, "Plane [%d]:\n", i);
  		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
  		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
-		if (INTEL_GEN(dev_priv) <= 3) {
+		if (INTEL_GEN(i915) <= 3) {
  			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
  			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
  		}
-		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
+		if (INTEL_GEN(i915) <= 7 && !IS_HASWELL(i915))
  			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
-		if (INTEL_GEN(dev_priv) >= 4) {
+		if (INTEL_GEN(i915) >= 4) {
  			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
  			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
  		}
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index ee6b8194a459..7e4b4b1c0ac6 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -125,7 +125,7 @@ enum i9xx_plane_id {
  };
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * RUNTIME_INFO(i915)->num_sprites[(p)] + (s) + 'A')
/*
   * Per-pipe plane identifier.
@@ -353,8 +353,8 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
  			    struct intel_link_m_n *m_n,
  			    bool constant_n);
  bool is_ccs_modifier(u64 modifier);
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
-u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+void lpt_disable_clkout_dp(struct drm_i915_private *i915);
+u32 intel_plane_fb_max_stride(struct drm_i915_private *i915,
  			      u32 pixel_format, u64 modifier);
  bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
diff --git a/drivers/gpu/drm/i915/intel_display_power.c b/drivers/gpu/drm/i915/intel_display_power.c
index bb9ef1cea5db..9acfad5c83ef 100644
--- a/drivers/gpu/drm/i915/intel_display_power.c
+++ b/drivers/gpu/drm/i915/intel_display_power.c
@@ -17,7 +17,7 @@
  #include "intel_hotplug.h"
  #include "intel_sideband.h"
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_well_is_enabled(struct drm_i915_private *i915,
  					 enum i915_power_well_id power_well_id);
const char *
@@ -122,42 +122,42 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
  	}
  }
-static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+static void intel_power_well_enable(struct drm_i915_private *i915,
  				    struct i915_power_well *power_well)
  {
  	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
-	power_well->desc->ops->enable(dev_priv, power_well);
+	power_well->desc->ops->enable(i915, power_well);
  	power_well->hw_enabled = true;
  }
-static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+static void intel_power_well_disable(struct drm_i915_private *i915,
  				     struct i915_power_well *power_well)
  {
  	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
  	power_well->hw_enabled = false;
-	power_well->desc->ops->disable(dev_priv, power_well);
+	power_well->desc->ops->disable(i915, power_well);
  }
-static void intel_power_well_get(struct drm_i915_private *dev_priv,
+static void intel_power_well_get(struct drm_i915_private *i915,
  				 struct i915_power_well *power_well)
  {
  	if (!power_well->count++)
-		intel_power_well_enable(dev_priv, power_well);
+		intel_power_well_enable(i915, power_well);
  }
-static void intel_power_well_put(struct drm_i915_private *dev_priv,
+static void intel_power_well_put(struct drm_i915_private *i915,
  				 struct i915_power_well *power_well)
  {
  	WARN(!power_well->count, "Use count on power well %s is already zero",
  	     power_well->desc->name);
if (!--power_well->count)
-		intel_power_well_disable(dev_priv, power_well);
+		intel_power_well_disable(i915, power_well);
  }
/**
   * __intel_display_power_is_enabled - unlocked check for a power domain
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @domain: power domain to check
   *
   * This is the unlocked version of intel_display_power_is_enabled() and should
@@ -167,18 +167,18 @@ static void intel_power_well_put(struct drm_i915_private *dev_priv,
   * Returns:
   * True when the power domain is enabled, false otherwise.
   */
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+bool __intel_display_power_is_enabled(struct drm_i915_private *i915,
  				      enum intel_display_power_domain domain)
  {
  	struct i915_power_well *power_well;
  	bool is_enabled;
- if (dev_priv->runtime_pm.suspended)
+	if (i915->runtime_pm.suspended)
  		return false;
  	is_enabled = true;
 
-	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
+	for_each_power_domain_well_reverse(i915, power_well, BIT_ULL(domain)) {
  		if (power_well->desc->always_on)
  			continue;
@@ -193,7 +193,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
  /**
   * intel_display_power_is_enabled - check for a power domain
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @domain: power domain to check
   *
   * This function can be used to check the hw power domain state. It is mostly
@@ -208,16 +208,16 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
   * Returns:
   * True when the power domain is enabled, false otherwise.
   */
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_is_enabled(struct drm_i915_private *i915,
  				    enum intel_display_power_domain domain)
  {
  	struct i915_power_domains *power_domains;
  	bool ret;
- power_domains = &dev_priv->power_domains;
+	power_domains = &i915->power_domains;
mutex_lock(&power_domains->lock);
-	ret = __intel_display_power_is_enabled(dev_priv, domain);
+	ret = __intel_display_power_is_enabled(i915, domain);
  	mutex_unlock(&power_domains->lock);
return ret;
@@ -229,10 +229,10 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
   * to be enabled, and it will only be disabled if none of the registers is
   * requesting it to be enabled.
   */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_post_enable(struct drm_i915_private *i915,
  				       u8 irq_pipe_mask, bool has_vga)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
/*
  	 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -251,31 +251,31 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
  	}
if (irq_pipe_mask)
-		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+		gen8_irq_power_well_post_enable(i915, irq_pipe_mask);
  }
-static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_pre_disable(struct drm_i915_private *i915,
  				       u8 irq_pipe_mask)
  {
  	if (irq_pipe_mask)
-		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+		gen8_irq_power_well_pre_disable(i915, irq_pipe_mask);
  }
-static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+static void hsw_wait_for_power_well_enable(struct drm_i915_private *i915,
  					   struct i915_power_well *power_well)
  {
  	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
  	int pw_idx = power_well->desc->hsw.idx;
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
-	WARN_ON(intel_wait_for_register(&dev_priv->uncore,
+	WARN_ON(intel_wait_for_register(&i915->uncore,
  					regs->driver,
  					HSW_PWR_WELL_CTL_STATE(pw_idx),
  					HSW_PWR_WELL_CTL_STATE(pw_idx),
  					1));
  }
-static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+static u32 hsw_power_well_requesters(struct drm_i915_private *i915,
  				     const struct i915_power_well_regs *regs,
  				     int pw_idx)
  {
@@ -291,7 +291,7 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
  	return ret;
  }
-static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+static void hsw_wait_for_power_well_disable(struct drm_i915_private *i915,
  					    struct i915_power_well *power_well)
  {
  	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
@@ -310,7 +310,7 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
  	 */
  	wait_for((disabled = !(I915_READ(regs->driver) &
  			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
-		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
+		 (reqs = hsw_power_well_requesters(i915, regs, pw_idx)), 1);
  	if (disabled)
  		return;
@@ -319,16 +319,16 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
  		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
  }
-static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+static void gen9_wait_for_power_well_fuses(struct drm_i915_private *i915,
  					   enum skl_power_gate pg)
  {
  	/* Timeout 5us for PG#0, for other PGs 1us */
-	WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
+	WARN_ON(intel_wait_for_register(&i915->uncore, SKL_FUSE_STATUS,
  					SKL_FUSE_PG_DIST_STATUS(pg),
  					SKL_FUSE_PG_DIST_STATUS(pg), 1));
  }
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_enable(struct drm_i915_private *i915,
  				  struct i915_power_well *power_well)
  {
  	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
@@ -338,7 +338,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
  	u32 val;
if (wait_fuses) {
-		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+		pg = INTEL_GEN(i915) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
  						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
  		/*
  		 * For PW1 we have to wait both for the PW0/PG0 fuse state
@@ -348,15 +348,15 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
  		 * after the enabling.
  		 */
  		if (pg == SKL_PG1)
-			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
+			gen9_wait_for_power_well_fuses(i915, SKL_PG0);
  	}
val = I915_READ(regs->driver);
  	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-	hsw_wait_for_power_well_enable(dev_priv, power_well);
+	hsw_wait_for_power_well_enable(i915, power_well);
/* Display WA #1178: cnl */
-	if (IS_CANNONLAKE(dev_priv) &&
+	if (IS_CANNONLAKE(i915) &&
  	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
  	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
  		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
@@ -365,32 +365,32 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
  	}
if (wait_fuses)
-		gen9_wait_for_power_well_fuses(dev_priv, pg);
+		gen9_wait_for_power_well_fuses(i915, pg);
- hsw_power_well_post_enable(dev_priv,
+	hsw_power_well_post_enable(i915,
  				   power_well->desc->hsw.irq_pipe_mask,
  				   power_well->desc->hsw.has_vga);
  }
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_disable(struct drm_i915_private *i915,
  				   struct i915_power_well *power_well)
  {
  	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
  	int pw_idx = power_well->desc->hsw.idx;
  	u32 val;
- hsw_power_well_pre_disable(dev_priv,
+	hsw_power_well_pre_disable(i915,
  				   power_well->desc->hsw.irq_pipe_mask);
val = I915_READ(regs->driver);
  	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
-	hsw_wait_for_power_well_disable(dev_priv, power_well);
+	hsw_wait_for_power_well_disable(i915, power_well);
  }
  #define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
 
  static void
-icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *i915,
  				    struct i915_power_well *power_well)
  {
  	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
@@ -404,12 +404,12 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
  	val = I915_READ(ICL_PORT_CL_DW12(port));
  	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
- hsw_wait_for_power_well_enable(dev_priv, power_well);
+	hsw_wait_for_power_well_enable(i915, power_well);
/* Display WA #1178: icl */
-	if (IS_ICELAKE(dev_priv) &&
+	if (IS_ICELAKE(i915) &&
  	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
-	    !intel_bios_is_port_edp(dev_priv, port)) {
+	    !intel_bios_is_port_edp(i915, port)) {
  		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
  		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
  		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
@@ -417,7 +417,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
  }
static void
-icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *i915,
  				     struct i915_power_well *power_well)
  {
  	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
@@ -431,14 +431,14 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
  	val = I915_READ(regs->driver);
  	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_disable(dev_priv, power_well);
+	hsw_wait_for_power_well_disable(i915, power_well);
  }
#define ICL_AUX_PW_TO_CH(pw_idx) \
  	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
static void
-icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *i915,
  				 struct i915_power_well *power_well)
  {
  	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
@@ -450,7 +450,7 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
  		val |= DP_AUX_CH_CTL_TBT_IO;
  	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
- hsw_power_well_enable(dev_priv, power_well);
+	hsw_power_well_enable(i915, power_well);
  }
/*
@@ -458,7 +458,7 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
   * enable it, so check if it's enabled and also check if we've requested it to
   * be enabled.
   */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool hsw_power_well_enabled(struct drm_i915_private *i915,
  				   struct i915_power_well *power_well)
  {
  	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
@@ -476,14 +476,14 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
  	 * BIOS's own request bits, which are forced-on for these power wells
  	 * when exiting DC5/6.
  	 */
-	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
+	if (IS_GEN(i915, 9) && !IS_GEN9_LP(i915) &&
  	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
  		val |= I915_READ(regs->bios);
return (val & mask) == mask;
  }
-static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+static void assert_can_enable_dc9(struct drm_i915_private *i915)
  {
  	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
  		  "DC9 already programmed to be enabled.\n");
@@ -492,7 +492,7 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
  	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
  		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
  		  "Power well 2 on.\n");
-	WARN_ONCE(intel_irqs_enabled(dev_priv),
+	WARN_ONCE(intel_irqs_enabled(i915),
  		  "Interrupts not disabled yet.\n");
/*
@@ -504,9 +504,9 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
  	  */
  }
-static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+static void assert_can_disable_dc9(struct drm_i915_private *i915)
  {
-	WARN_ONCE(intel_irqs_enabled(dev_priv),
+	WARN_ONCE(intel_irqs_enabled(i915),
  		  "Interrupts not disabled yet.\n");
  	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
  		  "DC5 still not disabled.\n");
@@ -520,7 +520,7 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
  	  */
  }
-static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+static void gen9_write_dc_state(struct drm_i915_private *i915,
  				u32 state)
  {
  	int rewrites = 0;
@@ -557,14 +557,14 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
  			      state, rewrites);
  }
-static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
+static u32 gen9_dc_mask(struct drm_i915_private *i915)
  {
  	u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
-	if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
-	else if (IS_GEN9_LP(dev_priv))
+	else if (IS_GEN9_LP(i915))
  		mask |= DC_STATE_EN_DC9;
  	else
  		mask |= DC_STATE_EN_UPTO_DC6;
@@ -572,20 +572,20 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
  	return mask;
  }
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+void gen9_sanitize_dc_state(struct drm_i915_private *i915)
  {
  	u32 val;
- val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(i915);
DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
-		      dev_priv->csr.dc_state, val);
-	dev_priv->csr.dc_state = val;
+		      i915->csr.dc_state, val);
+	i915->csr.dc_state = val;
  }
/**
   * gen9_set_dc_state - set target display C power state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @state: target DC power state
   * - DC_STATE_DISABLE
   * - DC_STATE_EN_UPTO_DC5
@@ -606,35 +606,35 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
   * back on and register state is restored. This is guaranteed by the MMIO write
   * to DC_STATE_EN blocking until the state is restored.
   */
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
+static void gen9_set_dc_state(struct drm_i915_private *i915, u32 state)
  {
  	u32 val;
  	u32 mask;
- if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
-		state &= dev_priv->csr.allowed_dc_mask;
+	if (WARN_ON_ONCE(state & ~i915->csr.allowed_dc_mask))
+		state &= i915->csr.allowed_dc_mask;
val = I915_READ(DC_STATE_EN);
-	mask = gen9_dc_mask(dev_priv);
+	mask = gen9_dc_mask(i915);
  	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
  		      val & mask, state);
/* Check if DMC is ignoring our DC state requests */
-	if ((val & mask) != dev_priv->csr.dc_state)
+	if ((val & mask) != i915->csr.dc_state)
  		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
-			  dev_priv->csr.dc_state, val & mask);
+			  i915->csr.dc_state, val & mask);
val &= ~mask;
  	val |= state;
- gen9_write_dc_state(dev_priv, val);
+	gen9_write_dc_state(i915, val);
- dev_priv->csr.dc_state = val & mask;
+	i915->csr.dc_state = val & mask;
  }
-void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+void bxt_enable_dc9(struct drm_i915_private *i915)
  {
-	assert_can_enable_dc9(dev_priv);
+	assert_can_enable_dc9(i915);
DRM_DEBUG_KMS("Enabling DC9\n");
  	/*
@@ -642,23 +642,23 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
  	 * platforms with South Display Engine on PCH,
  	 * because PPS registers are always on.
  	 */
-	if (!HAS_PCH_SPLIT(dev_priv))
-		intel_power_sequencer_reset(dev_priv);
-	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
+	if (!HAS_PCH_SPLIT(i915))
+		intel_power_sequencer_reset(i915);
+	gen9_set_dc_state(i915, DC_STATE_EN_DC9);
  }
-void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+void bxt_disable_dc9(struct drm_i915_private *i915)
  {
-	assert_can_disable_dc9(dev_priv);
+	assert_can_disable_dc9(i915);
DRM_DEBUG_KMS("Disabling DC9\n"); - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	gen9_set_dc_state(i915, DC_STATE_DISABLE);
- intel_pps_unlock_regs_wa(dev_priv);
+	intel_pps_unlock_regs_wa(i915);
  }
-static void assert_csr_loaded(struct drm_i915_private *dev_priv)
+static void assert_csr_loaded(struct drm_i915_private *i915)
  {
  	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
  		  "CSR program storage start is NULL\n");
@@ -667,12 +667,12 @@ static void assert_csr_loaded(struct drm_i915_private *dev_priv)
  }
static struct i915_power_well *
-lookup_power_well(struct drm_i915_private *dev_priv,
+lookup_power_well(struct drm_i915_private *i915,
  		  enum i915_power_well_id power_well_id)
  {
  	struct i915_power_well *power_well;
- for_each_power_well(dev_priv, power_well)
+	for_each_power_well(i915, power_well)
  		if (power_well->desc->id == power_well_id)
  			return power_well;
@@ -684,62 +684,62 @@ lookup_power_well(struct drm_i915_private *dev_priv,
  	 * our driver.
  	 */
  	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
-	return &dev_priv->power_domains.power_wells[0];
+	return &i915->power_domains.power_wells[0];
  }
-static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+static void assert_can_enable_dc5(struct drm_i915_private *i915)
  {
-	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+	bool pg2_enabled = intel_display_power_well_is_enabled(i915,
  					SKL_DISP_PW_2);
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
  		  "DC5 already programmed to be enabled.\n");
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(i915);
- assert_csr_loaded(dev_priv);
+	assert_csr_loaded(i915);
  }
-void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+void gen9_enable_dc5(struct drm_i915_private *i915)
  {
-	assert_can_enable_dc5(dev_priv);
+	assert_can_enable_dc5(i915);
DRM_DEBUG_KMS("Enabling DC5\n"); /* Wa Display #1183: skl,kbl,cfl */
-	if (IS_GEN9_BC(dev_priv))
+	if (IS_GEN9_BC(i915))
  		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
  			   SKL_SELECT_ALTERNATE_DC_EXIT);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+	gen9_set_dc_state(i915, DC_STATE_EN_UPTO_DC5);
  }
-static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+static void assert_can_enable_dc6(struct drm_i915_private *i915)
  {
  	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
  		  "Backlight is not disabled.\n");
  	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
  		  "DC6 already programmed to be enabled.\n");
- assert_csr_loaded(dev_priv);
+	assert_csr_loaded(i915);
  }
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
+void skl_enable_dc6(struct drm_i915_private *i915)
  {
-	assert_can_enable_dc6(dev_priv);
+	assert_can_enable_dc6(i915);
DRM_DEBUG_KMS("Enabling DC6\n"); /* Wa Display #1183: skl,kbl,cfl */
-	if (IS_GEN9_BC(dev_priv))
+	if (IS_GEN9_BC(i915))
  		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
  			   SKL_SELECT_ALTERNATE_DC_EXIT);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+	gen9_set_dc_state(i915, DC_STATE_EN_UPTO_DC6);
  }
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void hsw_power_well_sync_hw(struct drm_i915_private *i915,
  				   struct i915_power_well *power_well)
  {
  	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
@@ -757,52 +757,52 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
  	}
  }
-static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *i915,
  					   struct i915_power_well *power_well)
  {
-	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
+	bxt_ddi_phy_init(i915, power_well->desc->bxt.phy);
  }
-static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *i915,
  					    struct i915_power_well *power_well)
  {
-	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
+	bxt_ddi_phy_uninit(i915, power_well->desc->bxt.phy);
  }
-static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *i915,
  					    struct i915_power_well *power_well)
  {
-	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
+	return bxt_ddi_phy_is_enabled(i915, power_well->desc->bxt.phy);
  }
-static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *i915)
  {
  	struct i915_power_well *power_well;
- power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
+	power_well = lookup_power_well(i915, BXT_DISP_PW_DPIO_CMN_A);
  	if (power_well->count > 0)
-		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
+		bxt_ddi_phy_verify_state(i915, power_well->desc->bxt.phy);
- power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+	power_well = lookup_power_well(i915, VLV_DISP_PW_DPIO_CMN_BC);
  	if (power_well->count > 0)
-		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
+		bxt_ddi_phy_verify_state(i915, power_well->desc->bxt.phy);
- if (IS_GEMINILAKE(dev_priv)) {
-		power_well = lookup_power_well(dev_priv,
+	if (IS_GEMINILAKE(i915)) {
+		power_well = lookup_power_well(i915,
  					       GLK_DISP_PW_DPIO_CMN_C);
  		if (power_well->count > 0)
-			bxt_ddi_phy_verify_state(dev_priv,
+			bxt_ddi_phy_verify_state(i915,
  						 power_well->desc->bxt.phy);
  	}
  }
-static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *i915,
  					   struct i915_power_well *power_well)
  {
  	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
  }
-static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *i915)
  {
  	u32 tmp = I915_READ(DBUF_CTL);
@@ -811,92 +811,92 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
  	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
  }
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *i915,
  					  struct i915_power_well *power_well)
  {
  	struct intel_cdclk_state cdclk_state = {};
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	gen9_set_dc_state(i915, DC_STATE_DISABLE);
- dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
+	i915->display.get_cdclk(i915, &cdclk_state);
  	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
-	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
+	WARN_ON(intel_cdclk_needs_modeset(&i915->cdclk.hw, &cdclk_state));
- gen9_assert_dbuf_enabled(dev_priv);
+	gen9_assert_dbuf_enabled(i915);
- if (IS_GEN9_LP(dev_priv))
-		bxt_verify_ddi_phy_power_wells(dev_priv);
+	if (IS_GEN9_LP(i915))
+		bxt_verify_ddi_phy_power_wells(i915);
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		/*
  		 * DMC retains HW context only for port A, the other combo
  		 * PHY's HW context for port B is lost after DC transitions,
  		 * so we need to restore it manually.
  		 */
-		intel_combo_phy_init(dev_priv);
+		intel_combo_phy_init(i915);
  }
-static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+static void gen9_dc_off_power_well_disable(struct drm_i915_private *i915,
  					   struct i915_power_well *power_well)
  {
-	if (!dev_priv->csr.dmc_payload)
+	if (!i915->csr.dmc_payload)
  		return;
- if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
-		skl_enable_dc6(dev_priv);
-	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
-		gen9_enable_dc5(dev_priv);
+	if (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
+		skl_enable_dc6(i915);
+	else if (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
+		gen9_enable_dc5(i915);
  }
-static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *i915,
  					 struct i915_power_well *power_well)
  {
  }
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *i915,
  					   struct i915_power_well *power_well)
  {
  }
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *i915,
  					     struct i915_power_well *power_well)
  {
  	return true;
  }
-static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_enable(struct drm_i915_private *i915,
  					 struct i915_power_well *power_well)
  {
  	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
-		i830_enable_pipe(dev_priv, PIPE_A);
+		i830_enable_pipe(i915, PIPE_A);
  	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
-		i830_enable_pipe(dev_priv, PIPE_B);
+		i830_enable_pipe(i915, PIPE_B);
  }
-static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_disable(struct drm_i915_private *i915,
  					  struct i915_power_well *power_well)
  {
-	i830_disable_pipe(dev_priv, PIPE_B);
-	i830_disable_pipe(dev_priv, PIPE_A);
+	i830_disable_pipe(i915, PIPE_B);
+	i830_disable_pipe(i915, PIPE_A);
  }
-static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool i830_pipes_power_well_enabled(struct drm_i915_private *i915,
  					  struct i915_power_well *power_well)
  {
  	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
  		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
  }
-static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_sync_hw(struct drm_i915_private *i915,
  					  struct i915_power_well *power_well)
  {
  	if (power_well->count > 0)
-		i830_pipes_power_well_enable(dev_priv, power_well);
+		i830_pipes_power_well_enable(i915, power_well);
  	else
-		i830_pipes_power_well_disable(dev_priv, power_well);
+		i830_pipes_power_well_disable(i915, power_well);
  }
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+static void vlv_set_power_well(struct drm_i915_private *i915,
  			       struct i915_power_well *power_well, bool enable)
  {
  	int pw_idx = power_well->desc->vlv.idx;
@@ -908,43 +908,43 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
  	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
  			 PUNIT_PWRGT_PWR_GATE(pw_idx);
- vlv_punit_get(dev_priv);
+	vlv_punit_get(i915);
#define COND \
-	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+	((vlv_punit_read(i915, PUNIT_REG_PWRGT_STATUS) & mask) == state)
if (COND)
  		goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+	ctrl = vlv_punit_read(i915, PUNIT_REG_PWRGT_CTRL);
  	ctrl &= ~mask;
  	ctrl |= state;
-	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+	vlv_punit_write(i915, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
  		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
  			  state,
-			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+			  vlv_punit_read(i915, PUNIT_REG_PWRGT_CTRL));
  #undef COND
 
  out:
-	vlv_punit_put(dev_priv);
+	vlv_punit_put(i915);
  }
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_power_well_enable(struct drm_i915_private *i915,
  				  struct i915_power_well *power_well)
  {
-	vlv_set_power_well(dev_priv, power_well, true);
+	vlv_set_power_well(i915, power_well, true);
  }
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_power_well_disable(struct drm_i915_private *i915,
  				   struct i915_power_well *power_well)
  {
-	vlv_set_power_well(dev_priv, power_well, false);
+	vlv_set_power_well(i915, power_well, false);
  }
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool vlv_power_well_enabled(struct drm_i915_private *i915,
  				   struct i915_power_well *power_well)
  {
  	int pw_idx = power_well->desc->vlv.idx;
@@ -956,9 +956,9 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
  	mask = PUNIT_PWRGT_MASK(pw_idx);
  	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
- vlv_punit_get(dev_priv);
+	vlv_punit_get(i915);
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+	state = vlv_punit_read(i915, PUNIT_REG_PWRGT_STATUS) & mask;
  	/*
  	 * We only ever set the power-on and power-gate states, anything
  	 * else is unexpected.
@@ -972,15 +972,15 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
  	 * A transient state at this point would mean some unexpected party
  	 * is poking at the power controls too.
  	 */
-	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+	ctrl = vlv_punit_read(i915, PUNIT_REG_PWRGT_CTRL) & mask;
  	WARN_ON(ctrl != state);
- vlv_punit_put(dev_priv);
+	vlv_punit_put(i915);
return enabled;
  }
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+static void vlv_init_display_clock_gating(struct drm_i915_private *i915)
  {
  	u32 val;
@@ -1001,13 +1001,13 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
  	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
  	I915_WRITE(CBR1_VLV, 0);
- WARN_ON(dev_priv->rawclk_freq == 0);
+	WARN_ON(i915->rawclk_freq == 0);
I915_WRITE(RAWCLK_FREQ_VLV,
-		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
+		   DIV_ROUND_CLOSEST(i915->rawclk_freq, 1000));
  }
-static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+static void vlv_display_power_well_init(struct drm_i915_private *i915)
  {
  	struct intel_encoder *encoder;
  	enum pipe pipe;
@@ -1020,7 +1020,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
  	 *
  	 * CHV DPLL B/C have some issues if VGA mode is enabled.
  	 */
-	for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		u32 val = I915_READ(DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1030,71 +1030,71 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
  		I915_WRITE(DPLL(pipe), val);
  	}
- vlv_init_display_clock_gating(dev_priv);
+	vlv_init_display_clock_gating(i915);
- spin_lock_irq(&dev_priv->irq_lock);
-	valleyview_enable_display_irqs(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	valleyview_enable_display_irqs(i915);
+	spin_unlock_irq(&i915->irq_lock);
/*
  	 * During driver initialization/resume we can avoid restoring the
  	 * part of the HW/SW state that will be inited anyway explicitly.
  	 */
-	if (dev_priv->power_domains.initializing)
+	if (i915->power_domains.initializing)
  		return;
- intel_hpd_init(dev_priv);
+	intel_hpd_init(i915);
/* Re-enable the ADPA, if we have one */
-	for_each_intel_encoder(&dev_priv->drm, encoder) {
+	for_each_intel_encoder(&i915->drm, encoder) {
  		if (encoder->type == INTEL_OUTPUT_ANALOG)
  			intel_crt_reset(&encoder->base);
  	}
- i915_redisable_vga_power_on(dev_priv);
+	i915_redisable_vga_power_on(i915);
- intel_pps_unlock_regs_wa(dev_priv);
+	intel_pps_unlock_regs_wa(i915);
  }
-static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+static void vlv_display_power_well_deinit(struct drm_i915_private *i915)
  {
-	spin_lock_irq(&dev_priv->irq_lock);
-	valleyview_disable_display_irqs(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	valleyview_disable_display_irqs(i915);
+	spin_unlock_irq(&i915->irq_lock);
/* make sure we're done processing display irqs */
-	synchronize_irq(dev_priv->drm.irq);
+	synchronize_irq(i915->drm.irq);
- intel_power_sequencer_reset(dev_priv);
+	intel_power_sequencer_reset(i915);
/* Prevent us from re-enabling polling on accident in late suspend */
-	if (!dev_priv->drm.dev->power.is_suspended)
-		intel_hpd_poll_init(dev_priv);
+	if (!i915->drm.dev->power.is_suspended)
+		intel_hpd_poll_init(i915);
  }
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_display_power_well_enable(struct drm_i915_private *i915,
  					  struct i915_power_well *power_well)
  {
-	vlv_set_power_well(dev_priv, power_well, true);
+	vlv_set_power_well(i915, power_well, true);
- vlv_display_power_well_init(dev_priv);
+	vlv_display_power_well_init(i915);
  }
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_display_power_well_disable(struct drm_i915_private *i915,
  					   struct i915_power_well *power_well)
  {
-	vlv_display_power_well_deinit(dev_priv);
+	vlv_display_power_well_deinit(i915);
- vlv_set_power_well(dev_priv, power_well, false);
+	vlv_set_power_well(i915, power_well, false);
  }
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *i915,
  					   struct i915_power_well *power_well)
  {
  	/* since ref/cri clock was enabled */
  	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
+	vlv_set_power_well(i915, power_well, true);
/*
  	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
@@ -1110,31 +1110,31 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
  	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
  }
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *i915,
  					    struct i915_power_well *power_well)
  {
  	enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
-		assert_pll_disabled(dev_priv, pipe);
+	for_each_pipe(i915, pipe)
+		assert_pll_disabled(i915, pipe);
/* Assert common reset */
  	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
- vlv_set_power_well(dev_priv, power_well, false);
+	vlv_set_power_well(i915, power_well, false);
  }
  #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
 
  #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
 
-static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+static void assert_chv_phy_status(struct drm_i915_private *i915)
  {
  	struct i915_power_well *cmn_bc =
-		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+		lookup_power_well(i915, VLV_DISP_PW_DPIO_CMN_BC);
  	struct i915_power_well *cmn_d =
-		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
-	u32 phy_control = dev_priv->chv_phy_control;
+		lookup_power_well(i915, CHV_DISP_PW_DPIO_CMN_D);
+	u32 phy_control = i915->chv_phy_control;
  	u32 phy_status = 0;
  	u32 phy_status_mask = 0xffffffff;
@@ -1145,7 +1145,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
  	 * reset (ie. the power well has been disabled at
  	 * least once).
  	 */
-	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+	if (!i915->chv_phy_assert[DPIO_PHY0])
  		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
  				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
  				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
@@ -1153,12 +1153,12 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
  				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
  				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
- if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+	if (!i915->chv_phy_assert[DPIO_PHY1])
  		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
  				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
  				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
- if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
+	if (cmn_bc->desc->ops->is_enabled(i915, cmn_bc)) {
  		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
/* this assumes override is only used to enable lanes */
@@ -1199,7 +1199,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
  			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
  	}
- if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
+	if (cmn_d->desc->ops->is_enabled(i915, cmn_d)) {
  		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
/* this assumes override is only used to enable lanes */
@@ -1224,19 +1224,19 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
  	 * The PHY may be busy with some initial calibration and whatnot,
  	 * so the power state can take a while to actually change.
  	 */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    DISPLAY_PHY_STATUS,
  				    phy_status_mask,
  				    phy_status,
  				    10))
  		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
  			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
-			   phy_status, dev_priv->chv_phy_control);
+			   phy_status, i915->chv_phy_control);
  }
  #undef BITS_SET
 
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *i915,
  					   struct i915_power_well *power_well)
  {
  	enum dpio_phy phy;
@@ -1256,51 +1256,51 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
/* since ref/cri clock was enabled */
  	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-	vlv_set_power_well(dev_priv, power_well, true);
+	vlv_set_power_well(i915, power_well, true);
/* Poll for phypwrgood signal */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    DISPLAY_PHY_STATUS,
  				    PHY_POWERGOOD(phy),
  				    PHY_POWERGOOD(phy),
  				    1))
  		DRM_ERROR("Display PHY %d is not power up\n", phy);
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* Enable dynamic power down */
-	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+	tmp = vlv_dpio_read(i915, pipe, CHV_CMN_DW28);
  	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
  		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
-	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+	vlv_dpio_write(i915, pipe, CHV_CMN_DW28, tmp);
if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
-		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+		tmp = vlv_dpio_read(i915, pipe, _CHV_CMN_DW6_CH1);
  		tmp |= DPIO_DYNPWRDOWNEN_CH1;
-		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+		vlv_dpio_write(i915, pipe, _CHV_CMN_DW6_CH1, tmp);
  	} else {
  		/*
  		 * Force the non-existing CL2 off. BXT does this
  		 * too, so maybe it saves some power even though
  		 * CL2 doesn't exist?
  		 */
-		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+		tmp = vlv_dpio_read(i915, pipe, CHV_CMN_DW30);
  		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
-		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+		vlv_dpio_write(i915, pipe, CHV_CMN_DW30, tmp);
  	}
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
-	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+	i915->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+	I915_WRITE(DISPLAY_PHY_CONTROL, i915->chv_phy_control);
DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
-		      phy, dev_priv->chv_phy_control);
+		      phy, i915->chv_phy_control);
- assert_chv_phy_status(dev_priv);
+	assert_chv_phy_status(i915);
  }
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *i915,
  					    struct i915_power_well *power_well)
  {
  	enum dpio_phy phy;
@@ -1310,28 +1310,28 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
  		phy = DPIO_PHY0;
-		assert_pll_disabled(dev_priv, PIPE_A);
-		assert_pll_disabled(dev_priv, PIPE_B);
+		assert_pll_disabled(i915, PIPE_A);
+		assert_pll_disabled(i915, PIPE_B);
  	} else {
  		phy = DPIO_PHY1;
-		assert_pll_disabled(dev_priv, PIPE_C);
+		assert_pll_disabled(i915, PIPE_C);
  	}
- dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
-	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+	i915->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+	I915_WRITE(DISPLAY_PHY_CONTROL, i915->chv_phy_control);
- vlv_set_power_well(dev_priv, power_well, false);
+	vlv_set_power_well(i915, power_well, false);
DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
-		      phy, dev_priv->chv_phy_control);
+		      phy, i915->chv_phy_control);
/* PHY is fully reset now, so we can enable the PHY state asserts */
-	dev_priv->chv_phy_assert[phy] = true;
+	i915->chv_phy_assert[phy] = true;
- assert_chv_phy_status(dev_priv);
+	assert_chv_phy_status(i915);
  }
-static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+static void assert_chv_phy_powergate(struct drm_i915_private *i915, enum dpio_phy phy,
  				     enum dpio_channel ch, bool override, unsigned int mask)
  {
  	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
@@ -1344,7 +1344,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
  	 * reset (ie. the power well has been disabled at
  	 * least once).
  	 */
-	if (!dev_priv->chv_phy_assert[phy])
+	if (!i915->chv_phy_assert[phy])
  		return;
if (ch == DPIO_CH0)
@@ -1352,9 +1352,9 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
  	else
  		reg = _CHV_CMN_DW6_CH1;
- vlv_dpio_get(dev_priv);
-	val = vlv_dpio_read(dev_priv, pipe, reg);
-	vlv_dpio_put(dev_priv);
+	vlv_dpio_get(i915);
+	val = vlv_dpio_read(i915, pipe, reg);
+	vlv_dpio_put(i915);
/*
  	 * This assumes !override is only used when the port is disabled.
@@ -1393,30 +1393,30 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
  	     reg, val);
  }
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+bool chv_phy_powergate_ch(struct drm_i915_private *i915, enum dpio_phy phy,
  			  enum dpio_channel ch, bool override)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	bool was_override;
mutex_lock(&power_domains->lock); - was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+	was_override = i915->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
if (override == was_override)
  		goto out;
if (override)
-		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+		i915->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
  	else
-		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+		i915->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+	I915_WRITE(DISPLAY_PHY_CONTROL, i915->chv_phy_control);
DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
-		      phy, ch, dev_priv->chv_phy_control);
+		      phy, ch, i915->chv_phy_control);
- assert_chv_phy_status(dev_priv);
+	assert_chv_phy_status(i915);
out:
  	mutex_unlock(&power_domains->lock);
@@ -1427,43 +1427,43 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
  void chv_phy_powergate_lanes(struct intel_encoder *encoder,
  			     bool override, unsigned int mask)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
  	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
mutex_lock(&power_domains->lock); - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
-	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+	i915->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+	i915->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
if (override)
-		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+		i915->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
  	else
-		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+		i915->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+	I915_WRITE(DISPLAY_PHY_CONTROL, i915->chv_phy_control);
DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
-		      phy, ch, mask, dev_priv->chv_phy_control);
+		      phy, ch, mask, i915->chv_phy_control);
- assert_chv_phy_status(dev_priv);
+	assert_chv_phy_status(i915);
- assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+	assert_chv_phy_powergate(i915, phy, ch, override, mask);
mutex_unlock(&power_domains->lock);
  }
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *i915,
  					struct i915_power_well *power_well)
  {
  	enum pipe pipe = PIPE_A;
  	bool enabled;
  	u32 state, ctrl;
- vlv_punit_get(dev_priv);
+	vlv_punit_get(i915);
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+	state = vlv_punit_read(i915, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
  	/*
  	 * We only ever set the power-on and power-gate states, anything
  	 * else is unexpected.
@@ -1475,15 +1475,15 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
  	 * A transient state at this point would mean some unexpected party
  	 * is poking at the power controls too.
  	 */
-	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+	ctrl = vlv_punit_read(i915, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
  	WARN_ON(ctrl << 16 != state);
- vlv_punit_put(dev_priv);
+	vlv_punit_put(i915);
return enabled;
  }
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+static void chv_set_pipe_power_well(struct drm_i915_private *i915,
  				    struct i915_power_well *power_well,
  				    bool enable)
  {
@@ -1493,44 +1493,44 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
  	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

-	vlv_punit_get(dev_priv);
+	vlv_punit_get(i915);
#define COND \
-	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+	((vlv_punit_read(i915, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
if (COND)
  		goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+	ctrl = vlv_punit_read(i915, PUNIT_REG_DSPSSPM);
  	ctrl &= ~DP_SSC_MASK(pipe);
  	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
-	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+	vlv_punit_write(i915, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
  		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
  			  state,
-			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+			  vlv_punit_read(i915, PUNIT_REG_DSPSSPM));
  #undef COND

  out:
-	vlv_punit_put(dev_priv);
+	vlv_punit_put(i915);
  }
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_enable(struct drm_i915_private *i915,
  				       struct i915_power_well *power_well)
  {
-	chv_set_pipe_power_well(dev_priv, power_well, true);
+	chv_set_pipe_power_well(i915, power_well, true);
- vlv_display_power_well_init(dev_priv);
+	vlv_display_power_well_init(i915);
  }
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_disable(struct drm_i915_private *i915,
  					struct i915_power_well *power_well)
  {
-	vlv_display_power_well_deinit(dev_priv);
+	vlv_display_power_well_deinit(i915);
- chv_set_pipe_power_well(dev_priv, power_well, false);
+	chv_set_pipe_power_well(i915, power_well, false);
  }
static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
@@ -1627,10 +1627,10 @@ async_put_domains_clear_domain(struct i915_power_domains *power_domains,
  }
static bool
-intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
+intel_display_power_grab_async_put_ref(struct drm_i915_private *i915,
  				       enum intel_display_power_domain domain)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	bool ret = false;
if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
@@ -1644,7 +1644,7 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
  		goto out_verify;
cancel_delayed_work(&power_domains->async_put_work);
-	intel_runtime_pm_put_raw(dev_priv,
+	intel_runtime_pm_put_raw(i915,
  				 fetch_and_zero(&power_domains->async_put_wakeref));
  out_verify:
  	verify_async_put_domains_state(power_domains);
@@ -1653,24 +1653,24 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
  }
static void
-__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+__intel_display_power_get_domain(struct drm_i915_private *i915,
  				 enum intel_display_power_domain domain)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *power_well;
- if (intel_display_power_grab_async_put_ref(dev_priv, domain))
+	if (intel_display_power_grab_async_put_ref(i915, domain))
  		return;
- for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
-		intel_power_well_get(dev_priv, power_well);
+	for_each_power_domain_well(i915, power_well, BIT_ULL(domain))
+		intel_power_well_get(i915, power_well);
power_domains->domain_use_count[domain]++;
  }
/**
   * intel_display_power_get - grab a power domain reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @domain: power domain to reference
   *
   * This function grabs a power domain reference for @domain and ensures that the
@@ -1680,14 +1680,14 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
   * Any power domain reference obtained by this function must have a symmetric
   * call to intel_display_power_put() to release the reference again.
   */
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *i915,
  					enum intel_display_power_domain domain)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-	intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
+	struct i915_power_domains *power_domains = &i915->power_domains;
+	intel_wakeref_t wakeref = intel_runtime_pm_get(i915);
mutex_lock(&power_domains->lock);
-	__intel_display_power_get_domain(dev_priv, domain);
+	__intel_display_power_get_domain(i915, domain);
  	mutex_unlock(&power_domains->lock);
return wakeref;
@@ -1695,7 +1695,7 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
/**
   * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @domain: power domain to reference
   *
   * This function grabs a power domain reference for @domain and ensures that the
@@ -1706,21 +1706,21 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
   * call to intel_display_power_put() to release the reference again.
   */
  intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+intel_display_power_get_if_enabled(struct drm_i915_private *i915,
  				   enum intel_display_power_domain domain)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	intel_wakeref_t wakeref;
  	bool is_enabled;
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+	wakeref = intel_runtime_pm_get_if_in_use(i915);
  	if (!wakeref)
  		return false;
  	mutex_lock(&power_domains->lock);

-	if (__intel_display_power_is_enabled(dev_priv, domain)) {
-		__intel_display_power_get_domain(dev_priv, domain);
+	if (__intel_display_power_is_enabled(i915, domain)) {
+		__intel_display_power_get_domain(i915, domain);
  		is_enabled = true;
  	} else {
  		is_enabled = false;
@@ -1729,7 +1729,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
  	mutex_unlock(&power_domains->lock);
if (!is_enabled) {
-		intel_runtime_pm_put(dev_priv, wakeref);
+		intel_runtime_pm_put(i915, wakeref);
  		wakeref = 0;
  	}
@@ -1737,14 +1737,14 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
  }
static void
-__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
+__intel_display_power_put_domain(struct drm_i915_private *i915,
  				 enum intel_display_power_domain domain)
  {
  	struct i915_power_domains *power_domains;
  	struct i915_power_well *power_well;
  	const char *name = intel_display_power_domain_str(domain);
- power_domains = &dev_priv->power_domains;
+	power_domains = &i915->power_domains;
WARN(!power_domains->domain_use_count[domain],
  	     "Use count on domain %s is already zero\n",
@@ -1755,23 +1755,23 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
  	power_domains->domain_use_count[domain]--;

-	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
-		intel_power_well_put(dev_priv, power_well);
+	for_each_power_domain_well_reverse(i915, power_well, BIT_ULL(domain))
+		intel_power_well_put(i915, power_well);
  }
-static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+static void __intel_display_power_put(struct drm_i915_private *i915,
  				      enum intel_display_power_domain domain)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
mutex_lock(&power_domains->lock);
-	__intel_display_power_put_domain(dev_priv, domain);
+	__intel_display_power_put_domain(i915, domain);
  	mutex_unlock(&power_domains->lock);
  }
/**
   * intel_display_power_put_unchecked - release an unchecked power domain reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @domain: power domain to reference
   *
   * This function drops the power domain reference obtained by
@@ -1782,11 +1782,11 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv,
   * new code, as the correctness of its use cannot be checked. Always use
   * intel_display_power_put() instead.
   */
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+void intel_display_power_put_unchecked(struct drm_i915_private *i915,
  				       enum intel_display_power_domain domain)
  {
-	__intel_display_power_put(dev_priv, domain);
-	intel_runtime_pm_put_unchecked(dev_priv);
+	__intel_display_power_put(i915, domain);
+	intel_runtime_pm_put_unchecked(i915);
  }
static void
@@ -1803,7 +1803,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
  static void
  release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		container_of(power_domains, struct drm_i915_private,
  			     power_domains);
  	enum intel_display_power_domain domain;
@@ -1814,26 +1814,26 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
  	 * wakeref to make the state checker happy about the HW access during
  	 * power well disabling.
  	 */
-	assert_rpm_raw_wakeref_held(dev_priv);
-	wakeref = intel_runtime_pm_get(dev_priv);
+	assert_rpm_raw_wakeref_held(i915);
+	wakeref = intel_runtime_pm_get(i915);
for_each_power_domain(domain, mask) {
  		/* Clear before put, so put's sanity check is happy. */
  		async_put_domains_clear_domain(power_domains, domain);
-		__intel_display_power_put_domain(dev_priv, domain);
+		__intel_display_power_put_domain(i915, domain);
  	}
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
  }
static void
  intel_display_power_put_async_work(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		container_of(work, struct drm_i915_private,
  			     power_domains.async_put_work.work);
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv);
+	struct i915_power_domains *power_domains = &i915->power_domains;
+	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(i915);
  	intel_wakeref_t old_work_wakeref = 0;
mutex_lock(&power_domains->lock);
@@ -1863,9 +1863,9 @@ intel_display_power_put_async_work(struct work_struct *work)
  	mutex_unlock(&power_domains->lock);
if (old_work_wakeref)
-		intel_runtime_pm_put_raw(dev_priv, old_work_wakeref);
+		intel_runtime_pm_put_raw(i915, old_work_wakeref);
  	if (new_work_wakeref)
-		intel_runtime_pm_put_raw(dev_priv, new_work_wakeref);
+		intel_runtime_pm_put_raw(i915, new_work_wakeref);
  }
/**
@@ -1974,7 +1974,7 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915)
  #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
  /**
   * intel_display_power_put - release a power domain reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @domain: power domain to reference
   * @wakeref: wakeref acquired for the reference that is being released
   *
@@ -1982,12 +1982,12 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915)
   * intel_display_power_get() and might power down the corresponding hardware
   * block right away if this is the last reference.
   */
-void intel_display_power_put(struct drm_i915_private *dev_priv,
+void intel_display_power_put(struct drm_i915_private *i915,
  			     enum intel_display_power_domain domain,
  			     intel_wakeref_t wakeref)
  {
-	__intel_display_power_put(dev_priv, domain);
-	intel_runtime_pm_put(dev_priv, wakeref);
+	__intel_display_power_put(i915, domain);
+	intel_runtime_pm_put(i915, wakeref);
  }
  #endif
@@ -2655,14 +2655,14 @@ static const struct i915_power_well_desc chv_power_wells[] = {
  	},
  };
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_well_is_enabled(struct drm_i915_private *i915,
  					 enum i915_power_well_id power_well_id)
  {
  	struct i915_power_well *power_well;
  	bool ret;
- power_well = lookup_power_well(dev_priv, power_well_id);
-	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
+	power_well = lookup_power_well(i915, power_well_id);
+	ret = power_well->desc->ops->is_enabled(i915, power_well);
return ret;
  }
@@ -3352,7 +3352,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
  };
static int
-sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
+sanitize_disable_power_well_option(const struct drm_i915_private *i915,
  				   int disable_power_well)
  {
  	if (disable_power_well >= 0)
@@ -3361,14 +3361,14 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
  	return 1;
  }
-static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+static u32 get_allowed_dc_mask(const struct drm_i915_private *i915,
  			       int enable_dc)
  {
  	u32 mask;
  	int requested_dc;
  	int max_dc;
- if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
  		max_dc = 2;
  		/*
  		 * DC9 has a separate HW flow from the rest of the DC states,
@@ -3376,10 +3376,10 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
  		 * suspend/resume, so allow it unconditionally.
  		 */
  		mask = DC_STATE_EN_DC9;
-	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
+	} else if (IS_GEN(i915, 10) || IS_GEN9_BC(i915)) {
  		max_dc = 2;
  		mask = 0;
-	} else if (IS_GEN9_LP(dev_priv)) {
+	} else if (IS_GEN9_LP(i915)) {
  		max_dc = 1;
  		mask = DC_STATE_EN_DC9;
  	} else {
@@ -3451,21 +3451,21 @@ __set_power_wells(struct i915_power_domains *power_domains,
/**
   * intel_power_domains_init - initializes the power domain structures
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
- * Initializes the power domain structures for @dev_priv depending upon the
+ * Initializes the power domain structures for @i915 depending upon the
   * supported platform.
   */
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
+int intel_power_domains_init(struct drm_i915_private *i915)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	int err;
i915_modparams.disable_power_well =
-		sanitize_disable_power_well_option(dev_priv,
+		sanitize_disable_power_well_option(i915,
  						   i915_modparams.disable_power_well);
-	dev_priv->csr.allowed_dc_mask =
-		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
+	i915->csr.allowed_dc_mask =
+		get_allowed_dc_mask(i915, i915_modparams.enable_dc);
  	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

@@ -3478,9 +3478,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  	 * The enabling order will be from lower to higher indexed wells,
  	 * the disabling order is reversed.
  	 */
-	if (IS_GEN(dev_priv, 11)) {
+	if (IS_GEN(i915, 11)) {
  		err = set_power_wells(power_domains, icl_power_wells);
-	} else if (IS_CANNONLAKE(dev_priv)) {
+	} else if (IS_CANNONLAKE(i915)) {
  		err = set_power_wells(power_domains, cnl_power_wells);
/*
@@ -3489,23 +3489,23 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  		 * timeouts, lets remove them from the list
  		 * for the SKUs without port F.
  		 */
-		if (!IS_CNL_WITH_PORT_F(dev_priv))
+		if (!IS_CNL_WITH_PORT_F(i915))
  			power_domains->power_well_count -= 2;
-	} else if (IS_GEMINILAKE(dev_priv)) {
+	} else if (IS_GEMINILAKE(i915)) {
  		err = set_power_wells(power_domains, glk_power_wells);
-	} else if (IS_BROXTON(dev_priv)) {
+	} else if (IS_BROXTON(i915)) {
  		err = set_power_wells(power_domains, bxt_power_wells);
-	} else if (IS_GEN9_BC(dev_priv)) {
+	} else if (IS_GEN9_BC(i915)) {
  		err = set_power_wells(power_domains, skl_power_wells);
-	} else if (IS_CHERRYVIEW(dev_priv)) {
+	} else if (IS_CHERRYVIEW(i915)) {
  		err = set_power_wells(power_domains, chv_power_wells);
-	} else if (IS_BROADWELL(dev_priv)) {
+	} else if (IS_BROADWELL(i915)) {
  		err = set_power_wells(power_domains, bdw_power_wells);
-	} else if (IS_HASWELL(dev_priv)) {
+	} else if (IS_HASWELL(i915)) {
  		err = set_power_wells(power_domains, hsw_power_wells);
-	} else if (IS_VALLEYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915)) {
  		err = set_power_wells(power_domains, vlv_power_wells);
-	} else if (IS_I830(dev_priv)) {
+	} else if (IS_I830(i915)) {
  		err = set_power_wells(power_domains, i830_power_wells);
  	} else {
  		err = set_power_wells(power_domains, i9xx_always_on_power_well);
@@ -3516,31 +3516,31 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
/**
   * intel_power_domains_cleanup - clean up power domains resources
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * Release any resources acquired by intel_power_domains_init()
   */
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
+void intel_power_domains_cleanup(struct drm_i915_private *i915)
  {
-	kfree(dev_priv->power_domains.power_wells);
+	kfree(i915->power_domains.power_wells);
  }
-static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
+static void intel_power_domains_sync_hw(struct drm_i915_private *i915)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
-	for_each_power_well(dev_priv, power_well) {
-		power_well->desc->ops->sync_hw(dev_priv, power_well);
+	for_each_power_well(i915, power_well) {
+		power_well->desc->ops->sync_hw(i915, power_well);
  		power_well->hw_enabled =
-			power_well->desc->ops->is_enabled(dev_priv, power_well);
+			power_well->desc->ops->is_enabled(i915, power_well);
  	}
  	mutex_unlock(&power_domains->lock);
  }
static inline
-bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+bool intel_dbuf_slice_set(struct drm_i915_private *i915,
  			  i915_reg_t reg, bool enable)
  {
  	u32 val, status;
@@ -3560,30 +3560,30 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
  	return true;
  }
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_enable(struct drm_i915_private *i915)
  {
-	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
+	intel_dbuf_slice_set(i915, DBUF_CTL, true);
  }
-static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_disable(struct drm_i915_private *i915)
  {
-	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
+	intel_dbuf_slice_set(i915, DBUF_CTL, false);
  }
-static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
+static u8 intel_dbuf_max_slices(struct drm_i915_private *i915)
  {
-	if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(i915) < 11)
  		return 1;
  	return 2;
  }
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+void icl_dbuf_slices_update(struct drm_i915_private *i915,
  			    u8 req_slices)
  {
-	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+	const u8 hw_enabled_slices = i915->wm.skl_hw.ddb.enabled_slices;
  	bool ret;
- if (req_slices > intel_dbuf_max_slices(dev_priv)) {
+	if (req_slices > intel_dbuf_max_slices(i915)) {
  		DRM_ERROR("Invalid number of dbuf slices requested\n");
  		return;
  	}
@@ -3592,15 +3592,15 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
  		return;
if (req_slices > hw_enabled_slices)
-		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+		ret = intel_dbuf_slice_set(i915, DBUF_CTL_S2, true);
  	else
-		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+		ret = intel_dbuf_slice_set(i915, DBUF_CTL_S2, false);
if (ret)
-		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
+		i915->wm.skl_hw.ddb.enabled_slices = req_slices;
  }
-static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
+static void icl_dbuf_enable(struct drm_i915_private *i915)
  {
  	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
  	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
@@ -3616,10 +3616,10 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
  		 * FIXME: for now pretend that we only have 1 slice, see
  		 * intel_enabled_dbuf_slices_num().
  		 */
-		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+		i915->wm.skl_hw.ddb.enabled_slices = 1;
  }
-static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
+static void icl_dbuf_disable(struct drm_i915_private *i915)
  {
  	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
  	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
@@ -3635,10 +3635,10 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
  		 * FIXME: for now pretend that the first slice is always
  		 * enabled, see intel_enabled_dbuf_slices_num().
  		 */
-		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+		i915->wm.skl_hw.ddb.enabled_slices = 1;
  }
-static void icl_mbus_init(struct drm_i915_private *dev_priv)
+static void icl_mbus_init(struct drm_i915_private *i915)
  {
  	u32 val;
@@ -3650,7 +3650,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
  	I915_WRITE(MBUS_ABOX_CTL, val);
  }
-static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
+static void hsw_assert_cdclk(struct drm_i915_private *i915)
  {
  	u32 val = I915_READ(LCPLL_CTL);
@@ -3670,9 +3670,9 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
  		DRM_ERROR("LCPLL not using non-SSC reference\n");
  }
-static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+static void assert_can_disable_lcpll(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
@@ -3691,7 +3691,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
  			"Panel power on\n");
  	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
  			"CPU PWM1 enabled\n");
-	if (IS_HASWELL(dev_priv))
+	if (IS_HASWELL(i915))
  		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
  				"CPU PWM2 enabled\n");
  	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
@@ -3707,21 +3707,21 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
  	 * gen-specific and since we only disable LCPLL after we fully disable
  	 * the interrupts, the check below should be enough.
  	 */
-	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+	I915_STATE_WARN(intel_irqs_enabled(i915), "IRQs enabled\n");
  }
-static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
+static u32 hsw_read_dcomp(struct drm_i915_private *i915)
  {
-	if (IS_HASWELL(dev_priv))
+	if (IS_HASWELL(i915))
  		return I915_READ(D_COMP_HSW);
  	else
  		return I915_READ(D_COMP_BDW);
  }
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
+static void hsw_write_dcomp(struct drm_i915_private *i915, u32 val)
  {
-	if (IS_HASWELL(dev_priv)) {
-		if (sandybridge_pcode_write(dev_priv,
+	if (IS_HASWELL(i915)) {
+		if (sandybridge_pcode_write(i915,
  					    GEN6_PCODE_WRITE_D_COMP, val))
  			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
  	} else {
@@ -3738,12 +3738,12 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
   * register. Callers should take care of disabling all the display engine
   * functions, doing the mode unset, fixing interrupts, etc.
   */
-static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+static void hsw_disable_lcpll(struct drm_i915_private *i915,
  			      bool switch_to_fclk, bool allow_power_down)
  {
  	u32 val;
- assert_can_disable_lcpll(dev_priv);
+	assert_can_disable_lcpll(i915);
  	val = I915_READ(LCPLL_CTL);

@@ -3762,16 +3762,16 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
  	I915_WRITE(LCPLL_CTL, val);
  	POSTING_READ(LCPLL_CTL);
- if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
+	if (intel_wait_for_register(&i915->uncore, LCPLL_CTL,
  				    LCPLL_PLL_LOCK, 0, 1))
  		DRM_ERROR("LCPLL still locked\n");
- val = hsw_read_dcomp(dev_priv);
+	val = hsw_read_dcomp(i915);
  	val |= D_COMP_COMP_DISABLE;
-	hsw_write_dcomp(dev_priv, val);
+	hsw_write_dcomp(i915, val);
  	ndelay(100);
- if (wait_for((hsw_read_dcomp(dev_priv) &
+	if (wait_for((hsw_read_dcomp(i915) &
  		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
  		DRM_ERROR("D_COMP RCOMP still in progress\n");
@@ -3787,7 +3787,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
   * Fully restores LCPLL, disallowing power down and switching back to LCPLL
   * source.
   */
-static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+static void hsw_restore_lcpll(struct drm_i915_private *i915)
  {
  	u32 val;
@@ -3801,7 +3801,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
  	 * Make sure we're not on PC8 state before disabling PC8, otherwise
  	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
  	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
  		val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -3809,16 +3809,16 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
  		POSTING_READ(LCPLL_CTL);
  	}
- val = hsw_read_dcomp(dev_priv);
+	val = hsw_read_dcomp(i915);
  	val |= D_COMP_COMP_FORCE;
  	val &= ~D_COMP_COMP_DISABLE;
-	hsw_write_dcomp(dev_priv, val);
+	hsw_write_dcomp(i915, val);
val = I915_READ(LCPLL_CTL);
  	val &= ~LCPLL_PLL_DISABLE;
  	I915_WRITE(LCPLL_CTL, val);
- if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
+	if (intel_wait_for_register(&i915->uncore, LCPLL_CTL,
  				    LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
  		DRM_ERROR("LCPLL not locked yet\n");
@@ -3832,10 +3832,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
  			DRM_ERROR("Switching back to LCPLL failed\n");
  	}
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- intel_update_cdclk(dev_priv);
-	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+	intel_update_cdclk(i915);
+	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
  }
/*
@@ -3861,45 +3861,45 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
   * For more, read "Display Sequences for Package C8" on the hardware
   * documentation.
   */
-void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+void hsw_enable_pc8(struct drm_i915_private *i915)
  {
  	u32 val;
DRM_DEBUG_KMS("Enabling package C8+\n"); - if (HAS_PCH_LPT_LP(dev_priv)) {
+	if (HAS_PCH_LPT_LP(i915)) {
  		val = I915_READ(SOUTH_DSPCLK_GATE_D);
  		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
  		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
  	}
- lpt_disable_clkout_dp(dev_priv);
-	hsw_disable_lcpll(dev_priv, true, true);
+	lpt_disable_clkout_dp(i915);
+	hsw_disable_lcpll(i915, true, true);
  }
-void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+void hsw_disable_pc8(struct drm_i915_private *i915)
  {
  	u32 val;
DRM_DEBUG_KMS("Disabling package C8+\n"); - hsw_restore_lcpll(dev_priv);
-	intel_init_pch_refclk(dev_priv);
+	hsw_restore_lcpll(i915);
+	intel_init_pch_refclk(i915);
- if (HAS_PCH_LPT_LP(dev_priv)) {
+	if (HAS_PCH_LPT_LP(i915)) {
  		val = I915_READ(SOUTH_DSPCLK_GATE_D);
  		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
  		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
  	}
  }
-static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+static void intel_pch_reset_handshake(struct drm_i915_private *i915,
  				      bool enable)
  {
  	i915_reg_t reg;
  	u32 reset_bits, val;
- if (IS_IVYBRIDGE(dev_priv)) {
+	if (IS_IVYBRIDGE(i915)) {
  		reg = GEN7_MSG_CTL;
  		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
  	} else {
@@ -3917,46 +3917,46 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
  	I915_WRITE(reg, val);
  }
-static void skl_display_core_init(struct drm_i915_private *dev_priv,
+static void skl_display_core_init(struct drm_i915_private *i915,
  				  bool resume)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	gen9_set_dc_state(i915, DC_STATE_DISABLE);
/* enable PCH reset handshake */
-	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+	intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
/* enable PG1 and Misc I/O */
  	mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-	intel_power_well_enable(dev_priv, well);
+	well = lookup_power_well(i915, SKL_DISP_PW_1);
+	intel_power_well_enable(i915, well);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
-	intel_power_well_enable(dev_priv, well);
+	well = lookup_power_well(i915, SKL_DISP_PW_MISC_IO);
+	intel_power_well_enable(i915, well);
  	mutex_unlock(&power_domains->lock);

-	intel_cdclk_init(dev_priv);
+	intel_cdclk_init(i915);
- gen9_dbuf_enable(dev_priv);
+	gen9_dbuf_enable(i915);
- if (resume && dev_priv->csr.dmc_payload)
-		intel_csr_load_program(dev_priv);
+	if (resume && i915->csr.dmc_payload)
+		intel_csr_load_program(i915);
  }
-static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void skl_display_core_uninit(struct drm_i915_private *i915)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	gen9_set_dc_state(i915, DC_STATE_DISABLE);
- gen9_dbuf_disable(dev_priv);
+	gen9_dbuf_disable(i915);
- intel_cdclk_uninit(dev_priv);
+	intel_cdclk_uninit(i915);
/* The spec doesn't call for removing the reset handshake flag */
  	/* disable PG1 and Misc I/O */
@@ -3969,21 +3969,21 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
  	 * Note that even though the driver's request is removed power well 1
  	 * may stay enabled after this due to DMC's own request on it.
  	 */
-	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-	intel_power_well_disable(dev_priv, well);
+	well = lookup_power_well(i915, SKL_DISP_PW_1);
+	intel_power_well_disable(i915, well);
  	mutex_unlock(&power_domains->lock);

  	usleep_range(10, 30); /* 10 us delay per Bspec */
  }
-void bxt_display_core_init(struct drm_i915_private *dev_priv,
+void bxt_display_core_init(struct drm_i915_private *i915,
  			   bool resume)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	gen9_set_dc_state(i915, DC_STATE_DISABLE);
/*
  	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
@@ -3991,34 +3991,34 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
  	 * Move the handshake programming to initialization sequence.
  	 * Previously was left up to BIOS.
  	 */
-	intel_pch_reset_handshake(dev_priv, false);
+	intel_pch_reset_handshake(i915, false);
/* Enable PG1 */
  	mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-	intel_power_well_enable(dev_priv, well);
+	well = lookup_power_well(i915, SKL_DISP_PW_1);
+	intel_power_well_enable(i915, well);
  	mutex_unlock(&power_domains->lock);

-	intel_cdclk_init(dev_priv);
+	intel_cdclk_init(i915);
- gen9_dbuf_enable(dev_priv);
+	gen9_dbuf_enable(i915);
- if (resume && dev_priv->csr.dmc_payload)
-		intel_csr_load_program(dev_priv);
+	if (resume && i915->csr.dmc_payload)
+		intel_csr_load_program(i915);
  }
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+void bxt_display_core_uninit(struct drm_i915_private *i915)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	gen9_set_dc_state(i915, DC_STATE_DISABLE);
- gen9_dbuf_disable(dev_priv);
+	gen9_dbuf_disable(i915);
- intel_cdclk_uninit(dev_priv);
+	intel_cdclk_uninit(i915);
  	/* The spec doesn't call for removing the reset handshake flag */

@@ -4029,60 +4029,60 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
  	 */
  	mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-	intel_power_well_disable(dev_priv, well);
+	well = lookup_power_well(i915, SKL_DISP_PW_1);
+	intel_power_well_disable(i915, well);
  	mutex_unlock(&power_domains->lock);

  	usleep_range(10, 30); /* 10 us delay per Bspec */
  }
-static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
+static void cnl_display_core_init(struct drm_i915_private *i915, bool resume)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	gen9_set_dc_state(i915, DC_STATE_DISABLE);
/* 1. Enable PCH Reset Handshake */
-	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+	intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
/* 2-3. */
-	intel_combo_phy_init(dev_priv);
+	intel_combo_phy_init(i915);
/*
  	 * 4. Enable Power Well 1 (PG1).
  	 *    The AUX IO power wells will be enabled on demand.
  	 */
  	mutex_lock(&power_domains->lock);
-	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-	intel_power_well_enable(dev_priv, well);
+	well = lookup_power_well(i915, SKL_DISP_PW_1);
+	intel_power_well_enable(i915, well);
  	mutex_unlock(&power_domains->lock);
/* 5. Enable CD clock */
-	intel_cdclk_init(dev_priv);
+	intel_cdclk_init(i915);
/* 6. Enable DBUF */
-	gen9_dbuf_enable(dev_priv);
+	gen9_dbuf_enable(i915);
- if (resume && dev_priv->csr.dmc_payload)
-		intel_csr_load_program(dev_priv);
+	if (resume && i915->csr.dmc_payload)
+		intel_csr_load_program(i915);
  }
-static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void cnl_display_core_uninit(struct drm_i915_private *i915)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	gen9_set_dc_state(i915, DC_STATE_DISABLE);
  	/* 1. Disable all display engine functions -> aready done */

  	/* 2. Disable DBUF */
-	gen9_dbuf_disable(dev_priv);
+	gen9_dbuf_disable(i915);
/* 3. Disable CD clock */
-	intel_cdclk_uninit(dev_priv);
+	intel_cdclk_uninit(i915);
/*
  	 * 4. Disable Power Well 1 (PG1).
@@ -4090,66 +4090,66 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
  	 *    disabled at this point.
  	 */
  	mutex_lock(&power_domains->lock);
-	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-	intel_power_well_disable(dev_priv, well);
+	well = lookup_power_well(i915, SKL_DISP_PW_1);
+	intel_power_well_disable(i915, well);
  	mutex_unlock(&power_domains->lock);
  	usleep_range(10, 30); /* 10 us delay per Bspec */

  	/* 5. */
-	intel_combo_phy_uninit(dev_priv);
+	intel_combo_phy_uninit(i915);
  }
-void icl_display_core_init(struct drm_i915_private *dev_priv,
+void icl_display_core_init(struct drm_i915_private *i915,
  			   bool resume)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	gen9_set_dc_state(i915, DC_STATE_DISABLE);
/* 1. Enable PCH reset handshake. */
-	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+	intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
/* 2. Initialize all combo phys */
-	intel_combo_phy_init(dev_priv);
+	intel_combo_phy_init(i915);
/*
  	 * 3. Enable Power Well 1 (PG1).
  	 *    The AUX IO power wells will be enabled on demand.
  	 */
  	mutex_lock(&power_domains->lock);
-	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-	intel_power_well_enable(dev_priv, well);
+	well = lookup_power_well(i915, SKL_DISP_PW_1);
+	intel_power_well_enable(i915, well);
  	mutex_unlock(&power_domains->lock);
/* 4. Enable CDCLK. */
-	intel_cdclk_init(dev_priv);
+	intel_cdclk_init(i915);
/* 5. Enable DBUF. */
-	icl_dbuf_enable(dev_priv);
+	icl_dbuf_enable(i915);
/* 6. Setup MBUS. */
-	icl_mbus_init(dev_priv);
+	icl_mbus_init(i915);
- if (resume && dev_priv->csr.dmc_payload)
-		intel_csr_load_program(dev_priv);
+	if (resume && i915->csr.dmc_payload)
+		intel_csr_load_program(i915);
  }
-void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+void icl_display_core_uninit(struct drm_i915_private *i915)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	gen9_set_dc_state(i915, DC_STATE_DISABLE);
  	/* 1. Disable all display engine functions -> aready done */

  	/* 2. Disable DBUF */
-	icl_dbuf_disable(dev_priv);
+	icl_dbuf_disable(i915);
/* 3. Disable CD clock */
-	intel_cdclk_uninit(dev_priv);
+	intel_cdclk_uninit(i915);
/*
  	 * 4. Disable Power Well 1 (PG1).
@@ -4157,20 +4157,20 @@ void icl_display_core_uninit(struct drm_i915_private *dev_priv)
  	 *    disabled at this point.
  	 */
  	mutex_lock(&power_domains->lock);
-	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-	intel_power_well_disable(dev_priv, well);
+	well = lookup_power_well(i915, SKL_DISP_PW_1);
+	intel_power_well_disable(i915, well);
  	mutex_unlock(&power_domains->lock);
/* 5. */
-	intel_combo_phy_uninit(dev_priv);
+	intel_combo_phy_uninit(i915);
  }
-static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+static void chv_phy_control_init(struct drm_i915_private *i915)
  {
  	struct i915_power_well *cmn_bc =
-		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+		lookup_power_well(i915, VLV_DISP_PW_DPIO_CMN_BC);
  	struct i915_power_well *cmn_d =
-		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+		lookup_power_well(i915, CHV_DISP_PW_DPIO_CMN_D);
/*
  	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
@@ -4179,7 +4179,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
  	 * power well state and lane status to reconstruct the
  	 * expected initial value.
  	 */
-	dev_priv->chv_phy_control =
+	i915->chv_phy_control =
  		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
  		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
  		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
@@ -4193,7 +4193,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
  	 * override and set the lane powerdown bits accding to the
  	 * current lane status.
  	 */
-	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
+	if (cmn_bc->desc->ops->is_enabled(i915, cmn_bc)) {
  		u32 status = I915_READ(DPLL(PIPE_A));
  		unsigned int mask;
@@ -4201,30 +4201,30 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
  		if (mask == 0xf)
  			mask = 0x0;
  		else
-			dev_priv->chv_phy_control |=
+			i915->chv_phy_control |=
  				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
- dev_priv->chv_phy_control |=
+		i915->chv_phy_control |=
  			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
mask = (status & DPLL_PORTC_READY_MASK) >> 4;
  		if (mask == 0xf)
  			mask = 0x0;
  		else
-			dev_priv->chv_phy_control |=
+			i915->chv_phy_control |=
  				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
- dev_priv->chv_phy_control |=
+		i915->chv_phy_control |=
  			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+		i915->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
- dev_priv->chv_phy_assert[DPIO_PHY0] = false;
+		i915->chv_phy_assert[DPIO_PHY0] = false;
  	} else {
-		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
+		i915->chv_phy_assert[DPIO_PHY0] = true;
  	}
- if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
+	if (cmn_d->desc->ops->is_enabled(i915, cmn_d)) {
  		u32 status = I915_READ(DPIO_PHY_STATUS);
  		unsigned int mask;
@@ -4233,42 +4233,42 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
  		if (mask == 0xf)
  			mask = 0x0;
  		else
-			dev_priv->chv_phy_control |=
+			i915->chv_phy_control |=
  				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
- dev_priv->chv_phy_control |=
+		i915->chv_phy_control |=
  			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+		i915->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
- dev_priv->chv_phy_assert[DPIO_PHY1] = false;
+		i915->chv_phy_assert[DPIO_PHY1] = false;
  	} else {
-		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
+		i915->chv_phy_assert[DPIO_PHY1] = true;
  	}
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+	I915_WRITE(DISPLAY_PHY_CONTROL, i915->chv_phy_control);
DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
-		      dev_priv->chv_phy_control);
+		      i915->chv_phy_control);
  }
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+static void vlv_cmnlane_wa(struct drm_i915_private *i915)
  {
  	struct i915_power_well *cmn =
-		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+		lookup_power_well(i915, VLV_DISP_PW_DPIO_CMN_BC);
  	struct i915_power_well *disp2d =
-		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
+		lookup_power_well(i915, VLV_DISP_PW_DISP2D);
/* If the display might be already active skip this */
-	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
-	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
+	if (cmn->desc->ops->is_enabled(i915, cmn) &&
+	    disp2d->desc->ops->is_enabled(i915, disp2d) &&
  	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
  		return;
DRM_DEBUG_KMS("toggling display PHY side reset\n"); /* cmnlane needs DPLL registers */
-	disp2d->desc->ops->enable(dev_priv, disp2d);
+	disp2d->desc->ops->enable(i915, disp2d);
/*
  	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
@@ -4277,27 +4277,27 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
  	 * Simply ungating isn't enough to reset the PHY enough to get
  	 * ports and lanes running.
  	 */
-	cmn->desc->ops->disable(dev_priv, cmn);
+	cmn->desc->ops->disable(i915, cmn);
  }
-static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+static bool vlv_punit_is_power_gated(struct drm_i915_private *i915, u32 reg0)
  {
  	bool ret;
- vlv_punit_get(dev_priv);
-	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
-	vlv_punit_put(dev_priv);
+	vlv_punit_get(i915);
+	ret = (vlv_punit_read(i915, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+	vlv_punit_put(i915);
return ret;
  }
-static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+static void assert_ved_power_gated(struct drm_i915_private *i915)
  {
-	WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+	WARN(!vlv_punit_is_power_gated(i915, PUNIT_REG_VEDSSPM0),
  	     "VED not power gated\n");
  }
-static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+static void assert_isp_power_gated(struct drm_i915_private *i915)
  {
  	static const struct pci_device_id isp_ids[] = {
  		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
@@ -4306,11 +4306,11 @@ static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
  	};
WARN(!pci_dev_present(isp_ids) &&
-	     !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+	     !vlv_punit_is_power_gated(i915, PUNIT_REG_ISPSSPM0),
  	     "ISP not power gated\n");
  }
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+static void intel_power_domains_verify_state(struct drm_i915_private *i915);
/**
   * intel_power_domains_init_hw - initialize hardware power domain state
diff --git a/drivers/gpu/drm/i915/intel_display_power.h b/drivers/gpu/drm/i915/intel_display_power.h
index ff57b0a7fe59..0310fe114ecc 100644
--- a/drivers/gpu/drm/i915/intel_display_power.h
+++ b/drivers/gpu/drm/i915/intel_display_power.h
@@ -80,23 +80,23 @@ struct i915_power_well_ops {
  	 * during driver init and resume time, possibly after first calling
  	 * the enable/disable handlers.
  	 */
-	void (*sync_hw)(struct drm_i915_private *dev_priv,
+	void (*sync_hw)(struct drm_i915_private *i915,
  			struct i915_power_well *power_well);
  	/*
  	 * Enable the well and resources that depend on it (for example
  	 * interrupts located on the well). Called after the 0->1 refcount
  	 * transition.
  	 */
-	void (*enable)(struct drm_i915_private *dev_priv,
+	void (*enable)(struct drm_i915_private *i915,
  		       struct i915_power_well *power_well);
  	/*
  	 * Disable the well and resources that depend on it. Called after
  	 * the 1->0 refcount transition.
  	 */
-	void (*disable)(struct drm_i915_private *dev_priv,
+	void (*disable)(struct drm_i915_private *i915,
  			struct i915_power_well *power_well);
  	/* Returns the hw enabled state. */
-	bool (*is_enabled)(struct drm_i915_private *dev_priv,
+	bool (*is_enabled)(struct drm_i915_private *i915,
  			   struct i915_power_well *power_well);
  };
@@ -204,48 +204,48 @@ struct i915_power_domains {
  	for_each_power_well_reverse(__dev_priv, __power_well)		        \
  		for_each_if((__power_well)->desc->domains & (__domain_mask))
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
-void bxt_enable_dc9(struct drm_i915_private *dev_priv);
-void bxt_disable_dc9(struct drm_i915_private *dev_priv);
-void gen9_enable_dc5(struct drm_i915_private *dev_priv);
+void skl_enable_dc6(struct drm_i915_private *i915);
+void gen9_sanitize_dc_state(struct drm_i915_private *i915);
+void bxt_enable_dc9(struct drm_i915_private *i915);
+void bxt_disable_dc9(struct drm_i915_private *i915);
+void gen9_enable_dc5(struct drm_i915_private *i915);
-int intel_power_domains_init(struct drm_i915_private *dev_priv);
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
-void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void icl_display_core_uninit(struct drm_i915_private *dev_priv);
-void intel_power_domains_enable(struct drm_i915_private *dev_priv);
-void intel_power_domains_disable(struct drm_i915_private *dev_priv);
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+int intel_power_domains_init(struct drm_i915_private *i915);
+void intel_power_domains_cleanup(struct drm_i915_private *i915);
+void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume);
+void intel_power_domains_fini_hw(struct drm_i915_private *i915);
+void icl_display_core_init(struct drm_i915_private *i915, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *i915);
+void intel_power_domains_enable(struct drm_i915_private *i915);
+void intel_power_domains_disable(struct drm_i915_private *i915);
+void intel_power_domains_suspend(struct drm_i915_private *i915,
  				 enum i915_drm_suspend_mode);
-void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void hsw_enable_pc8(struct drm_i915_private *dev_priv);
-void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
+void intel_power_domains_resume(struct drm_i915_private *i915);
+void hsw_enable_pc8(struct drm_i915_private *i915);
+void hsw_disable_pc8(struct drm_i915_private *i915);
+void bxt_display_core_init(struct drm_i915_private *i915, bool resume);
+void bxt_display_core_uninit(struct drm_i915_private *i915);
const char *
  intel_display_power_domain_str(enum intel_display_power_domain domain);
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_is_enabled(struct drm_i915_private *i915,
  				    enum intel_display_power_domain domain);
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+bool __intel_display_power_is_enabled(struct drm_i915_private *i915,
  				      enum intel_display_power_domain domain);
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *i915,
  					enum intel_display_power_domain domain);
  intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+intel_display_power_get_if_enabled(struct drm_i915_private *i915,
  				   enum intel_display_power_domain domain);
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+void intel_display_power_put_unchecked(struct drm_i915_private *i915,
  				       enum intel_display_power_domain domain);
  void __intel_display_power_put_async(struct drm_i915_private *i915,
  				     enum intel_display_power_domain domain,
  				     intel_wakeref_t wakeref);
  void intel_display_power_flush_work(struct drm_i915_private *i915);
  #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_display_power_put(struct drm_i915_private *dev_priv,
+void intel_display_power_put(struct drm_i915_private *i915,
  			     enum intel_display_power_domain domain,
  			     intel_wakeref_t wakeref);
  static inline void
@@ -277,12 +277,12 @@ intel_display_power_put_async(struct drm_i915_private *i915,
  	for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
  	     intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+void icl_dbuf_slices_update(struct drm_i915_private *i915,
  			    u8 req_slices);
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
  			     bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+bool chv_phy_powergate_ch(struct drm_i915_private *i915, enum dpio_phy phy,
  			  enum dpio_channel ch, bool override);
#endif /* __INTEL_DISPLAY_POWER_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4336df46fe78..d4dc2b9506e5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -160,7 +160,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
  static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
  static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
  					   const struct intel_crtc_state *crtc_state);
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+static void vlv_steal_power_sequencer(struct drm_i915_private *i915,
  				      enum pipe pipe);
  static void intel_dp_unset_edid(struct intel_dp *intel_dp);
@@ -214,8 +214,8 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
  static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
  {
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
  	intel_wakeref_t wakeref;
  	u32 lane_info;
@@ -223,7 +223,7 @@ static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
  		return 4;
lane_info = 0;
-	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
  		lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
  			     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
  				DP_LANE_ASSIGNMENT_SHIFT(tc_port);
@@ -284,8 +284,8 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
  {
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  	struct intel_encoder *encoder = &intel_dig_port->base;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	int max_dotclk = dev_priv->max_dotclk_freq;
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	int max_dotclk = i915->max_dotclk_freq;
  	int ds_max_dotclk;
int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
@@ -305,7 +305,7 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
  static int cnl_max_source_rate(struct intel_dp *intel_dp)
  {
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
  	enum port port = dig_port->base.port;
u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
@@ -315,7 +315,7 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
  		return 540000;
/* For this SKU 8.1G is supported in all ports */
-	if (IS_CNL_WITH_PORT_F(dev_priv))
+	if (IS_CNL_WITH_PORT_F(i915))
  		return 810000;
/* For other SKUs, max rate on ports A and D is 5.4G */
@@ -328,11 +328,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
  static int icl_max_source_rate(struct intel_dp *intel_dp)
  {
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
  	enum port port = dig_port->base.port;
- if (intel_port_is_combophy(dev_priv, port) &&
-	    !IS_ELKHARTLAKE(dev_priv) &&
+	if (intel_port_is_combophy(i915, port) &&
+	    !IS_ELKHARTLAKE(i915) &&
  	    !intel_dp_is_edp(intel_dp))
  		return 540000;
@@ -359,30 +359,30 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
  		162000, 270000
  	};
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
  	const struct ddi_vbt_port_info *info =
-		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
+		&i915->vbt.ddi_port_info[dig_port->base.port];
  	const int *source_rates;
  	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;
/* This should only be done once */
  	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
- if (INTEL_GEN(dev_priv) >= 10) {
+	if (INTEL_GEN(i915) >= 10) {
  		source_rates = cnl_rates;
  		size = ARRAY_SIZE(cnl_rates);
-		if (IS_GEN(dev_priv, 10))
+		if (IS_GEN(i915, 10))
  			max_rate = cnl_max_source_rate(intel_dp);
  		else
  			max_rate = icl_max_source_rate(intel_dp);
-	} else if (IS_GEN9_LP(dev_priv)) {
+	} else if (IS_GEN9_LP(i915)) {
  		source_rates = bxt_rates;
  		size = ARRAY_SIZE(bxt_rates);
-	} else if (IS_GEN9_BC(dev_priv)) {
+	} else if (IS_GEN9_BC(i915)) {
  		source_rates = skl_rates;
  		size = ARRAY_SIZE(skl_rates);
-	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
-		   IS_BROADWELL(dev_priv)) {
+	} else if ((IS_HASWELL(i915) && !IS_HSW_ULX(i915)) ||
+		   IS_BROADWELL(i915)) {
  		source_rates = hsw_rates;
  		size = ARRAY_SIZE(hsw_rates);
  	} else {
@@ -532,7 +532,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
  	struct intel_dp *intel_dp = intel_attached_dp(connector);
  	struct intel_connector *intel_connector = to_intel_connector(connector);
  	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	int target_clock = mode->clock;
  	int max_rate, mode_rate, max_lanes, max_link_clock;
  	int max_dotclk;
@@ -564,7 +564,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
  	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
  	 * integer value since we support only integer values of bpp.
  	 */
-	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
+	if ((INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915)) &&
  	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
  		if (intel_dp_is_edp(intel_dp)) {
  			dsc_max_output_bpp =
@@ -630,17 +630,17 @@ intel_dp_pps_init(struct intel_dp *intel_dp);
  static intel_wakeref_t
  pps_lock(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	intel_wakeref_t wakeref;
/*
  	 * See intel_power_sequencer_reset() why we need
  	 * a power domain reference here.
  	 */
-	wakeref = intel_display_power_get(dev_priv,
+	wakeref = intel_display_power_get(i915,
  					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));
- mutex_lock(&dev_priv->pps_mutex);
+	mutex_lock(&i915->pps_mutex);
return wakeref;
  }
@@ -648,10 +648,10 @@ pps_lock(struct intel_dp *intel_dp)
  static intel_wakeref_t
  pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- mutex_unlock(&dev_priv->pps_mutex);
-	intel_display_power_put(dev_priv,
+	mutex_unlock(&i915->pps_mutex);
+	intel_display_power_put(i915,
  				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
  				wakeref);
  	return 0;
@@ -663,7 +663,7 @@ pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
  static void
  vlv_power_sequencer_kick(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  	enum pipe pipe = intel_dp->pps_pipe;
  	bool pll_enabled, release_cl_override = false;
@@ -687,7 +687,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
  	DP |= DP_PORT_WIDTH(1);
  	DP |= DP_LINK_TRAIN_PAT_1;
- if (IS_CHERRYVIEW(dev_priv))
+	if (IS_CHERRYVIEW(i915))
  		DP |= DP_PIPE_SEL_CHV(pipe);
  	else
  		DP |= DP_PIPE_SEL(pipe);
@@ -699,10 +699,10 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
  	 * So enable temporarily it if it's not already enabled.
  	 */
  	if (!pll_enabled) {
-		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
-			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
+		release_cl_override = IS_CHERRYVIEW(i915) &&
+			!chv_phy_powergate_ch(i915, phy, ch, true);
- if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
+		if (vlv_force_pll_on(i915, pipe, IS_CHERRYVIEW(i915) ?
  				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
  			DRM_ERROR("Failed to force on pll for pipe %c!\n",
  				  pipe_name(pipe));
@@ -726,14 +726,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
  	POSTING_READ(intel_dp->output_reg);
if (!pll_enabled) {
-		vlv_force_pll_off(dev_priv, pipe);
+		vlv_force_pll_off(i915, pipe);
if (release_cl_override)
-			chv_phy_powergate_ch(dev_priv, phy, ch, false);
+			chv_phy_powergate_ch(i915, phy, ch, false);
  	}
  }
-static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+static enum pipe vlv_find_free_pps(struct drm_i915_private *i915)
  {
  	struct intel_encoder *encoder;
  	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
@@ -742,7 +742,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
  	 * We don't have power sequencer currently.
  	 * Pick one that's not used by other ports.
  	 */
-	for_each_intel_dp(&dev_priv->drm, encoder) {
+	for_each_intel_dp(&i915->drm, encoder) {
  		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
if (encoder->type == INTEL_OUTPUT_EDP) {
@@ -768,11 +768,11 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
  static enum pipe
  vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  	enum pipe pipe;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
/* We should never land here with regular DP ports */
  	WARN_ON(!intel_dp_is_edp(intel_dp));
@@ -783,7 +783,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
  	if (intel_dp->pps_pipe != INVALID_PIPE)
  		return intel_dp->pps_pipe;
- pipe = vlv_find_free_pps(dev_priv);
+	pipe = vlv_find_free_pps(i915);
/*
  	 * Didn't find one. This should not happen since there
@@ -792,7 +792,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
  	if (WARN_ON(pipe == INVALID_PIPE))
  		pipe = PIPE_A;
- vlv_steal_power_sequencer(dev_priv, pipe);
+	vlv_steal_power_sequencer(i915, pipe);
  	intel_dp->pps_pipe = pipe;
DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
@@ -815,10 +815,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
  static int
  bxt_power_sequencer_idx(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	int backlight_controller = dev_priv->vbt.backlight.controller;
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	int backlight_controller = i915->vbt.backlight.controller;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
/* We should never land here with regular DP ports */
  	WARN_ON(!intel_dp_is_edp(intel_dp));
@@ -837,29 +837,29 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
  	return backlight_controller;
  }
-typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
+typedef bool (*vlv_pipe_check)(struct drm_i915_private *i915,
  			       enum pipe pipe);
-static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
+static bool vlv_pipe_has_pp_on(struct drm_i915_private *i915,
  			       enum pipe pipe)
  {
  	return I915_READ(PP_STATUS(pipe)) & PP_ON;
  }
-static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
+static bool vlv_pipe_has_vdd_on(struct drm_i915_private *i915,
  				enum pipe pipe)
  {
  	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
  }
-static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
+static bool vlv_pipe_any(struct drm_i915_private *i915,
  			 enum pipe pipe)
  {
  	return true;
  }
static enum pipe
-vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+vlv_initial_pps_pipe(struct drm_i915_private *i915,
  		     enum port port,
  		     vlv_pipe_check pipe_check)
  {
@@ -872,7 +872,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
  		if (port_sel != PANEL_PORT_SELECT_VLV(port))
  			continue;
- if (!pipe_check(dev_priv, pipe))
+		if (!pipe_check(i915, pipe))
  			continue;
return pipe;
@@ -884,23 +884,23 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
  static void
  vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  	enum port port = intel_dig_port->base.port;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
/* try to find a pipe with this port selected */
  	/* first pick one where the panel is on */
-	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+	intel_dp->pps_pipe = vlv_initial_pps_pipe(i915, port,
  						  vlv_pipe_has_pp_on);
  	/* didn't find one? pick one where vdd is on */
  	if (intel_dp->pps_pipe == INVALID_PIPE)
-		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+		intel_dp->pps_pipe = vlv_initial_pps_pipe(i915, port,
  							  vlv_pipe_has_vdd_on);
  	/* didn't find one? pick one with just the correct port */
  	if (intel_dp->pps_pipe == INVALID_PIPE)
-		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+		intel_dp->pps_pipe = vlv_initial_pps_pipe(i915, port,
  							  vlv_pipe_any);
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
@@ -917,12 +917,12 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
  	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
  }
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
+void intel_power_sequencer_reset(struct drm_i915_private *i915)
  {
  	struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
-		    !IS_GEN9_LP(dev_priv)))
+	if (WARN_ON(!IS_VALLEYVIEW(i915) && !IS_CHERRYVIEW(i915) &&
+		    !IS_GEN9_LP(i915)))
  		return;
/*
@@ -935,7 +935,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
  	 * should use them always.
  	 */
- for_each_intel_dp(&dev_priv->drm, encoder) {
+	for_each_intel_dp(&i915->drm, encoder) {
  		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
@@ -943,7 +943,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
  		if (encoder->type != INTEL_OUTPUT_EDP)
  			continue;
- if (IS_GEN9_LP(dev_priv))
+		if (IS_GEN9_LP(i915))
  			intel_dp->pps_reset = true;
  		else
  			intel_dp->pps_pipe = INVALID_PIPE;
@@ -961,14 +961,14 @@ struct pps_registers {
  static void intel_pps_get_registers(struct intel_dp *intel_dp,
  				    struct pps_registers *regs)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	int pps_idx = 0;
  	memset(regs, 0, sizeof(*regs));
-	if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
  		pps_idx = bxt_power_sequencer_idx(intel_dp);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		pps_idx = vlv_power_sequencer_pipe(intel_dp);
regs->pp_ctrl = PP_CONTROL(pps_idx);
@@ -977,7 +977,7 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
  	regs->pp_off = PP_OFF_DELAYS(pps_idx);
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
-	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+	if (IS_GEN9_LP(i915) || INTEL_PCH_TYPE(i915) >= PCH_CNP)
  		regs->pp_div = INVALID_MMIO_REG;
  	else
  		regs->pp_div = PP_DIVISOR(pps_idx);
@@ -1010,14 +1010,14 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
  {
  	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
  						 edp_notifier);
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
  		return 0;
with_pps_lock(intel_dp, wakeref) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
  			i915_reg_t pp_ctrl_reg, pp_div_reg;
  			u32 pp_div;
@@ -1039,11 +1039,11 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
static bool edp_have_panel_power(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
  	    intel_dp->pps_pipe == INVALID_PIPE)
  		return false;
@@ -1052,11 +1052,11 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
  static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
  	    intel_dp->pps_pipe == INVALID_PIPE)
  		return false;
@@ -1066,7 +1066,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
  static void
  intel_dp_check_edp(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
  		return;
@@ -1103,7 +1103,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (index)
  		return 0;
@@ -1112,12 +1112,12 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  	 * The clock divider is based off the hrawclk, and would like to run at
  	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
  	 */
-	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
+	return DIV_ROUND_CLOSEST(i915->rawclk_freq, 2000);
  }
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (index)
@@ -1129,17 +1129,17 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  	 * divide by 2000 and use that
  	 */
  	if (dig_port->aux_ch == AUX_CH_A)
-		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
+		return DIV_ROUND_CLOSEST(i915->cdclk.hw.cdclk, 2000);
  	else
-		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
+		return DIV_ROUND_CLOSEST(i915->rawclk_freq, 2000);
  }
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
  		/* Workaround for non-ULT HSW */
  		switch (index) {
  		case 0: return 63;
@@ -1166,16 +1166,16 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
  				u32 aux_clock_divider)
  {
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  			to_i915(intel_dig_port->base.base.dev);
  	u32 precharge, timeout;
- if (IS_GEN(dev_priv, 6))
+	if (IS_GEN(i915, 6))
  		precharge = 3;
  	else
  		precharge = 5;
- if (IS_BROADWELL(dev_priv))
+	if (IS_BROADWELL(i915))
  		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
  	else
  		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -1481,7 +1481,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	enum aux_ch aux_ch = dig_port->aux_ch;
@@ -1498,7 +1498,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
  static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	enum aux_ch aux_ch = dig_port->aux_ch;
@@ -1515,7 +1515,7 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
  static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	enum aux_ch aux_ch = dig_port->aux_ch;
@@ -1534,7 +1534,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
  static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	enum aux_ch aux_ch = dig_port->aux_ch;
@@ -1553,7 +1553,7 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
  static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	enum aux_ch aux_ch = dig_port->aux_ch;
@@ -1573,7 +1573,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
  static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	enum aux_ch aux_ch = dig_port->aux_ch;
@@ -1600,14 +1600,14 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
  static void
  intel_dp_aux_init(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	struct intel_encoder *encoder = &dig_port->base;
- if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
  		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
+	} else if (HAS_PCH_SPLIT(i915)) {
  		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
  		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
  	} else {
@@ -1615,16 +1615,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
  		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
  	}
- if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
-	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+	else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
  		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
-	else if (HAS_PCH_SPLIT(dev_priv))
+	else if (HAS_PCH_SPLIT(i915))
  		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
  	else
  		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
  	else
  		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
@@ -1655,20 +1655,20 @@ static void
  intel_dp_set_clock(struct intel_encoder *encoder,
  		   struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	const struct dp_link_dpll *divisor = NULL;
  	int i, count = 0;
- if (IS_G4X(dev_priv)) {
+	if (IS_G4X(i915)) {
  		divisor = g4x_dpll;
  		count = ARRAY_SIZE(g4x_dpll);
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
+	} else if (HAS_PCH_SPLIT(i915)) {
  		divisor = pch_dpll;
  		count = ARRAY_SIZE(pch_dpll);
-	} else if (IS_CHERRYVIEW(dev_priv)) {
+	} else if (IS_CHERRYVIEW(i915)) {
  		divisor = chv_dpll;
  		count = ARRAY_SIZE(chv_dpll);
-	} else if (IS_VALLEYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915)) {
  		divisor = vlv_dpll;
  		count = ARRAY_SIZE(vlv_dpll);
  	}
@@ -1760,9 +1760,9 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
  static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
  					 const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- return INTEL_GEN(dev_priv) >= 11 &&
+	return INTEL_GEN(i915) >= 11 &&
  		pipe_config->cpu_transcoder != TRANSCODER_A;
  }
@@ -1776,9 +1776,9 @@ static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
  static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
  					 const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- return INTEL_GEN(dev_priv) >= 10 &&
+	return INTEL_GEN(i915) >= 10 &&
  		pipe_config->cpu_transcoder != TRANSCODER_A;
  }
@@ -1795,7 +1795,7 @@ static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
  static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
  				struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_connector *intel_connector = intel_dp->attached_connector;
  	int bpp, bpc;
@@ -1808,10 +1808,10 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
  	if (intel_dp_is_edp(intel_dp)) {
  		/* Get bpp from vbt only for panels that dont have bpp in edid */
  		if (intel_connector->base.display_info.bpc == 0 &&
-		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
+		    i915->vbt.edp.bpp && i915->vbt.edp.bpp < bpp) {
  			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
-				      dev_priv->vbt.edp.bpp);
-			bpp = dev_priv->vbt.edp.bpp;
+				      i915->vbt.edp.bpp);
+			bpp = i915->vbt.edp.bpp;
  		}
  	}
@@ -1924,7 +1924,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
  				       struct link_config_limits *limits)
  {
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
  	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  	u8 dsc_max_bpc;
  	int pipe_bpp;
@@ -1988,7 +1988,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
  	 * is greater than the maximum Cdclock and if slice count is even
  	 * then we need to use 2 VDSC instances.
  	 */
-	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
+	if (adjusted_mode->crtc_clock > i915->max_cdclk_freq) {
  		if (pipe_config->dsc_params.slice_count > 1) {
  			pipe_config->dsc_params.dsc_split = true;
  		} else {
@@ -2168,7 +2168,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
  			struct intel_crtc_state *pipe_config,
  			struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
@@ -2181,7 +2181,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
  					   DP_DPCD_QUIRK_CONSTANT_N);
  	int ret = 0, output_bpp;
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
+	if (HAS_PCH_SPLIT(i915) && !HAS_DDI(i915) && port != PORT_A)
  		pipe_config->has_pch_encoder = true;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -2195,7 +2195,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
  		return ret;
pipe_config->has_drrs = false;
-	if (IS_G4X(dev_priv) || port == PORT_A)
+	if (IS_G4X(i915) || port == PORT_A)
  		pipe_config->has_audio = false;
  	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
  		pipe_config->has_audio = intel_dp->has_audio;
@@ -2206,13 +2206,13 @@ intel_dp_compute_config(struct intel_encoder *encoder,
  		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
  				       adjusted_mode);
- if (INTEL_GEN(dev_priv) >= 9) {
+		if (INTEL_GEN(i915) >= 9) {
  			ret = skl_update_scaler_crtc(pipe_config);
  			if (ret)
  				return ret;
  		}
- if (HAS_GMCH(dev_priv))
+		if (HAS_GMCH(i915))
  			intel_gmch_panel_fitting(intel_crtc, pipe_config,
  						 conn_state->scaling_mode);
  		else
@@ -2223,7 +2223,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
  	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
  		return -EINVAL;
- if (HAS_GMCH(dev_priv) &&
+	if (HAS_GMCH(i915) &&
  	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
  		return -EINVAL;
@@ -2250,7 +2250,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
  			       constant_n);
if (intel_connector->panel.downclock_mode != NULL &&
-		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
+		i915->drrs.type == SEAMLESS_DRRS_SUPPORT) {
  			pipe_config->has_drrs = true;
  			intel_link_compute_m_n(output_bpp,
  					       pipe_config->lane_count,
@@ -2260,7 +2260,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
  					       constant_n);
  	}
- if (!HAS_DDI(dev_priv))
+	if (!HAS_DDI(i915))
  		intel_dp_set_clock(encoder, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config);
@@ -2281,7 +2281,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
  static void intel_dp_prepare(struct intel_encoder *encoder,
  			     const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	enum port port = encoder->port;
  	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
@@ -2320,7 +2320,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
  	/* Split out the IBX/CPU vs CPT settings */
-	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+	if (IS_IVYBRIDGE(i915) && port == PORT_A) {
  		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
  			intel_dp->DP |= DP_SYNC_HS_HIGH;
  		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -2331,7 +2331,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
  			intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
-	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+	} else if (HAS_PCH_CPT(i915) && port != PORT_A) {
  		u32 trans_dp;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
@@ -2343,7 +2343,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
  			trans_dp &= ~TRANS_DP_ENH_FRAMING;
  		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
  	} else {
-		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
+		if (IS_G4X(i915) && pipe_config->limited_color_range)
  			intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -2355,7 +2355,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
  		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
  			intel_dp->DP |= DP_ENHANCED_FRAMING;
- if (IS_CHERRYVIEW(dev_priv))
+		if (IS_CHERRYVIEW(i915))
  			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
  		else
  			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
@@ -2377,10 +2377,10 @@ static void wait_panel_status(struct intel_dp *intel_dp,
  				       u32 mask,
  				       u32 value)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
  	intel_pps_verify_state(intel_dp);
@@ -2392,7 +2392,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
  			I915_READ(pp_stat_reg),
  			I915_READ(pp_ctrl_reg));
- if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    pp_stat_reg, mask, value,
  				    5000))
  		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
@@ -2453,13 +2453,13 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	u32 control;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
control = I915_READ(_pp_ctrl_reg(intel_dp));
-	if (WARN_ON(!HAS_DDI(dev_priv) &&
+	if (WARN_ON(!HAS_DDI(i915) &&
  		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
  		control &= ~PANEL_UNLOCK_MASK;
  		control |= PANEL_UNLOCK_REGS;
@@ -2474,13 +2474,13 @@ static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
   */
  static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  	u32 pp;
  	i915_reg_t pp_stat_reg, pp_ctrl_reg;
  	bool need_to_disable = !intel_dp->want_panel_vdd;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
if (!intel_dp_is_edp(intel_dp))
  		return false;
@@ -2491,7 +2491,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
  	if (edp_have_panel_vdd(intel_dp))
  		return need_to_disable;
- intel_display_power_get(dev_priv,
+	intel_display_power_get(i915,
  				intel_aux_power_domain(intel_dig_port));
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
@@ -2546,13 +2546,13 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *intel_dig_port =
  		dp_to_dig_port(intel_dp);
  	u32 pp;
  	i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
  	WARN_ON(intel_dp->want_panel_vdd);
@@ -2578,7 +2578,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
  	if ((pp & PANEL_POWER_ON) == 0)
  		intel_dp->panel_power_off_time = ktime_get_boottime();
- intel_display_power_put_unchecked(dev_priv,
+	intel_display_power_put_unchecked(i915,
  					  intel_aux_power_domain(intel_dig_port));
  }
@@ -2615,9 +2615,9 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
   */
  static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
if (!intel_dp_is_edp(intel_dp))
  		return;
@@ -2635,11 +2635,11 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
static void edp_panel_on(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	u32 pp;
  	i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
if (!intel_dp_is_edp(intel_dp))
  		return;
@@ -2656,7 +2656,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  	pp = ironlake_get_pp_control(intel_dp);
-	if (IS_GEN(dev_priv, 5)) {
+	if (IS_GEN(i915, 5)) {
  		/* ILK workaround: disable reset around power sequence */
  		pp &= ~PANEL_POWER_RESET;
  		I915_WRITE(pp_ctrl_reg, pp);
@@ -2664,7 +2664,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
  	}
pp |= PANEL_POWER_ON;
-	if (!IS_GEN(dev_priv, 5))
+	if (!IS_GEN(i915, 5))
  		pp |= PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2673,7 +2673,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
  	wait_panel_on(intel_dp);
  	intel_dp->last_power_on = jiffies;
- if (IS_GEN(dev_priv, 5)) {
+	if (IS_GEN(i915, 5)) {
  		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
  		I915_WRITE(pp_ctrl_reg, pp);
  		POSTING_READ(pp_ctrl_reg);
@@ -2694,12 +2694,12 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
static void edp_panel_off(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	u32 pp;
  	i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
if (!intel_dp_is_edp(intel_dp))
  		return;
@@ -2727,7 +2727,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
  	intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
-	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
+	intel_display_power_put_unchecked(i915, intel_aux_power_domain(dig_port));
  }
void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -2744,7 +2744,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
  /* Enable backlight in the panel power control. */
  static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	intel_wakeref_t wakeref;
/*
@@ -2785,7 +2785,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
  /* Disable backlight in the panel power control. */
  static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -2849,7 +2849,7 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
  static void assert_dp_port(struct intel_dp *intel_dp, bool state)
  {
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
  	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(cur_state != state,
@@ -2859,7 +2859,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
  }
  #define assert_dp_port_disabled(d) assert_dp_port((d), false)
-static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
+static void assert_edp_pll(struct drm_i915_private *i915, bool state)
  {
  	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
@@ -2874,11 +2874,11 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
  				const struct intel_crtc_state *pipe_config)
  {
  	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(i915, crtc->pipe);
  	assert_dp_port_disabled(intel_dp);
-	assert_edp_pll_disabled(dev_priv);
+	assert_edp_pll_disabled(i915);
DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
  		      pipe_config->port_clock);
@@ -2900,8 +2900,8 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
  	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
  	 * 2. Program DP PLL enable
  	 */
-	if (IS_GEN(dev_priv, 5))
-		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
+	if (IS_GEN(i915, 5))
+		intel_wait_for_vblank_if_active(i915, !crtc->pipe);
  	intel_dp->DP |= DP_PLL_ENABLE;
@@ -2914,11 +2914,11 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
  				 const struct intel_crtc_state *old_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(i915, crtc->pipe);
  	assert_dp_port_disabled(intel_dp);
-	assert_edp_pll_enabled(dev_priv);
+	assert_edp_pll_enabled(i915);
DRM_DEBUG_KMS("disabling eDP PLL\n"); @@ -2999,12 +2999,12 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
  			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
  }
-static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+static bool cpt_dp_port_selected(struct drm_i915_private *i915,
  				 enum port port, enum pipe *pipe)
  {
  	enum pipe p;
- for_each_pipe(dev_priv, p) {
+	for_each_pipe(i915, p) {
  		u32 val = I915_READ(TRANS_DP_CTL(p));
if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
@@ -3021,7 +3021,7 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
  	return false;
  }
-bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_dp_port_enabled(struct drm_i915_private *i915,
  			   i915_reg_t dp_reg, enum port port,
  			   enum pipe *pipe)
  {
@@ -3033,11 +3033,11 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
  	ret = val & DP_PORT_EN;
/* asserts want to know the pipe even if the port is disabled */
-	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+	if (IS_IVYBRIDGE(i915) && port == PORT_A)
  		*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
-	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
-		ret &= cpt_dp_port_selected(dev_priv, port, pipe);
-	else if (IS_CHERRYVIEW(dev_priv))
+	else if (HAS_PCH_CPT(i915) && port != PORT_A)
+		ret &= cpt_dp_port_selected(i915, port, pipe);
+	else if (IS_CHERRYVIEW(i915))
  		*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
  	else
  		*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
@@ -3048,20 +3048,20 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
  static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
  				  enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	intel_wakeref_t wakeref;
  	bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     encoder->power_domain);
  	if (!wakeref)
  		return false;
- ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+	ret = intel_dp_port_enabled(i915, intel_dp->output_reg,
  				    encoder->port, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+	intel_display_power_put(i915, encoder->power_domain, wakeref);
return ret;
  }
@@ -3069,7 +3069,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
  static void intel_dp_get_config(struct intel_encoder *encoder,
  				struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	u32 tmp, flags = 0;
  	enum port port = encoder->port;
@@ -3084,7 +3084,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
  	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
-	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+	if (HAS_PCH_CPT(i915) && port != PORT_A) {
  		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
@@ -3110,7 +3110,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
  	pipe_config->base.adjusted_mode.flags |= flags;
-	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
+	if (IS_G4X(i915) && tmp & DP_COLOR_RANGE_16_235)
  		pipe_config->limited_color_range = true;
pipe_config->lane_count =
@@ -3129,8 +3129,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
  		intel_dotclock_calculate(pipe_config->port_clock,
  					 &pipe_config->dp_m_n);
- if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
-	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
+	if (intel_dp_is_edp(intel_dp) && i915->vbt.edp.bpp &&
+	    pipe_config->pipe_bpp > i915->vbt.edp.bpp) {
  		/*
  		 * This is a big fat ugly hack.
  		 *
@@ -3145,8 +3145,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
  		 * load.
  		 */
  		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
-			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
-		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
+			      pipe_config->pipe_bpp, i915->vbt.edp.bpp);
+		i915->vbt.edp.bpp = pipe_config->pipe_bpp;
  	}
  }
@@ -3215,16 +3215,16 @@ static void chv_post_disable_dp(struct intel_encoder *encoder,
  				const struct intel_crtc_state *old_crtc_state,
  				const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	intel_dp_link_down(encoder, old_crtc_state);
-	vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* Assert data lane reset */
  	chv_data_lane_soft_reset(encoder, old_crtc_state, true);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
static void
@@ -3232,7 +3232,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
  			 u32 *DP,
  			 u8 dp_train_pat)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  	enum port port = intel_dig_port->base.port;
  	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
@@ -3241,7 +3241,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
  		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
  			      dp_train_pat & train_pat_mask);
- if (HAS_DDI(dev_priv)) {
+	if (HAS_DDI(i915)) {
  		u32 temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
@@ -3270,8 +3270,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
  		}
  		I915_WRITE(DP_TP_CTL(port), temp);
- } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
-		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
+	} else if ((IS_IVYBRIDGE(i915) && port == PORT_A) ||
+		   (HAS_PCH_CPT(i915) && port != PORT_A)) {
  		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -3314,7 +3314,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
  static void intel_dp_enable_port(struct intel_dp *intel_dp,
  				 const struct intel_crtc_state *old_crtc_state)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	/* enable with pattern 1 (as per spec) */
@@ -3338,7 +3338,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
  			    const struct intel_crtc_state *pipe_config,
  			    const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
  	u32 dp_reg = I915_READ(intel_dp->output_reg);
@@ -3349,7 +3349,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
  		return;
with_pps_lock(intel_dp, wakeref) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  			vlv_init_panel_power_sequencer(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
@@ -3359,13 +3359,13 @@ static void intel_enable_dp(struct intel_encoder *encoder,
  		edp_panel_vdd_off(intel_dp, true);
  	}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		unsigned int lane_mask = 0x0;
- if (IS_CHERRYVIEW(dev_priv))
+		if (IS_CHERRYVIEW(i915))
  			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
- vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
+		vlv_wait_port_ready(i915, dp_to_dig_port(intel_dp),
  				    lane_mask);
  	}
@@ -3412,7 +3412,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder,
  static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
  {
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
  	enum pipe pipe = intel_dp->pps_pipe;
  	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
@@ -3440,14 +3440,14 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
  	intel_dp->pps_pipe = INVALID_PIPE;
  }
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+static void vlv_steal_power_sequencer(struct drm_i915_private *i915,
  				      enum pipe pipe)
  {
  	struct intel_encoder *encoder;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
- for_each_intel_dp(&dev_priv->drm, encoder) {
+	for_each_intel_dp(&i915->drm, encoder) {
  		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  		enum port port = encoder->port;
@@ -3469,11 +3469,11 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
  static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
  					   const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
  	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
@@ -3491,7 +3491,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
  	 * We may be stealing the power
  	 * sequencer from another port.
  	 */
-	vlv_steal_power_sequencer(dev_priv, crtc->pipe);
+	vlv_steal_power_sequencer(i915, crtc->pipe);
  	intel_dp->active_pipe = crtc->pipe;
@@ -3570,17 +3570,17 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATU
  u8
  intel_dp_voltage_max(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
  	enum port port = encoder->port;
- if (HAS_DDI(dev_priv))
+	if (HAS_DDI(i915))
  		return intel_ddi_dp_voltage_max(encoder);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+	else if (IS_IVYBRIDGE(i915) && port == PORT_A)
  		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
-	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+	else if (HAS_PCH_CPT(i915) && port != PORT_A)
  		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
  	else
  		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -3589,13 +3589,13 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
  u8
  intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
  	enum port port = encoder->port;
- if (HAS_DDI(dev_priv)) {
+	if (HAS_DDI(i915)) {
  		return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  			return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3607,7 +3607,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
  		default:
  			return DP_TRAIN_PRE_EMPH_LEVEL_0;
  		}
-	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+	} else if (IS_IVYBRIDGE(i915) && port == PORT_A) {
  		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  			return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3901,25 +3901,25 @@ ivb_cpu_edp_signal_levels(u8 train_set)
  void
  intel_dp_set_signal_levels(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  	enum port port = intel_dig_port->base.port;
  	u32 signal_levels, mask = 0;
  	u8 train_set = intel_dp->train_set[0];
- if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
+	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10) {
  		signal_levels = bxt_signal_levels(intel_dp);
-	} else if (HAS_DDI(dev_priv)) {
+	} else if (HAS_DDI(i915)) {
  		signal_levels = ddi_signal_levels(intel_dp);
  		mask = DDI_BUF_EMP_MASK;
-	} else if (IS_CHERRYVIEW(dev_priv)) {
+	} else if (IS_CHERRYVIEW(i915)) {
  		signal_levels = chv_signal_levels(intel_dp);
-	} else if (IS_VALLEYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915)) {
  		signal_levels = vlv_signal_levels(intel_dp);
-	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+	} else if (IS_IVYBRIDGE(i915) && port == PORT_A) {
  		signal_levels = ivb_cpu_edp_signal_levels(train_set);
  		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
-	} else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
+	} else if (IS_GEN(i915, 6) && port == PORT_A) {
  		signal_levels = snb_cpu_edp_signal_levels(train_set);
  		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
  	} else {
@@ -3947,7 +3947,7 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
  				       u8 dp_train_pat)
  {
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(intel_dig_port->base.base.dev);
_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
@@ -3958,12 +3958,12 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  	enum port port = intel_dig_port->base.port;
  	u32 val;
- if (!HAS_DDI(dev_priv))
+	if (!HAS_DDI(i915))
  		return;
val = I915_READ(DP_TP_CTL(port));
@@ -3981,7 +3981,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
  	if (port == PORT_A)
  		return;
- if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
+	if (intel_wait_for_register(&i915->uncore, DP_TP_STATUS(port),
  				    DP_TP_STATUS_IDLE_DONE,
  				    DP_TP_STATUS_IDLE_DONE,
  				    1))
@@ -3992,7 +3992,7 @@ static void
  intel_dp_link_down(struct intel_encoder *encoder,
  		   const struct intel_crtc_state *old_crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  	enum port port = encoder->port;
@@ -4003,8 +4003,8 @@ intel_dp_link_down(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n"); - if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
-	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
+	if ((IS_IVYBRIDGE(i915) && port == PORT_A) ||
+	    (HAS_PCH_CPT(i915) && port != PORT_A)) {
  		DP &= ~DP_LINK_TRAIN_MASK_CPT;
  		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
  	} else {
@@ -4023,13 +4023,13 @@ intel_dp_link_down(struct intel_encoder *encoder,
  	 * to transcoder A after disabling it to allow the
  	 * matching HDMI port to be enabled on transcoder A.
  	 */
-	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
+	if (HAS_PCH_IBX(i915) && crtc->pipe == PIPE_B && port != PORT_A) {
  		/*
  		 * We get CPU/PCH FIFO underruns on the other pipe when
  		 * doing the workaround. Sweep them under the rug.
  		 */
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+		intel_set_cpu_fifo_underrun_reporting(i915, PIPE_A, false);
+		intel_set_pch_fifo_underrun_reporting(i915, PIPE_A, false);
/* always enable with pattern 1 (as per spec) */
  		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
@@ -4042,16 +4042,16 @@ intel_dp_link_down(struct intel_encoder *encoder,
  		I915_WRITE(intel_dp->output_reg, DP);
  		POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
-		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+		intel_wait_for_vblank_if_active(i915, PIPE_A);
+		intel_set_cpu_fifo_underrun_reporting(i915, PIPE_A, true);
+		intel_set_pch_fifo_underrun_reporting(i915, PIPE_A, true);
  	}
  	msleep(intel_dp->panel_power_down_delay);
  	intel_dp->DP = DP;
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		intel_wakeref_t wakeref;
with_pps_lock(intel_dp, wakeref)
@@ -4156,7 +4156,7 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
  static bool
  intel_edp_init_dpcd(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
/* this function is meant to be called only once */
@@ -4169,7 +4169,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
  			 drm_dp_is_branch(intel_dp->dpcd));
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
-		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+		i915->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
  			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
/*
@@ -4230,7 +4230,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
  	intel_dp_set_common_rates(intel_dp);
/* Read the eDP DSC DPCD registers */
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		intel_dp_get_dsc_sink_cap(intel_dp);
return true;
@@ -4804,7 +4804,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
  int intel_dp_retrain_link(struct intel_encoder *encoder,
  			  struct drm_modeset_acquire_ctx *ctx)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	struct intel_connector *connector = intel_dp->attached_connector;
  	struct drm_connector_state *conn_state;
@@ -4817,7 +4817,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
  	if (!connector || connector->base.status != connector_status_connected)
  		return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+	ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex,
  			       ctx);
  	if (ret)
  		return ret;
@@ -4847,20 +4847,20 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
  		return 0;
/* Suppress underruns caused by re-training */
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+	intel_set_cpu_fifo_underrun_reporting(i915, crtc->pipe, false);
  	if (crtc_state->has_pch_encoder)
-		intel_set_pch_fifo_underrun_reporting(dev_priv,
+		intel_set_pch_fifo_underrun_reporting(i915,
  						      intel_crtc_pch_transcoder(crtc), false);
intel_dp_start_link_train(intel_dp);
  	intel_dp_stop_link_train(intel_dp);
/* Keep underrun reporting disabled until things are stable */
-	intel_wait_for_vblank(dev_priv, crtc->pipe);
+	intel_wait_for_vblank(i915, crtc->pipe);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+	intel_set_cpu_fifo_underrun_reporting(i915, crtc->pipe, true);
  	if (crtc_state->has_pch_encoder)
-		intel_set_pch_fifo_underrun_reporting(dev_priv,
+		intel_set_pch_fifo_underrun_reporting(i915,
  						      intel_crtc_pch_transcoder(crtc), true);
return 0;
@@ -4946,7 +4946,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
  static bool
  intel_dp_short_pulse(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	u8 old_sink_count = intel_dp->sink_count;
  	bool ret;
@@ -4983,7 +4983,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
  	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
  		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
  		/* Send a Hotplug Uevent to userspace to start modeset */
-		drm_kms_helper_hotplug_event(&dev_priv->drm);
+		drm_kms_helper_hotplug_event(&i915->drm);
  	}
return true;
@@ -5052,7 +5052,7 @@ edp_detect(struct intel_dp *intel_dp)
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 bit;
switch (encoder->hpd_pin) {
@@ -5075,7 +5075,7 @@ static bool ibx_digital_port_connected(struct intel_encoder *encoder)
static bool cpt_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 bit;
switch (encoder->hpd_pin) {
@@ -5098,7 +5098,7 @@ static bool cpt_digital_port_connected(struct intel_encoder *encoder)
static bool spt_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 bit;
switch (encoder->hpd_pin) {
@@ -5117,7 +5117,7 @@ static bool spt_digital_port_connected(struct intel_encoder *encoder)
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 bit;
switch (encoder->hpd_pin) {
@@ -5140,7 +5140,7 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 bit;
switch (encoder->hpd_pin) {
@@ -5163,7 +5163,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
  		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
@@ -5173,7 +5173,7 @@ static bool ilk_digital_port_connected(struct intel_encoder *encoder)
static bool snb_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
  		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
@@ -5183,7 +5183,7 @@ static bool snb_digital_port_connected(struct intel_encoder *encoder)
static bool ivb_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
  		return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
@@ -5193,7 +5193,7 @@ static bool ivb_digital_port_connected(struct intel_encoder *encoder)
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
  		return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
@@ -5203,7 +5203,7 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
static bool bxt_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 bit;
switch (encoder->hpd_pin) {
@@ -5224,7 +5224,7 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder)
  	return I915_READ(GEN8_DE_PORT_ISR) & bit;
  }
-static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
+static bool icl_combo_port_connected(struct drm_i915_private *i915,
  				     struct intel_digital_port *intel_dig_port)
  {
  	enum port port = intel_dig_port->base.port;
@@ -5247,7 +5247,7 @@ static const char *tc_type_name(enum tc_port_type type)
  	return names[type];
  }
-static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
+static void icl_update_tc_port_type(struct drm_i915_private *i915,
  				    struct intel_digital_port *intel_dig_port,
  				    bool is_legacy, bool is_typec, bool is_tbt)
  {
@@ -5295,10 +5295,10 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
   * will require a lot of coordination with user space and thorough testing for
   * the extra possible cases.
   */
-static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
+static bool icl_tc_phy_connect(struct drm_i915_private *i915,
  			       struct intel_digital_port *dig_port)
  {
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
  	u32 val;
if (dig_port->tc_type != TC_PORT_LEGACY &&
@@ -5329,7 +5329,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
  	if (dig_port->tc_type == TC_PORT_TYPEC &&
  	    !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
  		DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
-		icl_tc_phy_disconnect(dev_priv, dig_port);
+		icl_tc_phy_disconnect(i915, dig_port);
  		return false;
  	}
@@ -5340,10 +5340,10 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
   * See the comment at the connect function. This implements the Disconnect
   * Flow.
   */
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+void icl_tc_phy_disconnect(struct drm_i915_private *i915,
  			   struct intel_digital_port *dig_port)
  {
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
if (dig_port->tc_type == TC_PORT_UNKNOWN)
  		return;
@@ -5378,11 +5378,11 @@ void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
   * connected ports are usable, and avoids exposing to the users objects they
   * can't really use.
   */
-static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
+static bool icl_tc_port_connected(struct drm_i915_private *i915,
  				  struct intel_digital_port *intel_dig_port)
  {
  	enum port port = intel_dig_port->base.port;
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	enum tc_port tc_port = intel_port_to_tc(i915, port);
  	bool is_legacy, is_typec, is_tbt;
  	u32 dpsp;
@@ -5407,15 +5407,15 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
  	is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
if (!is_legacy && !is_typec && !is_tbt) {
-		icl_tc_phy_disconnect(dev_priv, intel_dig_port);
+		icl_tc_phy_disconnect(i915, intel_dig_port);
return false;
  	}
- icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
+	icl_update_tc_port_type(i915, intel_dig_port, is_legacy, is_typec,
  				is_tbt);
- if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
+	if (!icl_tc_phy_connect(i915, intel_dig_port))
  		return false;
return true;
@@ -5423,13 +5423,13 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
static bool icl_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- if (intel_port_is_combophy(dev_priv, encoder->port))
-		return icl_combo_port_connected(dev_priv, dig_port);
-	else if (intel_port_is_tc(dev_priv, encoder->port))
-		return icl_tc_port_connected(dev_priv, dig_port);
+	if (intel_port_is_combophy(i915, encoder->port))
+		return icl_combo_port_connected(i915, dig_port);
+	else if (intel_port_is_tc(i915, encoder->port))
+		return icl_tc_port_connected(i915, dig_port);
  	else
  		MISSING_CASE(encoder->hpd_pin);
@@ -5449,41 +5449,41 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
   */
  static bool __intel_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (HAS_GMCH(dev_priv)) {
-		if (IS_GM45(dev_priv))
+	if (HAS_GMCH(i915)) {
+		if (IS_GM45(i915))
  			return gm45_digital_port_connected(encoder);
  		else
  			return g4x_digital_port_connected(encoder);
  	}
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		return icl_digital_port_connected(encoder);
-	else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
+	else if (IS_GEN(i915, 10) || IS_GEN9_BC(i915))
  		return spt_digital_port_connected(encoder);
-	else if (IS_GEN9_LP(dev_priv))
+	else if (IS_GEN9_LP(i915))
  		return bxt_digital_port_connected(encoder);
-	else if (IS_GEN(dev_priv, 8))
+	else if (IS_GEN(i915, 8))
  		return bdw_digital_port_connected(encoder);
-	else if (IS_GEN(dev_priv, 7))
+	else if (IS_GEN(i915, 7))
  		return ivb_digital_port_connected(encoder);
-	else if (IS_GEN(dev_priv, 6))
+	else if (IS_GEN(i915, 6))
  		return snb_digital_port_connected(encoder);
-	else if (IS_GEN(dev_priv, 5))
+	else if (IS_GEN(i915, 5))
  		return ilk_digital_port_connected(encoder);
- MISSING_CASE(INTEL_GEN(dev_priv));
+	MISSING_CASE(INTEL_GEN(i915));
  	return false;
  }
bool intel_digital_port_connected(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	bool is_connected = false;
  	intel_wakeref_t wakeref;
- with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
  		is_connected = __intel_digital_port_connected(encoder);
return is_connected;
@@ -5537,7 +5537,7 @@ intel_dp_detect(struct drm_connector *connector,
  		struct drm_modeset_acquire_ctx *ctx,
  		bool force)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	struct intel_dp *intel_dp = intel_attached_dp(connector);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	struct intel_encoder *encoder = &dig_port->base;
@@ -5545,7 +5545,7 @@ intel_dp_detect(struct drm_connector *connector,
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
  		      connector->base.id, connector->name);
-	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+	WARN_ON(!drm_modeset_is_locked(&i915->drm.mode_config.connection_mutex));
/* Can't disconnect eDP */
  	if (intel_dp_is_edp(intel_dp))
@@ -5584,7 +5584,7 @@ intel_dp_detect(struct drm_connector *connector,
  	intel_dp_print_rates(intel_dp);
/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
-	if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		intel_dp_get_dsc_sink_cap(intel_dp);
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
@@ -5642,7 +5642,7 @@ intel_dp_force(struct drm_connector *connector)
  	struct intel_dp *intel_dp = intel_attached_dp(connector);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	struct intel_encoder *intel_encoder = &dig_port->base;
-	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
  	enum intel_display_power_domain aux_domain =
  		intel_aux_power_domain(dig_port);
  	intel_wakeref_t wakeref;
@@ -5654,11 +5654,11 @@ intel_dp_force(struct drm_connector *connector)
  	if (connector->status != connector_status_connected)
  		return;
- wakeref = intel_display_power_get(dev_priv, aux_domain);
+	wakeref = intel_display_power_get(i915, aux_domain);
  	intel_dp_set_edid(intel_dp);
-	intel_display_power_put(dev_priv, aux_domain, wakeref);
+	intel_display_power_put(i915, aux_domain, wakeref);
  }
static int intel_dp_get_modes(struct drm_connector *connector)
@@ -6362,10 +6362,10 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
if (!edp_have_panel_vdd(intel_dp))
  		return;
@@ -6377,18 +6377,18 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
  	 * indefinitely.
  	 */
  	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
-	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
+	intel_display_power_get(i915, intel_aux_power_domain(dig_port));
edp_panel_vdd_schedule_off(intel_dp);
  }
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
  	enum pipe pipe;
- if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+	if (intel_dp_port_enabled(i915, intel_dp->output_reg,
  				  encoder->port, &pipe))
  		return pipe;
@@ -6397,12 +6397,12 @@ static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
  void intel_dp_encoder_reset(struct drm_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct drm_i915_private *i915 = to_i915(encoder->dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
  	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
  	intel_wakeref_t wakeref;
- if (!HAS_DDI(dev_priv))
+	if (!HAS_DDI(i915))
  		intel_dp->DP = I915_READ(intel_dp->output_reg);
if (lspcon->active)
@@ -6410,12 +6410,12 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
  	intel_dp->reset_link_params = true;
-	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+	if (!IS_VALLEYVIEW(i915) && !IS_CHERRYVIEW(i915) &&
  	    !intel_dp_is_edp(intel_dp))
  		return;
with_pps_lock(intel_dp, wakeref) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  			intel_dp->active_pipe = vlv_active_pipe(intel_dp);
if (intel_dp_is_edp(intel_dp)) {
@@ -6508,41 +6508,41 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
  }
/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
  {
  	/*
  	 * eDP not supported on g4x. so bail out early just
  	 * for a bit extra safety in case the VBT is bonkers.
  	 */
-	if (INTEL_GEN(dev_priv) < 5)
+	if (INTEL_GEN(i915) < 5)
  		return false;
- if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
+	if (INTEL_GEN(i915) < 9 && port == PORT_A)
  		return true;
- return intel_bios_is_port_edp(dev_priv, port);
+	return intel_bios_is_port_edp(i915, port);
  }
static void
  intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	enum port port = dp_to_dig_port(intel_dp)->base.port;
- if (!IS_G4X(dev_priv) && port != PORT_A)
+	if (!IS_G4X(i915) && port != PORT_A)
  		intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
-	if (HAS_GMCH(dev_priv))
+	if (HAS_GMCH(i915))
  		drm_connector_attach_max_bpc_property(connector, 6, 10);
-	else if (INTEL_GEN(dev_priv) >= 5)
+	else if (INTEL_GEN(i915) >= 5)
  		drm_connector_attach_max_bpc_property(connector, 6, 12);
if (intel_dp_is_edp(intel_dp)) {
  		u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
-		if (!HAS_GMCH(dev_priv))
+		if (!HAS_GMCH(i915))
  			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
@@ -6562,7 +6562,7 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
  static void
  intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	u32 pp_on, pp_off, pp_ctl;
  	struct pps_registers regs;
@@ -6571,7 +6571,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
  	pp_ctl = ironlake_get_pp_control(intel_dp);
/* Ensure PPS is unlocked */
-	if (!HAS_DDI(dev_priv))
+	if (!HAS_DDI(i915))
  		I915_WRITE(regs.pp_ctrl, pp_ctl);
pp_on = I915_READ(regs.pp_on);
@@ -6621,11 +6621,11 @@ intel_pps_verify_state(struct intel_dp *intel_dp)
  static void
  intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct edp_power_seq cur, vbt, spec,
  		*final = &intel_dp->pps_delays;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
/* already initialized? */
  	if (final->t11_t12 != 0)
@@ -6635,13 +6635,13 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
  	intel_pps_dump_state("cur", &cur);
-	vbt = dev_priv->vbt.edp.pps;
+	vbt = i915->vbt.edp.pps;
  	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
  	 * of 500ms appears to be too short. Ocassionally the panel
  	 * just fails to power back on. Increasing the delay to 800ms
  	 * seems sufficient to avoid this problem.
  	 */
-	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+	if (i915->quirks & QUIRK_INCREASE_T12_DELAY) {
  		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
  		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
  			      vbt.t11_t12);
@@ -6714,14 +6714,14 @@ static void
  intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
  					      bool force_disable_vdd)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	u32 pp_on, pp_off, port_sel = 0;
-	int div = dev_priv->rawclk_freq / 1000;
+	int div = i915->rawclk_freq / 1000;
  	struct pps_registers regs;
  	enum port port = dp_to_dig_port(intel_dp)->base.port;
  	const struct edp_power_seq *seq = &intel_dp->pps_delays;
- lockdep_assert_held(&dev_priv->pps_mutex);
+	lockdep_assert_held(&i915->pps_mutex);
  	intel_pps_get_registers(intel_dp, &regs);
@@ -6757,9 +6757,9 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
  	/* Haswell doesn't have any port selection bits for the panel
  	 * power sequencer any more. */
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		port_sel = PANEL_PORT_SELECT_VLV(port);
-	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+	} else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915)) {
  		switch (port) {
  		case PORT_A:
  			port_sel = PANEL_PORT_SELECT_DPA;
@@ -6807,9 +6807,9 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
static void intel_dp_pps_init(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		vlv_initial_power_sequencer_setup(intel_dp);
  	} else {
  		intel_dp_init_panel_power_sequencer(intel_dp);
@@ -6819,7 +6819,7 @@ static void intel_dp_pps_init(struct intel_dp *intel_dp)
/**
   * intel_dp_set_drrs_state - program registers for RR switch to take effect
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @crtc_state: a pointer to the active intel_crtc_state
   * @refresh_rate: RR to be programmed
   *
@@ -6828,15 +6828,15 @@ static void intel_dp_pps_init(struct intel_dp *intel_dp)
   * supported by the panel or to any other RR based on media playback (in
   * this case, RR value needs to be passed from user space).
   *
- * The caller of this function needs to take a lock on dev_priv->drrs.
+ * The caller of this function needs to take a lock on i915->drrs.
   */
-static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
+static void intel_dp_set_drrs_state(struct drm_i915_private *i915,
  				    const struct intel_crtc_state *crtc_state,
  				    int refresh_rate)
  {
  	struct intel_encoder *encoder;
  	struct intel_digital_port *dig_port = NULL;
-	struct intel_dp *intel_dp = dev_priv->drrs.dp;
+	struct intel_dp *intel_dp = i915->drrs.dp;
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
@@ -6858,7 +6858,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
  		return;
  	}
- if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
+	if (i915->drrs.type < SEAMLESS_DRRS_SUPPORT) {
  		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
  		return;
  	}
@@ -6867,7 +6867,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
  			refresh_rate)
  		index = DRRS_LOW_RR;
- if (index == dev_priv->drrs.refresh_rate_type) {
+	if (index == i915->drrs.refresh_rate_type) {
  		DRM_DEBUG_KMS(
  			"DRRS requested for previously set RR...ignoring\n");
  		return;
@@ -6878,7 +6878,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
  		return;
  	}
- if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
+	if (INTEL_GEN(i915) >= 8 && !IS_CHERRYVIEW(i915)) {
  		switch (index) {
  		case DRRS_HIGH_RR:
  			intel_dp_set_m_n(crtc_state, M1_N1);
@@ -6890,18 +6890,18 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
  		default:
  			DRM_ERROR("Unsupported refreshrate type\n");
  		}
-	} else if (INTEL_GEN(dev_priv) > 6) {
+	} else if (INTEL_GEN(i915) > 6) {
  		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
  		u32 val;
val = I915_READ(reg);
  		if (index > DRRS_HIGH_RR) {
-			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+			if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
  			else
  				val |= PIPECONF_EDP_RR_MODE_SWITCH;
  		} else {
-			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+			if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
  			else
  				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
@@ -6909,7 +6909,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
  		I915_WRITE(reg, val);
  	}
- dev_priv->drrs.refresh_rate_type = index;
+	i915->drrs.refresh_rate_type = index;
DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
  }
@@ -6924,30 +6924,30 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
  void intel_edp_drrs_enable(struct intel_dp *intel_dp,
  			   const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!crtc_state->has_drrs) {
  		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
  		return;
  	}
- if (dev_priv->psr.enabled) {
+	if (i915->psr.enabled) {
  		DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
  		return;
  	}
- mutex_lock(&dev_priv->drrs.mutex);
-	if (dev_priv->drrs.dp) {
+	mutex_lock(&i915->drrs.mutex);
+	if (i915->drrs.dp) {
  		DRM_DEBUG_KMS("DRRS already enabled\n");
  		goto unlock;
  	}
- dev_priv->drrs.busy_frontbuffer_bits = 0;
+	i915->drrs.busy_frontbuffer_bits = 0;
- dev_priv->drrs.dp = intel_dp;
+	i915->drrs.dp = intel_dp;
unlock:
-	mutex_unlock(&dev_priv->drrs.mutex);
+	mutex_unlock(&i915->drrs.mutex);
  }
/**
@@ -6959,36 +6959,36 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
  void intel_edp_drrs_disable(struct intel_dp *intel_dp,
  			    const struct intel_crtc_state *old_crtc_state)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!old_crtc_state->has_drrs)
  		return;
- mutex_lock(&dev_priv->drrs.mutex);
-	if (!dev_priv->drrs.dp) {
-		mutex_unlock(&dev_priv->drrs.mutex);
+	mutex_lock(&i915->drrs.mutex);
+	if (!i915->drrs.dp) {
+		mutex_unlock(&i915->drrs.mutex);
  		return;
  	}
- if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
+	if (i915->drrs.refresh_rate_type == DRRS_LOW_RR)
+		intel_dp_set_drrs_state(i915, old_crtc_state,
  			intel_dp->attached_connector->panel.fixed_mode->vrefresh);
- dev_priv->drrs.dp = NULL;
-	mutex_unlock(&dev_priv->drrs.mutex);
+	i915->drrs.dp = NULL;
+	mutex_unlock(&i915->drrs.mutex);
- cancel_delayed_work_sync(&dev_priv->drrs.work);
+	cancel_delayed_work_sync(&i915->drrs.work);
  }
static void intel_edp_drrs_downclock_work(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), drrs.work.work);
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915), drrs.work.work);
  	struct intel_dp *intel_dp;
- mutex_lock(&dev_priv->drrs.mutex);
+	mutex_lock(&i915->drrs.mutex);
- intel_dp = dev_priv->drrs.dp;
+	intel_dp = i915->drrs.dp;
if (!intel_dp)
  		goto unlock;
@@ -6998,23 +6998,23 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
  	 * recheck.
  	 */
- if (dev_priv->drrs.busy_frontbuffer_bits)
+	if (i915->drrs.busy_frontbuffer_bits)
  		goto unlock;
- if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
+	if (i915->drrs.refresh_rate_type != DRRS_LOW_RR) {
  		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+		intel_dp_set_drrs_state(i915, to_intel_crtc(crtc)->config,
  			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
  	}
unlock:
-	mutex_unlock(&dev_priv->drrs.mutex);
+	mutex_unlock(&i915->drrs.mutex);
  }
/**
   * intel_edp_drrs_invalidate - Disable Idleness DRRS
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @frontbuffer_bits: frontbuffer plane tracking bits
   *
   * This function gets called everytime rendering on the given planes start.
@@ -7022,40 +7022,40 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
   *
   * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
   */
-void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+void intel_edp_drrs_invalidate(struct drm_i915_private *i915,
  			       unsigned int frontbuffer_bits)
  {
  	struct drm_crtc *crtc;
  	enum pipe pipe;
- if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
+	if (i915->drrs.type == DRRS_NOT_SUPPORTED)
  		return;
- cancel_delayed_work(&dev_priv->drrs.work);
+	cancel_delayed_work(&i915->drrs.work);
- mutex_lock(&dev_priv->drrs.mutex);
-	if (!dev_priv->drrs.dp) {
-		mutex_unlock(&dev_priv->drrs.mutex);
+	mutex_lock(&i915->drrs.mutex);
+	if (!i915->drrs.dp) {
+		mutex_unlock(&i915->drrs.mutex);
  		return;
  	}
- crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+	crtc = dp_to_dig_port(i915->drrs.dp)->base.base.crtc;
  	pipe = to_intel_crtc(crtc)->pipe;
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+	i915->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
/* invalidate means busy screen hence upclock */
-	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
-			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
+	if (frontbuffer_bits && i915->drrs.refresh_rate_type == DRRS_LOW_RR)
+		intel_dp_set_drrs_state(i915, to_intel_crtc(crtc)->config,
+			i915->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
- mutex_unlock(&dev_priv->drrs.mutex);
+	mutex_unlock(&i915->drrs.mutex);
  }
/**
   * intel_edp_drrs_flush - Restart Idleness DRRS
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @frontbuffer_bits: frontbuffer plane tracking bits
   *
   * This function gets called every time rendering on the given planes has
@@ -7065,42 +7065,42 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
   *
   * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
   */
-void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+void intel_edp_drrs_flush(struct drm_i915_private *i915,
  			  unsigned int frontbuffer_bits)
  {
  	struct drm_crtc *crtc;
  	enum pipe pipe;
- if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
+	if (i915->drrs.type == DRRS_NOT_SUPPORTED)
  		return;
- cancel_delayed_work(&dev_priv->drrs.work);
+	cancel_delayed_work(&i915->drrs.work);
- mutex_lock(&dev_priv->drrs.mutex);
-	if (!dev_priv->drrs.dp) {
-		mutex_unlock(&dev_priv->drrs.mutex);
+	mutex_lock(&i915->drrs.mutex);
+	if (!i915->drrs.dp) {
+		mutex_unlock(&i915->drrs.mutex);
  		return;
  	}
- crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+	crtc = dp_to_dig_port(i915->drrs.dp)->base.base.crtc;
  	pipe = to_intel_crtc(crtc)->pipe;
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
+	i915->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* flush means busy screen hence upclock */
-	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
-				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
+	if (frontbuffer_bits && i915->drrs.refresh_rate_type == DRRS_LOW_RR)
+		intel_dp_set_drrs_state(i915, to_intel_crtc(crtc)->config,
+				i915->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
/*
  	 * flush also means no more activity hence schedule downclock, if all
  	 * other fbs are quiescent too
  	 */
-	if (!dev_priv->drrs.busy_frontbuffer_bits)
-		schedule_delayed_work(&dev_priv->drrs.work,
+	if (!i915->drrs.busy_frontbuffer_bits)
+		schedule_delayed_work(&i915->drrs.work,
  				msecs_to_jiffies(1000));
-	mutex_unlock(&dev_priv->drrs.mutex);
+	mutex_unlock(&i915->drrs.mutex);
  }
/**
@@ -7157,18 +7157,18 @@ static struct drm_display_mode *
  intel_dp_drrs_init(struct intel_connector *connector,
  		   struct drm_display_mode *fixed_mode)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct drm_display_mode *downclock_mode = NULL;
- INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
-	mutex_init(&dev_priv->drrs.mutex);
+	INIT_DELAYED_WORK(&i915->drrs.work, intel_edp_drrs_downclock_work);
+	mutex_init(&i915->drrs.mutex);
- if (INTEL_GEN(dev_priv) <= 6) {
+	if (INTEL_GEN(i915) <= 6) {
  		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
  		return NULL;
  	}
- if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
+	if (i915->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
  		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
  		return NULL;
  	}
@@ -7179,9 +7179,9 @@ intel_dp_drrs_init(struct intel_connector *connector,
  		return NULL;
  	}
- dev_priv->drrs.type = dev_priv->vbt.drrs_type;
+	i915->drrs.type = i915->vbt.drrs_type;
- dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
+	i915->drrs.refresh_rate_type = DRRS_HIGH_RR;
  	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
  	return downclock_mode;
  }
@@ -7189,8 +7189,8 @@ intel_dp_drrs_init(struct intel_connector *connector,
  static bool intel_edp_init_connector(struct intel_dp *intel_dp,
  				     struct intel_connector *intel_connector)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	struct drm_device *dev = &i915->drm;
  	struct drm_connector *connector = &intel_connector->base;
  	struct drm_display_mode *fixed_mode = NULL;
  	struct drm_display_mode *downclock_mode = NULL;
@@ -7210,8 +7210,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
  	 * eDP and LVDS bail out early in this case to prevent interfering
  	 * with an already powered-on LVDS power sequencer.
  	 */
-	if (intel_get_lvds_encoder(dev_priv)) {
-		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+	if (intel_get_lvds_encoder(i915)) {
+		WARN_ON(!(HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915)));
  		DRM_INFO("LVDS was detected, not registering eDP\n");
return false;
@@ -7256,7 +7256,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
  		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
  	mutex_unlock(&dev->mode_config.mutex);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
  		register_reboot_notifier(&intel_dp->edp_notifier);
@@ -7330,7 +7330,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
  	struct intel_dp *intel_dp = &intel_dig_port->dp;
  	struct intel_encoder *intel_encoder = &intel_dig_port->base;
  	struct drm_device *dev = intel_encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum port port = intel_encoder->port;
  	int type;
@@ -7353,18 +7353,18 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
  	intel_dp->DP = I915_READ(intel_dp->output_reg);
  	intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_port_edp(dev_priv, port)) {
+	if (intel_dp_is_port_edp(i915, port)) {
  		/*
  		 * Currently we don't support eDP on TypeC ports, although in
  		 * theory it could work on TypeC legacy ports.
  		 */
-		WARN_ON(intel_port_is_tc(dev_priv, port));
+		WARN_ON(intel_port_is_tc(i915, port));
  		type = DRM_MODE_CONNECTOR_eDP;
  	} else {
  		type = DRM_MODE_CONNECTOR_DisplayPort;
  	}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		intel_dp->active_pipe = vlv_active_pipe(intel_dp);
/*
@@ -7376,7 +7376,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
  		intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
-	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	if (WARN_ON((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
  		    intel_dp_is_edp(intel_dp) &&
  		    port != PORT_B && port != PORT_C))
  		return false;
@@ -7388,26 +7388,26 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
  	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
  	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
- if (!HAS_GMCH(dev_priv))
+	if (!HAS_GMCH(i915))
  		connector->interlace_allowed = true;
  	connector->doublescan_allowed = 0;
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		connector->ycbcr_420_allowed = true;
- intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+	intel_encoder->hpd_pin = intel_hpd_pin_default(i915, port);
  	intel_dp_aux_init(intel_dp);
  	intel_connector_attach_encoder(intel_connector, intel_encoder);
-	if (HAS_DDI(dev_priv))
+	if (HAS_DDI(i915))
  		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
  	else
  		intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
-	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
+	if (HAS_DP_MST(i915) && !intel_dp_is_edp(intel_dp) &&
  	    (port == PORT_B || port == PORT_C ||
  	     port == PORT_D || port == PORT_F))
  		intel_dp_mst_encoder_init(intel_dig_port,
@@ -7421,7 +7421,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
  	intel_dp_add_properties(intel_dp, connector);
-	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
+	if (is_hdcp_supported(i915, port) && !intel_dp_is_edp(intel_dp)) {
  		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
  		if (ret)
  			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
@@ -7431,7 +7431,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
  	 * 0xd.  Failure to do so will result in spurious interrupts being
  	 * generated on the port when a cable is not attached.
  	 */
-	if (IS_G45(dev_priv)) {
+	if (IS_G45(i915)) {
  		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
  		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
  	}
@@ -7444,7 +7444,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
  	return false;
  }
-bool intel_dp_init(struct drm_i915_private *dev_priv,
+bool intel_dp_init(struct drm_i915_private *i915,
  		   i915_reg_t output_reg,
  		   enum port port)
  {
@@ -7464,7 +7464,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
  	intel_encoder = &intel_dig_port->base;
  	encoder = &intel_encoder->base;
- if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+	if (drm_encoder_init(&i915->drm, &intel_encoder->base,
  			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
  			     "DP %c", port_name(port)))
  		goto err_encoder_init;
@@ -7475,14 +7475,14 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
  	intel_encoder->get_config = intel_dp_get_config;
  	intel_encoder->update_pipe = intel_panel_update_backlight;
  	intel_encoder->suspend = intel_dp_encoder_suspend;
-	if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
  		intel_encoder->pre_enable = chv_pre_enable_dp;
  		intel_encoder->enable = vlv_enable_dp;
  		intel_encoder->disable = vlv_disable_dp;
  		intel_encoder->post_disable = chv_post_disable_dp;
  		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
-	} else if (IS_VALLEYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915)) {
  		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
  		intel_encoder->pre_enable = vlv_pre_enable_dp;
  		intel_encoder->enable = vlv_enable_dp;
@@ -7500,7 +7500,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->type = INTEL_OUTPUT_DP;
  	intel_encoder->power_domain = intel_port_to_power_domain(port);
-	if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		if (port == PORT_D)
  			intel_encoder->crtc_mask = 1 << 2;
  		else
@@ -7516,7 +7516,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
  	if (port != PORT_A)
  		intel_infoframe_init(intel_dig_port);
- intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+	intel_dig_port->aux_ch = intel_bios_port_aux_ch(i915, port);
  	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
  		goto err_init_connector;
@@ -7531,11 +7531,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
  	return false;
  }
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
+void intel_dp_mst_suspend(struct drm_i915_private *i915)
  {
  	struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+	for_each_intel_encoder(&i915->drm, encoder) {
  		struct intel_dp *intel_dp;
if (encoder->type != INTEL_OUTPUT_DDI)
@@ -7551,11 +7551,11 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
  	}
  }
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
+void intel_dp_mst_resume(struct drm_i915_private *i915)
  {
  	struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+	for_each_intel_encoder(&i915->drm, encoder) {
  		struct intel_dp *intel_dp;
  		int ret;
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
index da70b1a41c83..b6fecb185b23 100644
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -35,10 +35,10 @@ void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
  bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
  				  const struct drm_connector_state *conn_state);
  int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state);
-bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_dp_port_enabled(struct drm_i915_private *i915,
  			   i915_reg_t dp_reg, enum port port,
  			   enum pipe *pipe);
-bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
+bool intel_dp_init(struct drm_i915_private *i915, i915_reg_t output_reg,
  		   enum port port);
  bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
  			     struct intel_connector *intel_connector);
@@ -60,7 +60,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
  			    struct intel_crtc_state *pipe_config,
  			    struct drm_connector_state *conn_state);
  bool intel_dp_is_edp(struct intel_dp *intel_dp);
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port);
  enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
  				  bool long_hpd);
  void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
@@ -69,21 +69,21 @@ void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
  void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
  void intel_edp_panel_on(struct intel_dp *intel_dp);
  void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
+void intel_dp_mst_suspend(struct drm_i915_private *i915);
+void intel_dp_mst_resume(struct drm_i915_private *i915);
  int intel_dp_max_link_rate(struct intel_dp *intel_dp);
  int intel_dp_max_lane_count(struct intel_dp *intel_dp);
  int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
+void intel_power_sequencer_reset(struct drm_i915_private *i915);
  u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
  			   const struct intel_crtc_state *crtc_state);
  void intel_edp_drrs_disable(struct intel_dp *intel_dp,
  			    const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+void intel_edp_drrs_invalidate(struct drm_i915_private *i915,
  			       unsigned int frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+void intel_edp_drrs_flush(struct drm_i915_private *i915,
  			  unsigned int frontbuffer_bits);
void
@@ -112,7 +112,7 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
  int intel_dp_link_required(int pixel_clock, int bpp);
  int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
  bool intel_digital_port_connected(struct intel_encoder *encoder);
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+void icl_tc_phy_disconnect(struct drm_i915_private *i915,
  			   struct intel_digital_port *dig_port);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 7ded95a334db..7a3bd29dcf0b 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -109,7 +109,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
   */
  static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
  	int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
  	u8 pn, pn_min, pn_max;
@@ -118,7 +118,7 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
  	 * Note that, if F x P is out of supported range, the maximum value or
  	 * minimum value will applied automatically. So no need to check that.
  	 */
-	freq = dev_priv->vbt.backlight.pwm_freq_hz;
+	freq = i915->vbt.backlight.pwm_freq_hz;
  	DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
  	if (!freq) {
  		DRM_DEBUG_KMS("Use panel default backlight frequency\n");
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 0caf645fbbb8..ab21dc690c9f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -91,7 +91,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
  				       struct intel_crtc_state *pipe_config,
  				       struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
  	struct intel_dp *intel_dp = &intel_mst->primary->dp;
  	struct intel_connector *connector =
@@ -140,11 +140,11 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
  	pipe_config->limited_color_range =
  		intel_dp_limited_color_range(pipe_config, conn_state);
- if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
  		pipe_config->lane_lat_optim_mask =
  			bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+	intel_ddi_compute_min_voltage_level(i915, pipe_config);
return 0;
  }
@@ -285,7 +285,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
  	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
  	struct intel_digital_port *intel_dig_port = intel_mst->primary;
  	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = intel_dig_port->base.port;
  	struct intel_connector *connector =
  		to_intel_connector(conn_state->connector);
@@ -332,12 +332,12 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
  	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
  	struct intel_digital_port *intel_dig_port = intel_mst->primary;
  	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = intel_dig_port->base.port;
  	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    DP_TP_STATUS(port),
  				    DP_TP_STATUS_ACT_SENT,
  				    DP_TP_STATUS_ACT_SENT,
@@ -495,7 +495,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
  	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_connector *intel_connector;
  	struct drm_connector *connector;
  	enum pipe pipe;
@@ -520,7 +520,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
  	drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
-	for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		struct drm_encoder *enc =
  			&intel_dp->mst_encoders[pipe]->base.base;
@@ -549,10 +549,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
  static void intel_dp_register_mst_connector(struct drm_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
- if (dev_priv->fbdev)
-		drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
+	if (i915->fbdev)
+		drm_fb_helper_add_one_connector(&i915->fbdev->helper,
  						connector);
drm_connector_register(connector);
@@ -561,13 +561,13 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector)
  static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
  					   struct drm_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
  	drm_connector_unregister(connector);
- if (dev_priv->fbdev)
-		drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
+	if (i915->fbdev)
+		drm_fb_helper_remove_one_connector(&i915->fbdev->helper,
  						   connector);
drm_connector_put(connector);
@@ -622,10 +622,10 @@ static bool
  intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
  {
  	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
  	enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+	for_each_pipe(i915, pipe)
  		intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
  	return true;
  }
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index bdbe41759827..c85672bb1a7d 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -212,9 +212,9 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
  };
static const struct bxt_ddi_phy_info *
-bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
+bxt_get_phy_list(struct drm_i915_private *i915, int *count)
  {
-	if (IS_GEMINILAKE(dev_priv)) {
+	if (IS_GEMINILAKE(i915)) {
  		*count =  ARRAY_SIZE(glk_ddi_phy_info);
  		return glk_ddi_phy_info;
  	} else {
@@ -224,22 +224,22 @@ bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
  }
static const struct bxt_ddi_phy_info *
-bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+bxt_get_phy_info(struct drm_i915_private *i915, enum dpio_phy phy)
  {
  	int count;
  	const struct bxt_ddi_phy_info *phy_list =
-		bxt_get_phy_list(dev_priv, &count);
+		bxt_get_phy_list(i915, &count);
return &phy_list[phy];
  }
-void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+void bxt_port_to_phy_channel(struct drm_i915_private *i915, enum port port,
  			     enum dpio_phy *phy, enum dpio_channel *ch)
  {
  	const struct bxt_ddi_phy_info *phy_info, *phys;
  	int i, count;
- phys = bxt_get_phy_list(dev_priv, &count);
+	phys = bxt_get_phy_list(i915, &count);
for (i = 0; i < count; i++) {
  		phy_info = &phys[i];
@@ -263,7 +263,7 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
  	*ch = DPIO_CH0;
  }
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *i915,
  				  enum port port, u32 margin, u32 scale,
  				  u32 enable, u32 deemphasis)
  {
@@ -271,7 +271,7 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
  	enum dpio_phy phy;
  	enum dpio_channel ch;
- bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+	bxt_port_to_phy_channel(i915, port, &phy, &ch);
/*
  	 * While we write to the group register to program all lanes at once we
@@ -306,12 +306,12 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
  	I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
  }
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *i915,
  			    enum dpio_phy phy)
  {
  	const struct bxt_ddi_phy_info *phy_info;
- phy_info = bxt_get_phy_info(dev_priv, phy);
+	phy_info = bxt_get_phy_info(i915, phy);
if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
  		return false;
@@ -334,37 +334,37 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
  	return true;
  }
-static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static u32 bxt_get_grc(struct drm_i915_private *i915, enum dpio_phy phy)
  {
  	u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
  }
-static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+static void bxt_phy_wait_grc_done(struct drm_i915_private *i915,
  				  enum dpio_phy phy)
  {
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    BXT_PORT_REF_DW3(phy),
  				    GRC_DONE, GRC_DONE,
  				    10))
  		DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
  }
-static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
+static void _bxt_ddi_phy_init(struct drm_i915_private *i915,
  			      enum dpio_phy phy)
  {
  	const struct bxt_ddi_phy_info *phy_info;
  	u32 val;
- phy_info = bxt_get_phy_info(dev_priv, phy);
+	phy_info = bxt_get_phy_info(i915, phy);
- if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+	if (bxt_ddi_phy_is_enabled(i915, phy)) {
  		/* Still read out the GRC value for state verification */
  		if (phy_info->rcomp_phy != -1)
-			dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+			i915->bxt_phy_grc = bxt_get_grc(i915, phy);
- if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+		if (bxt_ddi_phy_verify_state(i915, phy)) {
  			DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
  					 "won't reprogram it\n", phy);
  			return;
@@ -386,7 +386,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
  	 * The flag should get set in 100us according to the HW team, but
  	 * use 1ms due to occasional timeouts observed with that.
  	 */
-	if (intel_wait_for_register_fw(&dev_priv->uncore,
+	if (intel_wait_for_register_fw(&i915->uncore,
  				       BXT_PORT_CL1CM_DW0(phy),
  				       PHY_RESERVED | PHY_POWER_GOOD,
  				       PHY_POWER_GOOD,
@@ -419,14 +419,14 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
  	if (phy_info->rcomp_phy != -1) {
  		u32 grc_code;
- bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
+		bxt_phy_wait_grc_done(i915, phy_info->rcomp_phy);
/*
  		 * PHY0 isn't connected to an RCOMP resistor so copy over
  		 * the corresponding calibrated value from PHY1, and disable
  		 * the automatic calibration on PHY0.
  		 */
-		val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
+		val = i915->bxt_phy_grc = bxt_get_grc(i915,
  							  phy_info->rcomp_phy);
  		grc_code = val << GRC_CODE_FAST_SHIFT |
  			   val << GRC_CODE_SLOW_SHIFT |
@@ -446,12 +446,12 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
  	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
  }
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+void bxt_ddi_phy_uninit(struct drm_i915_private *i915, enum dpio_phy phy)
  {
  	const struct bxt_ddi_phy_info *phy_info;
  	u32 val;
- phy_info = bxt_get_phy_info(dev_priv, phy);
+	phy_info = bxt_get_phy_info(i915, phy);
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
  	val &= ~COMMON_RESET_DIS;
@@ -462,34 +462,34 @@ void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
  	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
  }
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+void bxt_ddi_phy_init(struct drm_i915_private *i915, enum dpio_phy phy)
  {
  	const struct bxt_ddi_phy_info *phy_info =
-		bxt_get_phy_info(dev_priv, phy);
+		bxt_get_phy_info(i915, phy);
  	enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
  	bool was_enabled;
- lockdep_assert_held(&dev_priv->power_domains.lock);
+	lockdep_assert_held(&i915->power_domains.lock);
was_enabled = true;
  	if (rcomp_phy != -1)
-		was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+		was_enabled = bxt_ddi_phy_is_enabled(i915, rcomp_phy);
/*
  	 * We need to copy the GRC calibration value from rcomp_phy,
  	 * so make sure it's powered up.
  	 */
  	if (!was_enabled)
-		_bxt_ddi_phy_init(dev_priv, rcomp_phy);
+		_bxt_ddi_phy_init(i915, rcomp_phy);
- _bxt_ddi_phy_init(dev_priv, phy);
+	_bxt_ddi_phy_init(i915, phy);
if (!was_enabled)
-		bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
+		bxt_ddi_phy_uninit(i915, rcomp_phy);
  }
static bool __printf(6, 7)
-__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+__phy_reg_verify_state(struct drm_i915_private *i915, enum dpio_phy phy,
  		       i915_reg_t reg, u32 mask, u32 expected,
  		       const char *reg_fmt, ...)
  {
@@ -515,20 +515,20 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
  	return false;
  }
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *i915,
  			      enum dpio_phy phy)
  {
  	const struct bxt_ddi_phy_info *phy_info;
  	u32 mask;
  	bool ok;
- phy_info = bxt_get_phy_info(dev_priv, phy);
+	phy_info = bxt_get_phy_info(i915, phy);
#define _CHK(reg, mask, exp, fmt, ...) \
-	__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt,	\
+	__phy_reg_verify_state(i915, phy, reg, mask, exp, fmt,	\
  			       ## __VA_ARGS__)
- if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+	if (!bxt_ddi_phy_is_enabled(i915, phy))
  		return false;
ok = true;
@@ -552,7 +552,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
  			   "BXT_PORT_CL2CM_DW6(%d)", phy);
if (phy_info->rcomp_phy != -1) {
-		u32 grc_code = dev_priv->bxt_phy_grc;
+		u32 grc_code = i915->bxt_phy_grc;
grc_code = grc_code << GRC_CODE_FAST_SHIFT |
  			   grc_code << GRC_CODE_SLOW_SHIFT |
@@ -591,13 +591,13 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
  void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
  				     u8 lane_lat_optim_mask)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	enum dpio_phy phy;
  	enum dpio_channel ch;
  	int lane;
- bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+	bxt_port_to_phy_channel(i915, port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
  		u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
@@ -617,14 +617,14 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
  u8
  bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum port port = encoder->port;
  	enum dpio_phy phy;
  	enum dpio_channel ch;
  	int lane;
  	u8 mask;
- bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+	bxt_port_to_phy_channel(i915, port, &phy, &ch);
mask = 0;
  	for (lane = 0; lane < 4; lane++) {
@@ -642,7 +642,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
  			      u32 deemph_reg_value, u32 margin_reg_value,
  			      bool uniq_trans_scale)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
  	enum dpio_channel ch = vlv_dport_to_channel(dport);
@@ -650,46 +650,46 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
  	u32 val;
  	int i;
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* Clear calc init */
-	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+	val = vlv_dpio_read(i915, pipe, VLV_PCS01_DW10(ch));
  	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
  	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
  	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+	vlv_dpio_write(i915, pipe, VLV_PCS01_DW10(ch), val);
if (intel_crtc->config->lane_count > 2) {
-		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+		val = vlv_dpio_read(i915, pipe, VLV_PCS23_DW10(ch));
  		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
  		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
  		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
-		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+		vlv_dpio_write(i915, pipe, VLV_PCS23_DW10(ch), val);
  	}
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+	val = vlv_dpio_read(i915, pipe, VLV_PCS01_DW9(ch));
  	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
  	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+	vlv_dpio_write(i915, pipe, VLV_PCS01_DW9(ch), val);
if (intel_crtc->config->lane_count > 2) {
-		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+		val = vlv_dpio_read(i915, pipe, VLV_PCS23_DW9(ch));
  		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
  		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
-		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+		vlv_dpio_write(i915, pipe, VLV_PCS23_DW9(ch), val);
  	}
/* Program swing deemph */
  	for (i = 0; i < intel_crtc->config->lane_count; i++) {
-		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+		val = vlv_dpio_read(i915, pipe, CHV_TX_DW4(ch, i));
  		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
  		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
-		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+		vlv_dpio_write(i915, pipe, CHV_TX_DW4(ch, i), val);
  	}
/* Program swing margin */
  	for (i = 0; i < intel_crtc->config->lane_count; i++) {
-		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+		val = vlv_dpio_read(i915, pipe, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
  		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
@@ -702,7 +702,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
  		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
  		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+		vlv_dpio_write(i915, pipe, CHV_TX_DW2(ch, i), val);
  	}
/*
@@ -712,70 +712,70 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
  	 * 27 for ch0 and ch1.
  	 */
  	for (i = 0; i < intel_crtc->config->lane_count; i++) {
-		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+		val = vlv_dpio_read(i915, pipe, CHV_TX_DW3(ch, i));
  		if (uniq_trans_scale)
  			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
  		else
  			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
-		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+		vlv_dpio_write(i915, pipe, CHV_TX_DW3(ch, i), val);
  	}
/* Start swing calculation */
-	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+	val = vlv_dpio_read(i915, pipe, VLV_PCS01_DW10(ch));
  	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+	vlv_dpio_write(i915, pipe, VLV_PCS01_DW10(ch), val);
if (intel_crtc->config->lane_count > 2) {
-		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+		val = vlv_dpio_read(i915, pipe, VLV_PCS23_DW10(ch));
  		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
-		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+		vlv_dpio_write(i915, pipe, VLV_PCS23_DW10(ch), val);
  	}
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
  			      const struct intel_crtc_state *crtc_state,
  			      bool reset)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	enum pipe pipe = crtc->pipe;
  	u32 val;
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+	val = vlv_dpio_read(i915, pipe, VLV_PCS01_DW0(ch));
  	if (reset)
  		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
  	else
  		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+	vlv_dpio_write(i915, pipe, VLV_PCS01_DW0(ch), val);
if (crtc_state->lane_count > 2) {
-		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+		val = vlv_dpio_read(i915, pipe, VLV_PCS23_DW0(ch));
  		if (reset)
  			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
  		else
  			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
-		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+		vlv_dpio_write(i915, pipe, VLV_PCS23_DW0(ch), val);
  	}
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+	val = vlv_dpio_read(i915, pipe, VLV_PCS01_DW1(ch));
  	val |= CHV_PCS_REQ_SOFTRESET_EN;
  	if (reset)
  		val &= ~DPIO_PCS_CLK_SOFT_RESET;
  	else
  		val |= DPIO_PCS_CLK_SOFT_RESET;
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+	vlv_dpio_write(i915, pipe, VLV_PCS01_DW1(ch), val);
if (crtc_state->lane_count > 2) {
-		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+		val = vlv_dpio_read(i915, pipe, VLV_PCS23_DW1(ch));
  		val |= CHV_PCS_REQ_SOFTRESET_EN;
  		if (reset)
  			val &= ~DPIO_PCS_CLK_SOFT_RESET;
  		else
  			val |= DPIO_PCS_CLK_SOFT_RESET;
-		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+		vlv_dpio_write(i915, pipe, VLV_PCS23_DW1(ch), val);
  	}
  }
@@ -783,7 +783,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
  			    const struct intel_crtc_state *crtc_state)
  {
  	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	enum dpio_channel ch = vlv_dport_to_channel(dport);
  	enum pipe pipe = crtc->pipe;
@@ -797,51 +797,51 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
  	 */
  	if (ch == DPIO_CH0 && pipe == PIPE_B)
  		dport->release_cl2_override =
-			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+			!chv_phy_powergate_ch(i915, DPIO_PHY0, DPIO_CH1, true);
  	chv_phy_powergate_lanes(encoder, true, lane_mask);
-	vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* Assert data lane reset */
  	chv_data_lane_soft_reset(encoder, crtc_state, true);
/* program left/right clock distribution */
  	if (pipe != PIPE_B) {
-		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+		val = vlv_dpio_read(i915, pipe, _CHV_CMN_DW5_CH0);
  		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
  		if (ch == DPIO_CH0)
  			val |= CHV_BUFLEFTENA1_FORCE;
  		if (ch == DPIO_CH1)
  			val |= CHV_BUFRIGHTENA1_FORCE;
-		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+		vlv_dpio_write(i915, pipe, _CHV_CMN_DW5_CH0, val);
  	} else {
-		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+		val = vlv_dpio_read(i915, pipe, _CHV_CMN_DW1_CH1);
  		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
  		if (ch == DPIO_CH0)
  			val |= CHV_BUFLEFTENA2_FORCE;
  		if (ch == DPIO_CH1)
  			val |= CHV_BUFRIGHTENA2_FORCE;
-		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+		vlv_dpio_write(i915, pipe, _CHV_CMN_DW1_CH1, val);
  	}
/* program clock channel usage */
-	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+	val = vlv_dpio_read(i915, pipe, VLV_PCS01_DW8(ch));
  	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
  	if (pipe != PIPE_B)
  		val &= ~CHV_PCS_USEDCLKCHANNEL;
  	else
  		val |= CHV_PCS_USEDCLKCHANNEL;
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+	vlv_dpio_write(i915, pipe, VLV_PCS01_DW8(ch), val);
if (crtc_state->lane_count > 2) {
-		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+		val = vlv_dpio_read(i915, pipe, VLV_PCS23_DW8(ch));
  		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
  		if (pipe != PIPE_B)
  			val &= ~CHV_PCS_USEDCLKCHANNEL;
  		else
  			val |= CHV_PCS_USEDCLKCHANNEL;
-		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+		vlv_dpio_write(i915, pipe, VLV_PCS23_DW8(ch), val);
  	}
/*
@@ -849,14 +849,14 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
  	 * matches the pipe, but here we need to
  	 * pick the CL based on the port.
  	 */
-	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+	val = vlv_dpio_read(i915, pipe, CHV_CMN_DW19(ch));
  	if (pipe != PIPE_B)
  		val &= ~CHV_CMN_USEDCLKCHANNEL;
  	else
  		val |= CHV_CMN_USEDCLKCHANNEL;
-	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+	vlv_dpio_write(i915, pipe, CHV_CMN_DW19(ch), val);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
@@ -864,24 +864,24 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
  {
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	enum dpio_channel ch = vlv_dport_to_channel(dport);
  	enum pipe pipe = crtc->pipe;
  	int data, i, stagger;
  	u32 val;
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* allow hardware to manage TX FIFO reset source */
-	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+	val = vlv_dpio_read(i915, pipe, VLV_PCS01_DW11(ch));
  	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+	vlv_dpio_write(i915, pipe, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
-		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+		val = vlv_dpio_read(i915, pipe, VLV_PCS23_DW11(ch));
  		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
-		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+		vlv_dpio_write(i915, pipe, VLV_PCS23_DW11(ch), val);
  	}
/* Program Tx lane latency optimal setting*/
@@ -891,7 +891,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
  			data = 0x0;
  		else
  			data = (i == 1) ? 0x0 : 0x1;
-		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+		vlv_dpio_write(i915, pipe, CHV_TX_DW14(ch, i),
  				data << DPIO_UPAR_SHIFT);
  	}
@@ -907,17 +907,17 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
  	else
  		stagger = 0x2;
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+	val = vlv_dpio_read(i915, pipe, VLV_PCS01_DW11(ch));
  	val |= DPIO_TX2_STAGGER_MASK(0x1f);
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+	vlv_dpio_write(i915, pipe, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
-		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+		val = vlv_dpio_read(i915, pipe, VLV_PCS23_DW11(ch));
  		val |= DPIO_TX2_STAGGER_MASK(0x1f);
-		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+		vlv_dpio_write(i915, pipe, VLV_PCS23_DW11(ch), val);
  	}
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+	vlv_dpio_write(i915, pipe, VLV_PCS01_DW12(ch),
  		       DPIO_LANESTAGGER_STRAP(stagger) |
  		       DPIO_LANESTAGGER_STRAP_OVRD |
  		       DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -925,7 +925,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
  		       DPIO_TX2_STAGGER_MULT(0));
if (crtc_state->lane_count > 2) {
-		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+		vlv_dpio_write(i915, pipe, VLV_PCS23_DW12(ch),
  			       DPIO_LANESTAGGER_STRAP(stagger) |
  			       DPIO_LANESTAGGER_STRAP_OVRD |
  			       DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -936,16 +936,16 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
  	/* Deassert data lane reset */
  	chv_data_lane_soft_reset(encoder, crtc_state, false);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
  {
  	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (dport->release_cl2_override) {
-		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+		chv_phy_powergate_ch(i915, DPIO_PHY0, DPIO_CH1, false);
  		dport->release_cl2_override = false;
  	}
  }
@@ -953,24 +953,24 @@ void chv_phy_release_cl2_override(struct intel_encoder *encoder)
  void chv_phy_post_pll_disable(struct intel_encoder *encoder,
  			      const struct intel_crtc_state *old_crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
  	u32 val;
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* disable left/right clock distribution */
  	if (pipe != PIPE_B) {
-		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+		val = vlv_dpio_read(i915, pipe, _CHV_CMN_DW5_CH0);
  		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
-		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+		vlv_dpio_write(i915, pipe, _CHV_CMN_DW5_CH0, val);
  	} else {
-		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+		val = vlv_dpio_read(i915, pipe, _CHV_CMN_DW1_CH1);
  		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
-		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+		vlv_dpio_write(i915, pipe, _CHV_CMN_DW1_CH1, val);
  	}
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
/*
  	 * Leave the power down bit cleared for at least one
@@ -988,57 +988,57 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
  			      u32 demph_reg_value, u32 preemph_reg_value,
  			      u32 uniqtranscale_reg_value, u32 tx3_demph)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
  	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  	enum dpio_channel port = vlv_dport_to_channel(dport);
  	enum pipe pipe = intel_crtc->pipe;
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
-	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
-	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+	vlv_dpio_write(i915, pipe, VLV_TX_DW5(port), 0x00000000);
+	vlv_dpio_write(i915, pipe, VLV_TX_DW4(port), demph_reg_value);
+	vlv_dpio_write(i915, pipe, VLV_TX_DW2(port),
  			 uniqtranscale_reg_value);
-	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+	vlv_dpio_write(i915, pipe, VLV_TX_DW3(port), 0x0C782040);
if (tx3_demph)
-		vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
+		vlv_dpio_write(i915, pipe, VLV_TX3_DW4(port), tx3_demph);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
-	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+	vlv_dpio_write(i915, pipe, VLV_PCS_DW11(port), 0x00030000);
+	vlv_dpio_write(i915, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+	vlv_dpio_write(i915, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
  			    const struct intel_crtc_state *crtc_state)
  {
  	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	enum dpio_channel port = vlv_dport_to_channel(dport);
  	enum pipe pipe = crtc->pipe;
/* Program Tx lane resets to default */
-	vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+	vlv_dpio_write(i915, pipe, VLV_PCS_DW0(port),
  			 DPIO_PCS_TX_LANE2_RESET |
  			 DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+	vlv_dpio_write(i915, pipe, VLV_PCS_DW1(port),
  			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
  			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
  			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
  				 DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
-	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+	vlv_dpio_write(i915, pipe, VLV_PCS_DW12(port), 0x00750f00);
+	vlv_dpio_write(i915, pipe, VLV_TX_DW11(port), 0x00001500);
+	vlv_dpio_write(i915, pipe, VLV_TX_DW14(port), 0x40400000);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
@@ -1046,42 +1046,42 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
  {
  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	enum dpio_channel port = vlv_dport_to_channel(dport);
  	enum pipe pipe = crtc->pipe;
  	u32 val;
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* Enable clock channels for this port */
-	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+	val = vlv_dpio_read(i915, pipe, VLV_PCS01_DW8(port));
  	val = 0;
  	if (pipe)
  		val |= (1<<21);
  	else
  		val &= ~(1<<21);
  	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+	vlv_dpio_write(i915, pipe, VLV_PCS_DW8(port), val);
/* Program lane clock */
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+	vlv_dpio_write(i915, pipe, VLV_PCS_DW14(port), 0x00760018);
+	vlv_dpio_write(i915, pipe, VLV_PCS_DW23(port), 0x00400888);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
  			 const struct intel_crtc_state *old_crtc_state)
  {
  	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  	enum dpio_channel port = vlv_dport_to_channel(dport);
  	enum pipe pipe = crtc->pipe;
- vlv_dpio_get(dev_priv);
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
-	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
-	vlv_dpio_put(dev_priv);
+	vlv_dpio_get(i915);
+	vlv_dpio_write(i915, pipe, VLV_PCS_DW0(port), 0x00000000);
+	vlv_dpio_write(i915, pipe, VLV_PCS_DW1(port), 0x00e00060);
+	vlv_dpio_put(i915);
  }
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.h b/drivers/gpu/drm/i915/intel_dpio_phy.h
index f418aab90b7e..d4c9393bebce 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.h
@@ -15,16 +15,16 @@ struct drm_i915_private;
  struct intel_crtc_state;
  struct intel_encoder;
-void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+void bxt_port_to_phy_channel(struct drm_i915_private *i915, enum port port,
  			     enum dpio_phy *phy, enum dpio_channel *ch);
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *i915,
  				  enum port port, u32 margin, u32 scale,
  				  u32 enable, u32 deemphasis);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+void bxt_ddi_phy_init(struct drm_i915_private *i915, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *i915, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *i915,
  			    enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *i915,
  			      enum dpio_phy phy);
  u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
  void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 2d4e7b9a7b9d..2dabcdf27df4 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -45,14 +45,14 @@
   */
static void
-intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
  				  struct intel_shared_dpll_state *shared_dpll)
  {
  	enum intel_dpll_id i;
/* Copy shared dpll state */
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+	for (i = 0; i < i915->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &i915->shared_dplls[i];
shared_dpll[i] = pll->state;
  	}
@@ -77,40 +77,40 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
/**
   * intel_get_shared_dpll_by_id - get a DPLL given its id
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @id: pll id
   *
   * Returns:
   * A pointer to the DPLL with @id
   */
  struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
  			    enum intel_dpll_id id)
  {
-	return &dev_priv->shared_dplls[id];
+	return &i915->shared_dplls[id];
  }
/**
   * intel_get_shared_dpll_id - get the id of a DPLL
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @pll: the DPLL
   *
   * Returns:
   * The id of @pll
   */
  enum intel_dpll_id
-intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+intel_get_shared_dpll_id(struct drm_i915_private *i915,
  			 struct intel_shared_dpll *pll)
  {
-	if (WARN_ON(pll < dev_priv->shared_dplls||
-		    pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+	if (WARN_ON(pll < i915->shared_dplls||
+		    pll > &i915->shared_dplls[i915->num_shared_dpll]))
  		return -1;
- return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+	return (enum intel_dpll_id) (pll - i915->shared_dplls);
  }
/* For ILK+ */
-void assert_shared_dpll(struct drm_i915_private *dev_priv,
+void assert_shared_dpll(struct drm_i915_private *i915,
  			struct intel_shared_dpll *pll,
  			bool state)
  {
@@ -120,7 +120,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
  	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
  		return;
- cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
+	cur_state = pll->info->funcs->get_hw_state(i915, pll, &hw_state);
  	I915_STATE_WARN(cur_state != state,
  	     "%s assertion failure (expected %s, current %s)\n",
  			pll->info->name, onoff(state), onoff(cur_state));
@@ -136,22 +136,22 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
  void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(pll == NULL))
  		return;
- mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&i915->dpll_lock);
  	WARN_ON(!pll->state.crtc_mask);
  	if (!pll->active_mask) {
  		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
  		WARN_ON(pll->on);
-		assert_shared_dpll_disabled(dev_priv, pll);
+		assert_shared_dpll_disabled(i915, pll);
- pll->info->funcs->prepare(dev_priv, pll);
+		pll->info->funcs->prepare(i915, pll);
  	}
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&i915->dpll_lock);
  }
/**
@@ -163,7 +163,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
  void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
  	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
  	unsigned int old_mask;
@@ -171,7 +171,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
  	if (WARN_ON(pll == NULL))
  		return;
- mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&i915->dpll_lock);
  	old_mask = pll->active_mask;
if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
@@ -186,17 +186,17 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (old_mask) {
  		WARN_ON(!pll->on);
-		assert_shared_dpll_enabled(dev_priv, pll);
+		assert_shared_dpll_enabled(i915, pll);
  		goto out;
  	}
  	WARN_ON(pll->on);
DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
-	pll->info->funcs->enable(dev_priv, pll);
+	pll->info->funcs->enable(i915, pll);
  	pll->on = true;
out:
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&i915->dpll_lock);
  }
/**
@@ -208,18 +208,18 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
  void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
  	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
/* PCH only available on ILK+ */
-	if (INTEL_GEN(dev_priv) < 5)
+	if (INTEL_GEN(i915) < 5)
  		return;
if (pll == NULL)
  		return;
- mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&i915->dpll_lock);
  	if (WARN_ON(!(pll->active_mask & crtc_mask)))
  		goto out;
@@ -227,7 +227,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
  		      pll->info->name, pll->active_mask, pll->on,
  		      crtc->base.base.id);
- assert_shared_dpll_enabled(dev_priv, pll);
+	assert_shared_dpll_enabled(i915, pll);
  	WARN_ON(!pll->on);
pll->active_mask &= ~crtc_mask;
@@ -235,11 +235,11 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
  		goto out;
DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
-	pll->info->funcs->disable(dev_priv, pll);
+	pll->info->funcs->disable(i915, pll);
  	pll->on = false;
out:
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&i915->dpll_lock);
  }
static struct intel_shared_dpll *
@@ -248,7 +248,7 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
  		       enum intel_dpll_id range_max)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_shared_dpll *pll, *unused_pll = NULL;
  	struct intel_shared_dpll_state *shared_dpll;
  	enum intel_dpll_id i;
@@ -256,7 +256,7 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
  	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
for (i = range_min; i <= range_max; i++) {
-		pll = &dev_priv->shared_dplls[i];
+		pll = &i915->shared_dplls[i];
/* Only want to check enabled timings first */
  		if (shared_dpll[i].crtc_mask == 0) {
@@ -322,7 +322,7 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll,
   */
  void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	struct drm_i915_private *i915 = to_i915(state->dev);
  	struct intel_shared_dpll_state *shared_dpll;
  	struct intel_shared_dpll *pll;
  	enum intel_dpll_id i;
@@ -331,10 +331,10 @@ void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
  		return;
shared_dpll = to_intel_atomic_state(state)->shared_dpll;
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < i915->num_shared_dpll; i++) {
  		struct intel_shared_dpll_state tmp;
- pll = &dev_priv->shared_dplls[i];
+		pll = &i915->shared_dplls[i];
tmp = pll->state;
  		pll->state = shared_dpll[i];
@@ -342,7 +342,7 @@ void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
  	}
  }
-static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
  				      struct intel_shared_dpll *pll,
  				      struct intel_dpll_hw_state *hw_state)
  {
@@ -350,7 +350,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
  	intel_wakeref_t wakeref;
  	u32 val;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     POWER_DOMAIN_DISPLAY_CORE);
  	if (!wakeref)
  		return false;
@@ -360,12 +360,12 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
  	hw_state->fp0 = I915_READ(PCH_FP0(id));
  	hw_state->fp1 = I915_READ(PCH_FP1(id));
- intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & DPLL_VCO_ENABLE;
  }
-static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
+static void ibx_pch_dpll_prepare(struct drm_i915_private *i915,
  				 struct intel_shared_dpll *pll)
  {
  	const enum intel_dpll_id id = pll->info->id;
@@ -374,12 +374,12 @@ static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
  	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
  }
-static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
  {
  	u32 val;
  	bool enabled;
- I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+	I915_STATE_WARN_ON(!(HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915)));
val = I915_READ(PCH_DREF_CONTROL);
  	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
@@ -387,13 +387,13 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
  	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
  }
-static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
+static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
  				struct intel_shared_dpll *pll)
  {
  	const enum intel_dpll_id id = pll->info->id;
/* PCH refclock must be enabled first */
-	ibx_assert_pch_refclk_enabled(dev_priv);
+	ibx_assert_pch_refclk_enabled(i915);
  	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
@@ -411,7 +411,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
  	udelay(200);
  }
-static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
+static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
  				 struct intel_shared_dpll *pll)
  {
  	const enum intel_dpll_id id = pll->info->id;
@@ -426,14 +426,14 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state,
  	     struct intel_encoder *encoder)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_shared_dpll *pll;
  	enum intel_dpll_id i;
- if (HAS_PCH_IBX(dev_priv)) {
+	if (HAS_PCH_IBX(i915)) {
  		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
  		i = (enum intel_dpll_id) crtc->pipe;
-		pll = &dev_priv->shared_dplls[i];
+		pll = &i915->shared_dplls[i];
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
  			      crtc->base.base.id, crtc->base.name,
@@ -453,7 +453,7 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state,
  	return pll;
  }
-static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
+static void ibx_dump_hw_state(struct drm_i915_private *i915,
  			      const struct intel_dpll_hw_state *hw_state)
  {
  	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
@@ -471,7 +471,7 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
  	.get_hw_state = ibx_pch_dpll_get_hw_state,
  };
-static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
  			       struct intel_shared_dpll *pll)
  {
  	const enum intel_dpll_id id = pll->info->id;
@@ -481,7 +481,7 @@ static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
  	udelay(20);
  }
-static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
  				struct intel_shared_dpll *pll)
  {
  	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
@@ -489,7 +489,7 @@ static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
  	udelay(20);
  }
-static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
  				  struct intel_shared_dpll *pll)
  {
  	const enum intel_dpll_id id = pll->info->id;
@@ -500,7 +500,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
  	POSTING_READ(WRPLL_CTL(id));
  }
-static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
  				 struct intel_shared_dpll *pll)
  {
  	u32 val;
@@ -510,7 +510,7 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
  	POSTING_READ(SPLL_CTL);
  }
-static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
  				       struct intel_shared_dpll *pll,
  				       struct intel_dpll_hw_state *hw_state)
  {
@@ -518,7 +518,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
  	intel_wakeref_t wakeref;
  	u32 val;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     POWER_DOMAIN_DISPLAY_CORE);
  	if (!wakeref)
  		return false;
@@ -526,19 +526,19 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
  	val = I915_READ(WRPLL_CTL(id));
  	hw_state->wrpll = val;
- intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & WRPLL_PLL_ENABLE;
  }
-static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
  				      struct intel_shared_dpll *pll,
  				      struct intel_dpll_hw_state *hw_state)
  {
  	intel_wakeref_t wakeref;
  	u32 val;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     POWER_DOMAIN_DISPLAY_CORE);
  	if (!wakeref)
  		return false;
@@ -546,7 +546,7 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
  	val = I915_READ(SPLL_CTL);
  	hw_state->spll = val;
- intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & SPLL_PLL_ENABLE;
  }
@@ -793,7 +793,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *
  static struct intel_shared_dpll *
  hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	struct intel_shared_dpll *pll;
  	enum intel_dpll_id pll_id;
  	int clock = crtc_state->port_clock;
@@ -813,7 +813,7 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
  		return NULL;
  	}
- pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+	pll = intel_get_shared_dpll_by_id(i915, pll_id);
if (!pll)
  		return NULL;
@@ -855,7 +855,7 @@ hsw_get_dpll(struct intel_crtc_state *crtc_state,
  	return pll;
  }
-static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
+static void hsw_dump_hw_state(struct drm_i915_private *i915,
  			      const struct intel_dpll_hw_state *hw_state)
  {
  	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
@@ -874,17 +874,17 @@ static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
  	.get_hw_state = hsw_ddi_spll_get_hw_state,
  };
-static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
  				 struct intel_shared_dpll *pll)
  {
  }
-static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
  				  struct intel_shared_dpll *pll)
  {
  }
-static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
  				       struct intel_shared_dpll *pll,
  				       struct intel_dpll_hw_state *hw_state)
  {
@@ -928,7 +928,7 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = {
  	},
  };
-static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
+static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
  				    struct intel_shared_dpll *pll)
  {
  	const enum intel_dpll_id id = pll->info->id;
@@ -945,13 +945,13 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
  	POSTING_READ(DPLL_CTRL1);
  }
-static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+static void skl_ddi_pll_enable(struct drm_i915_private *i915,
  			       struct intel_shared_dpll *pll)
  {
  	const struct skl_dpll_regs *regs = skl_dpll_regs;
  	const enum intel_dpll_id id = pll->info->id;
- skl_ddi_pll_write_ctrl1(dev_priv, pll);
+	skl_ddi_pll_write_ctrl1(i915, pll);
I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
  	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
@@ -962,7 +962,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
  	I915_WRITE(regs[id].ctl,
  		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    DPLL_STATUS,
  				    DPLL_LOCK(id),
  				    DPLL_LOCK(id),
@@ -970,13 +970,13 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
  		DRM_ERROR("DPLL %d not locked\n", id);
  }
-static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
+static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
  				 struct intel_shared_dpll *pll)
  {
-	skl_ddi_pll_write_ctrl1(dev_priv, pll);
+	skl_ddi_pll_write_ctrl1(i915, pll);
  }
-static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+static void skl_ddi_pll_disable(struct drm_i915_private *i915,
  				struct intel_shared_dpll *pll)
  {
  	const struct skl_dpll_regs *regs = skl_dpll_regs;
@@ -988,12 +988,12 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
  	POSTING_READ(regs[id].ctl);
  }
-static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
+static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
  				  struct intel_shared_dpll *pll)
  {
  }
-static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
  				     struct intel_shared_dpll *pll,
  				     struct intel_dpll_hw_state *hw_state)
  {
@@ -1003,7 +1003,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
  	intel_wakeref_t wakeref;
  	bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     POWER_DOMAIN_DISPLAY_CORE);
  	if (!wakeref)
  		return false;
@@ -1025,12 +1025,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
  	ret = true;
out:
-	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
  }
-static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
+static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
  				       struct intel_shared_dpll *pll,
  				       struct intel_dpll_hw_state *hw_state)
  {
@@ -1040,7 +1040,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
  	u32 val;
  	bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     POWER_DOMAIN_DISPLAY_CORE);
  	if (!wakeref)
  		return false;
@@ -1058,7 +1058,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
  	ret = true;
out:
-	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
  }
@@ -1424,7 +1424,7 @@ skl_get_dpll(struct intel_crtc_state *crtc_state,
  	return pll;
  }
-static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
+static void skl_dump_hw_state(struct drm_i915_private *i915,
  			      const struct intel_dpll_hw_state *hw_state)
  {
  	DRM_DEBUG_KMS("dpll_hw_state: "
@@ -1446,7 +1446,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
  	.get_hw_state = skl_ddi_dpll0_get_hw_state,
  };
-static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
+static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
  				struct intel_shared_dpll *pll)
  {
  	u32 temp;
@@ -1454,14 +1454,14 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
  	enum dpio_phy phy;
  	enum dpio_channel ch;
- bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+	bxt_port_to_phy_channel(i915, port, &phy, &ch);
/* Non-SSC reference */
  	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
  	temp |= PORT_PLL_REF_SEL;
  	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- if (IS_GEMINILAKE(dev_priv)) {
+	if (IS_GEMINILAKE(i915)) {
  		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
  		temp |= PORT_PLL_POWER_ENABLE;
  		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
@@ -1549,7 +1549,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
  			200))
  		DRM_ERROR("PLL %d not locked\n", port);
- if (IS_GEMINILAKE(dev_priv)) {
+	if (IS_GEMINILAKE(i915)) {
  		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
  		temp |= DCC_DELAY_RANGE_2;
  		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
@@ -1566,7 +1566,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
  	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
  }
-static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
+static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
  					struct intel_shared_dpll *pll)
  {
  	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
@@ -1577,7 +1577,7 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
  	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
  	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
- if (IS_GEMINILAKE(dev_priv)) {
+	if (IS_GEMINILAKE(i915)) {
  		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
  		temp &= ~PORT_PLL_POWER_ENABLE;
  		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
@@ -1588,7 +1588,7 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
  	}
  }
-static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
  					struct intel_shared_dpll *pll,
  					struct intel_dpll_hw_state *hw_state)
  {
@@ -1599,9 +1599,9 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
  	u32 val;
  	bool ret;
- bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+	bxt_port_to_phy_channel(i915, port, &phy, &ch);
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     POWER_DOMAIN_DISPLAY_CORE);
  	if (!wakeref)
  		return false;
@@ -1660,7 +1660,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
  	ret = true;
out:
-	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
  }
@@ -1832,7 +1832,7 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state,
  	     struct intel_encoder *encoder)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_shared_dpll *pll;
  	enum intel_dpll_id id;
@@ -1846,7 +1846,7 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state,
  	/* 1:1 mapping between ports and PLLs */
  	id = (enum intel_dpll_id) encoder->port;
-	pll = intel_get_shared_dpll_by_id(dev_priv, id);
+	pll = intel_get_shared_dpll_by_id(i915, id);
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
  		      crtc->base.base.id, crtc->base.name, pll->info->name);
@@ -1856,7 +1856,7 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state,
  	return pll;
  }
-static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
+static void bxt_dump_hw_state(struct drm_i915_private *i915,
  			      const struct intel_dpll_hw_state *hw_state)
  {
  	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
@@ -1887,7 +1887,7 @@ struct intel_dpll_mgr {
  	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
  					      struct intel_encoder *encoder);
- void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+	void (*dump_hw_state)(struct drm_i915_private *i915,
  			      const struct intel_dpll_hw_state *hw_state);
  };
@@ -1946,7 +1946,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
  	.dump_hw_state = bxt_dump_hw_state,
  };
-static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+static void cnl_ddi_pll_enable(struct drm_i915_private *i915,
  			       struct intel_shared_dpll *pll)
  {
  	const enum intel_dpll_id id = pll->info->id;
@@ -1958,7 +1958,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
  	I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    CNL_DPLL_ENABLE(id),
  				    PLL_POWER_STATE,
  				    PLL_POWER_STATE,
@@ -1999,7 +1999,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
  	I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    CNL_DPLL_ENABLE(id),
  				    PLL_LOCK,
  				    PLL_LOCK,
@@ -2021,7 +2021,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
  	 */
  }
-static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+static void cnl_ddi_pll_disable(struct drm_i915_private *i915,
  				struct intel_shared_dpll *pll)
  {
  	const enum intel_dpll_id id = pll->info->id;
@@ -2047,7 +2047,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
  	I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    CNL_DPLL_ENABLE(id),
  				    PLL_LOCK,
  				    0,
@@ -2069,7 +2069,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
  	I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    CNL_DPLL_ENABLE(id),
  				    PLL_POWER_STATE,
  				    0,
@@ -2077,7 +2077,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
  		DRM_ERROR("PLL %d Power not disabled\n", id);
  }
-static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
  				     struct intel_shared_dpll *pll,
  				     struct intel_dpll_hw_state *hw_state)
  {
@@ -2086,7 +2086,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
  	u32 val;
  	bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     POWER_DOMAIN_DISPLAY_CORE);
  	if (!wakeref)
  		return false;
@@ -2107,7 +2107,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
  	ret = true;
out:
-	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
  }
@@ -2199,15 +2199,15 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
  	params->dco_fraction = dco & 0x7fff;
  }
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
+int cnl_hdmi_pll_ref_clock(struct drm_i915_private *i915)
  {
-	int ref_clock = dev_priv->cdclk.hw.ref;
+	int ref_clock = i915->cdclk.hw.ref;
/*
  	 * For ICL+, the spec states: if reference frequency is 38.4,
  	 * use 19.2 because the DPLL automatically divides that by 2.
  	 */
-	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
+	if (INTEL_GEN(i915) >= 11 && ref_clock == 38400)
  		ref_clock = 19200;
return ref_clock;
@@ -2217,7 +2217,7 @@ static bool
  cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
  			struct skl_wrpll_params *wrpll_params)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	u32 afe_clock = crtc_state->port_clock * 5;
  	u32 ref_clock;
  	u32 dco_min = 7998000;
@@ -2252,7 +2252,7 @@ cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
  	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
-	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
+	ref_clock = cnl_hdmi_pll_ref_clock(i915);
cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
  				  pdiv, qdiv, kdiv);
@@ -2370,7 +2370,7 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state,
  	return pll;
  }
-static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
+static void cnl_dump_hw_state(struct drm_i915_private *i915,
  			      const struct intel_dpll_hw_state *hw_state)
  {
  	DRM_DEBUG_KMS("dpll_hw_state: "
@@ -2476,9 +2476,9 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
  static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
  				  struct skl_wrpll_params *pll_params)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	const struct icl_combo_pll_params *params =
-		dev_priv->cdclk.hw.ref == 24000 ?
+		i915->cdclk.hw.ref == 24000 ?
  		icl_dp_combo_pll_24MHz_values :
  		icl_dp_combo_pll_19_2MHz_values;
  	int clock = crtc_state->port_clock;
@@ -2498,9 +2498,9 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
  static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
  			     struct skl_wrpll_params *pll_params)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
- *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
+	*pll_params = i915->cdclk.hw.ref == 24000 ?
  			icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
  	return true;
  }
@@ -2508,12 +2508,12 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
  static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
  				struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	u32 cfgcr0, cfgcr1;
  	struct skl_wrpll_params pll_params = { 0 };
  	bool ret;
- if (intel_port_is_tc(dev_priv, encoder->port))
+	if (intel_port_is_tc(i915, encoder->port))
  		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
  	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
  		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
@@ -2629,9 +2629,9 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
   */
  static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
-	int refclk_khz = dev_priv->cdclk.hw.ref;
+	int refclk_khz = i915->cdclk.hw.ref;
  	int clock = crtc_state->port_clock;
  	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
  	u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -2796,18 +2796,18 @@ static struct intel_shared_dpll *
  icl_get_dpll(struct intel_crtc_state *crtc_state,
  	     struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	struct intel_digital_port *intel_dig_port;
  	struct intel_shared_dpll *pll;
  	enum port port = encoder->port;
  	enum intel_dpll_id min, max;
  	bool ret;
- if (intel_port_is_combophy(dev_priv, port)) {
+	if (intel_port_is_combophy(i915, port)) {
  		min = DPLL_ID_ICL_DPLL0;
  		max = DPLL_ID_ICL_DPLL1;
  		ret = icl_calc_dpll_state(crtc_state, encoder);
-	} else if (intel_port_is_tc(dev_priv, port)) {
+	} else if (intel_port_is_tc(i915, port)) {
  		if (encoder->type == INTEL_OUTPUT_DP_MST) {
  			struct intel_dp_mst_encoder *mst_encoder;
@@ -2824,7 +2824,7 @@ icl_get_dpll(struct intel_crtc_state *crtc_state,
  		} else {
  			enum tc_port tc_port;
- tc_port = intel_port_to_tc(dev_priv, port);
+			tc_port = intel_port_to_tc(i915, port);
  			min = icl_tc_port_to_pll_id(tc_port);
  			max = min;
  			ret = icl_calc_mg_pll_state(crtc_state);
@@ -2851,7 +2851,7 @@ icl_get_dpll(struct intel_crtc_state *crtc_state,
  	return pll;
  }
-static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
  				struct intel_shared_dpll *pll,
  				struct intel_dpll_hw_state *hw_state)
  {
@@ -2861,7 +2861,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
  	bool ret = false;
  	u32 val;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     POWER_DOMAIN_DISPLAY_CORE);
  	if (!wakeref)
  		return false;
@@ -2896,7 +2896,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
  	hw_state->mg_pll_tdc_coldst_bias =
  		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
- if (dev_priv->cdclk.hw.ref == 38400) {
+	if (i915->cdclk.hw.ref == 38400) {
  		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
  		hw_state->mg_pll_bias_mask = 0;
  	} else {
@@ -2909,11 +2909,11 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
  out:
-	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
  	return ret;
  }
-static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
  				 struct intel_shared_dpll *pll,
  				 struct intel_dpll_hw_state *hw_state,
  				 i915_reg_t enable_reg)
@@ -2923,7 +2923,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
  	bool ret = false;
  	u32 val;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     POWER_DOMAIN_DISPLAY_CORE);
  	if (!wakeref)
  		return false;
@@ -2937,26 +2937,26 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
  out:
-	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
  	return ret;
  }
-static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
  				   struct intel_shared_dpll *pll,
  				   struct intel_dpll_hw_state *hw_state)
  {
-	return icl_pll_get_hw_state(dev_priv, pll, hw_state,
+	return icl_pll_get_hw_state(i915, pll, hw_state,
  				    CNL_DPLL_ENABLE(pll->info->id));
  }
-static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
+static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
  				 struct intel_shared_dpll *pll,
  				 struct intel_dpll_hw_state *hw_state)
  {
-	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
+	return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
  }
-static void icl_dpll_write(struct drm_i915_private *dev_priv,
+static void icl_dpll_write(struct drm_i915_private *i915,
  			   struct intel_shared_dpll *pll)
  {
  	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
@@ -2967,7 +2967,7 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
  	POSTING_READ(ICL_DPLL_CFGCR1(id));
  }
-static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
+static void icl_mg_pll_write(struct drm_i915_private *i915,
  			     struct intel_shared_dpll *pll)
  {
  	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
@@ -3017,7 +3017,7 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
  	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
  }
-static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
+static void icl_pll_power_enable(struct drm_i915_private *i915,
  				 struct intel_shared_dpll *pll,
  				 i915_reg_t enable_reg)
  {
@@ -3031,12 +3031,12 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
  	 * The spec says we need to "wait" but it also says it should be
  	 * immediate.
  	 */
-	if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+	if (intel_wait_for_register(&i915->uncore, enable_reg,
  				    PLL_POWER_STATE, PLL_POWER_STATE, 1))
  		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
  }
-static void icl_pll_enable(struct drm_i915_private *dev_priv,
+static void icl_pll_enable(struct drm_i915_private *i915,
  			   struct intel_shared_dpll *pll,
  			   i915_reg_t enable_reg)
  {
@@ -3047,19 +3047,19 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
  	I915_WRITE(enable_reg, val);
/* Timeout is actually 600us. */
-	if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+	if (intel_wait_for_register(&i915->uncore, enable_reg,
  				    PLL_LOCK, PLL_LOCK, 1))
  		DRM_ERROR("PLL %d not locked\n", pll->info->id);
  }
-static void combo_pll_enable(struct drm_i915_private *dev_priv,
+static void combo_pll_enable(struct drm_i915_private *i915,
  			     struct intel_shared_dpll *pll)
  {
  	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
- icl_pll_power_enable(dev_priv, pll, enable_reg);
+	icl_pll_power_enable(i915, pll, enable_reg);
- icl_dpll_write(dev_priv, pll);
+	icl_dpll_write(i915, pll);
/*
  	 * DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3067,17 +3067,17 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
  	 * nothing here.
  	 */
- icl_pll_enable(dev_priv, pll, enable_reg);
+	icl_pll_enable(i915, pll, enable_reg);
/* DVFS post sequence would be here. See the comment above. */
  }
-static void tbt_pll_enable(struct drm_i915_private *dev_priv,
+static void tbt_pll_enable(struct drm_i915_private *i915,
  			   struct intel_shared_dpll *pll)
  {
-	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
+	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
- icl_dpll_write(dev_priv, pll);
+	icl_dpll_write(i915, pll);
/*
  	 * DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3085,20 +3085,20 @@ static void tbt_pll_enable(struct drm_i915_private *dev_priv,
  	 * nothing here.
  	 */
- icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
+	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
/* DVFS post sequence would be here. See the comment above. */
  }
-static void mg_pll_enable(struct drm_i915_private *dev_priv,
+static void mg_pll_enable(struct drm_i915_private *i915,
  			  struct intel_shared_dpll *pll)
  {
  	i915_reg_t enable_reg =
  		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
- icl_pll_power_enable(dev_priv, pll, enable_reg);
+	icl_pll_power_enable(i915, pll, enable_reg);
- icl_mg_pll_write(dev_priv, pll);
+	icl_mg_pll_write(i915, pll);
/*
  	 * DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3106,12 +3106,12 @@ static void mg_pll_enable(struct drm_i915_private *dev_priv,
  	 * nothing here.
  	 */
- icl_pll_enable(dev_priv, pll, enable_reg);
+	icl_pll_enable(i915, pll, enable_reg);
/* DVFS post sequence would be here. See the comment above. */
  }
-static void icl_pll_disable(struct drm_i915_private *dev_priv,
+static void icl_pll_disable(struct drm_i915_private *i915,
  			    struct intel_shared_dpll *pll,
  			    i915_reg_t enable_reg)
  {
@@ -3130,7 +3130,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
  	I915_WRITE(enable_reg, val);
/* Timeout is actually 1us. */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    enable_reg, PLL_LOCK, 0, 1))
  		DRM_ERROR("PLL %d locked\n", pll->info->id);
@@ -3144,33 +3144,33 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
  	 * The spec says we need to "wait" but it also says it should be
  	 * immediate.
  	 */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    enable_reg, PLL_POWER_STATE, 0, 1))
  		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
  }
-static void combo_pll_disable(struct drm_i915_private *dev_priv,
+static void combo_pll_disable(struct drm_i915_private *i915,
  			      struct intel_shared_dpll *pll)
  {
-	icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
+	icl_pll_disable(i915, pll, CNL_DPLL_ENABLE(pll->info->id));
  }
-static void tbt_pll_disable(struct drm_i915_private *dev_priv,
+static void tbt_pll_disable(struct drm_i915_private *i915,
  			    struct intel_shared_dpll *pll)
  {
-	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
+	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
  }
-static void mg_pll_disable(struct drm_i915_private *dev_priv,
+static void mg_pll_disable(struct drm_i915_private *i915,
  			   struct intel_shared_dpll *pll)
  {
  	i915_reg_t enable_reg =
  		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
- icl_pll_disable(dev_priv, pll, enable_reg);
+	icl_pll_disable(i915, pll, enable_reg);
  }
-static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
+static void icl_dump_hw_state(struct drm_i915_private *i915,
  			      const struct intel_dpll_hw_state *hw_state)
  {
  	DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
@@ -3247,28 +3247,28 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
   */
  void intel_shared_dpll_init(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	const struct intel_dpll_mgr *dpll_mgr = NULL;
  	const struct dpll_info *dpll_info;
  	int i;
- if (IS_ELKHARTLAKE(dev_priv))
+	if (IS_ELKHARTLAKE(i915))
  		dpll_mgr = &ehl_pll_mgr;
-	else if (INTEL_GEN(dev_priv) >= 11)
+	else if (INTEL_GEN(i915) >= 11)
  		dpll_mgr = &icl_pll_mgr;
-	else if (IS_CANNONLAKE(dev_priv))
+	else if (IS_CANNONLAKE(i915))
  		dpll_mgr = &cnl_pll_mgr;
-	else if (IS_GEN9_BC(dev_priv))
+	else if (IS_GEN9_BC(i915))
  		dpll_mgr = &skl_pll_mgr;
-	else if (IS_GEN9_LP(dev_priv))
+	else if (IS_GEN9_LP(i915))
  		dpll_mgr = &bxt_pll_mgr;
-	else if (HAS_DDI(dev_priv))
+	else if (HAS_DDI(i915))
  		dpll_mgr = &hsw_pll_mgr;
-	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
+	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
  		dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
-		dev_priv->num_shared_dpll = 0;
+		i915->num_shared_dpll = 0;
  		return;
  	}
@@ -3276,14 +3276,14 @@ void intel_shared_dpll_init(struct drm_device *dev)
  	for (i = 0; dpll_info[i].name; i++) {
  		WARN_ON(i != dpll_info[i].id);
-		dev_priv->shared_dplls[i].info = &dpll_info[i];
+		i915->shared_dplls[i].info = &dpll_info[i];
  	}
- dev_priv->dpll_mgr = dpll_mgr;
-	dev_priv->num_shared_dpll = i;
-	mutex_init(&dev_priv->dpll_lock);
+	i915->dpll_mgr = dpll_mgr;
+	i915->num_shared_dpll = i;
+	mutex_init(&i915->dpll_lock);
- BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+	BUG_ON(i915->num_shared_dpll > I915_NUM_PLLS);
  }
/**
@@ -3304,8 +3304,8 @@ struct intel_shared_dpll *
  intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
  		      struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
+	const struct intel_dpll_mgr *dpll_mgr = i915->dpll_mgr;
if (WARN_ON(!dpll_mgr))
  		return NULL;
@@ -3335,16 +3335,16 @@ void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
/**
   * intel_shared_dpll_dump_hw_state - write hw_state to dmesg
- * @dev_priv: i915 drm device
+ * @i915: i915 drm device
   * @hw_state: hw state to be written to the log
   *
   * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
   */
-void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
  			      const struct intel_dpll_hw_state *hw_state)
  {
-	if (dev_priv->dpll_mgr) {
-		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+	if (i915->dpll_mgr) {
+		i915->dpll_mgr->dump_hw_state(i915, hw_state);
  	} else {
  		/* fallback for platforms that don't use the shared dpll
  		 * infrastructure
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index d0570414f3d1..b068c8441328 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -221,7 +221,7 @@ struct intel_shared_dpll_funcs {
  	 * Called from intel_prepare_shared_dpll() function unless the PLL
  	 * is already enabled.
  	 */
-	void (*prepare)(struct drm_i915_private *dev_priv,
+	void (*prepare)(struct drm_i915_private *i915,
  			struct intel_shared_dpll *pll);
/**
@@ -230,7 +230,7 @@ struct intel_shared_dpll_funcs {
  	 * Hook for enabling the pll, called from intel_enable_shared_dpll()
  	 * if the pll is not already enabled.
  	 */
-	void (*enable)(struct drm_i915_private *dev_priv,
+	void (*enable)(struct drm_i915_private *i915,
  		       struct intel_shared_dpll *pll);
/**
@@ -240,7 +240,7 @@ struct intel_shared_dpll_funcs {
  	 * only when it is safe to disable the pll, i.e., there are no more
  	 * tracked users for it.
  	 */
-	void (*disable)(struct drm_i915_private *dev_priv,
+	void (*disable)(struct drm_i915_private *i915,
  			struct intel_shared_dpll *pll);
/**
@@ -250,7 +250,7 @@ struct intel_shared_dpll_funcs {
  	 * registers. This is used for initial hw state readout and state
  	 * verification after a mode set.
  	 */
-	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
+	bool (*get_hw_state)(struct drm_i915_private *i915,
  			     struct intel_shared_dpll *pll,
  			     struct intel_dpll_hw_state *hw_state);
  };
@@ -271,7 +271,7 @@ struct dpll_info {
/**
  	 * @id: unique indentifier for this DPLL; should match the index in the
-	 * dev_priv->shared_dplls array
+	 * i915->shared_dplls array
  	 */
  	enum intel_dpll_id id;
@@ -321,12 +321,12 @@ struct intel_shared_dpll {
  /* shared dpll functions */
  struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
  			    enum intel_dpll_id id);
  enum intel_dpll_id
-intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+intel_get_shared_dpll_id(struct drm_i915_private *i915,
  			 struct intel_shared_dpll *pll);
-void assert_shared_dpll(struct drm_i915_private *dev_priv,
+void assert_shared_dpll(struct drm_i915_private *i915,
  			struct intel_shared_dpll *pll,
  			bool state);
  #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
@@ -342,9 +342,9 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
  void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
  void intel_shared_dpll_init(struct drm_device *dev);
-void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
  			      const struct intel_dpll_hw_state *hw_state);
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
+int cnl_hdmi_pll_ref_clock(struct drm_i915_private *i915);
  enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
  bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3e337317f77e..23065dda82d9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -963,7 +963,7 @@ struct intel_crtc {
  	struct intel_crtc_state *config;
-	/* Access to these should be protected by dev_priv->irq_lock. */
+	/* Access to these should be protected by i915->irq_lock. */
  	bool cpu_fifo_underrun_disabled;
  	bool pch_fifo_underrun_disabled;
@@ -1293,15 +1293,15 @@ vlv_pipe_to_channel(enum pipe pipe)
  }
static inline struct intel_crtc *
-intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_get_crtc_for_pipe(struct drm_i915_private *i915, enum pipe pipe)
  {
-	return dev_priv->pipe_to_crtc_mapping[pipe];
+	return i915->pipe_to_crtc_mapping[pipe];
  }
static inline struct intel_crtc *
-intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum i9xx_plane_id plane)
+intel_get_crtc_for_plane(struct drm_i915_private *i915, enum i9xx_plane_id plane)
  {
-	return dev_priv->plane_to_crtc_mapping[plane];
+	return i915->plane_to_crtc_mapping[plane];
  }
struct intel_load_detect_pipe {
@@ -1447,17 +1447,17 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
/* intel_display.c */
  void intel_plane_destroy(struct drm_plane *plane);
-void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i830_enable_pipe(struct drm_i915_private *i915, enum pipe pipe);
+void i830_disable_pipe(struct drm_i915_private *i915, enum pipe pipe);
  enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+int vlv_get_hpll_vco(struct drm_i915_private *i915);
+int vlv_get_cck_clock(struct drm_i915_private *i915,
  		      const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock_hpll(struct drm_i915_private *i915,
  			   const char *name, u32 reg);
-void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+void lpt_disable_pch_transcoder(struct drm_i915_private *i915);
+void lpt_disable_iclkip(struct drm_i915_private *i915);
+void intel_init_display_hooks(struct drm_i915_private *i915);
  unsigned int intel_fb_xy_to_linear(int x, int y,
  				   const struct intel_plane_state *state,
  				   int plane);
@@ -1467,19 +1467,19 @@ void intel_add_fb_offsets(int *x, int *y,
  			  const struct intel_plane_state *state, int plane);
  unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
  unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
+bool intel_has_pending_fb_unpin(struct drm_i915_private *i915);
  int intel_display_suspend(struct drm_device *dev);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
+void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
  void intel_encoder_destroy(struct drm_encoder *encoder);
  struct drm_display_mode *
  intel_encoder_current_mode(struct intel_encoder *encoder);
-bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
-bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+bool intel_port_is_combophy(struct drm_i915_private *i915, enum port port);
+bool intel_port_is_tc(struct drm_i915_private *i915, enum port port);
+enum tc_port intel_port_to_tc(struct drm_i915_private *i915,
  			      enum port port);
  int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
  				      struct drm_file *file_priv);
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *i915,
  					     enum pipe pipe);
  static inline bool
  intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
@@ -1496,23 +1496,23 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
  		 (1 << INTEL_OUTPUT_EDP));
  }
  static inline void
-intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_wait_for_vblank(struct drm_i915_private *i915, enum pipe pipe)
  {
-	drm_wait_one_vblank(&dev_priv->drm, pipe);
+	drm_wait_one_vblank(&i915->drm, pipe);
  }
  static inline void
-intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
+intel_wait_for_vblank_if_active(struct drm_i915_private *i915, int pipe)
  {
-	const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	const struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
if (crtc->active)
-		intel_wait_for_vblank(dev_priv, pipe);
+		intel_wait_for_vblank(i915, pipe);
  }
  u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
  int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+void vlv_wait_port_ready(struct drm_i915_private *i915,
  			 struct intel_digital_port *dport,
  			 unsigned int expected_mask);
  int intel_get_load_detect_pipe(struct drm_connector *connector,
@@ -1536,34 +1536,34 @@ int intel_prepare_plane_fb(struct drm_plane *plane,
  void intel_cleanup_plane_fb(struct drm_plane *plane,
  			    struct drm_plane_state *old_state);
-void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+void assert_pch_transcoder_disabled(struct drm_i915_private *i915,
  				    enum pipe pipe);
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *i915, enum pipe pipe,
  		     const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+void vlv_force_pll_off(struct drm_i915_private *i915, enum pipe pipe);
+int lpt_get_iclkip(struct drm_i915_private *i915);
  bool intel_fuzzy_clock_check(int clock1, int clock2);
/* modesetting asserts */
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+void assert_panel_unlocked(struct drm_i915_private *i915,
  			   enum pipe pipe);
-void assert_pll(struct drm_i915_private *dev_priv,
+void assert_pll(struct drm_i915_private *i915,
  		enum pipe pipe, bool state);
  #define assert_pll_enabled(d, p) assert_pll(d, p, true)
  #define assert_pll_disabled(d, p) assert_pll(d, p, false)
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
+void assert_dsi_pll(struct drm_i915_private *i915, bool state);
  #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
  #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+void assert_fdi_rx_pll(struct drm_i915_private *i915,
  		       enum pipe pipe, bool state);
  #define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
  #define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
+void assert_pipe(struct drm_i915_private *i915, enum pipe pipe, bool state);
  #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
  #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-void intel_prepare_reset(struct drm_i915_private *dev_priv);
-void intel_finish_reset(struct drm_i915_private *dev_priv);
+void intel_prepare_reset(struct drm_i915_private *i915);
+void intel_finish_reset(struct drm_i915_private *i915);
  void intel_dp_get_m_n(struct intel_crtc *crtc,
  		      struct intel_crtc_state *pipe_config);
  void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 5fec02aceaed..09414da3d818 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -113,14 +113,14 @@ struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
  enum drm_panel_orientation
  intel_dsi_get_panel_orientation(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	enum drm_panel_orientation orientation;
- orientation = dev_priv->vbt.dsi.orientation;
+	orientation = i915->vbt.dsi.orientation;
  	if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
  		return orientation;
- orientation = dev_priv->vbt.orientation;
+	orientation = i915->vbt.orientation;
  	if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
  		return orientation;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 6d20434636cd..2f477263d1f6 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -155,7 +155,7 @@ static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
  }
/* icl_dsi.c */
-void icl_dsi_init(struct drm_i915_private *dev_priv);
+void icl_dsi_init(struct drm_i915_private *i915);
/* intel_dsi.c */
  int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
@@ -172,7 +172,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
  struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
  					   const struct mipi_dsi_host_ops *funcs,
  					   enum port port);
-void vlv_dsi_init(struct drm_i915_private *dev_priv);
+void vlv_dsi_init(struct drm_i915_private *i915);
/* vlv_dsi_pll.c */
  int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@@ -184,7 +184,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
  		     struct intel_crtc_state *config);
  void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *i915);
  int bxt_dsi_pll_compute(struct intel_encoder *encoder,
  			struct intel_crtc_state *config);
  void bxt_dsi_pll_enable(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
index 8c33262cb0b2..e5dd4962705c 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -159,11 +159,11 @@ static int dcs_setup_backlight(struct intel_connector *connector,
  int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
  {
  	struct drm_device *dev = intel_connector->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_encoder *encoder = intel_connector->encoder;
  	struct intel_panel *panel = &intel_connector->panel;
- if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
+	if (i915->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
  		return -ENODEV;
if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index e5b178660408..d44ebdfd8760 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -121,7 +121,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port)
  static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
  				       const u8 *data)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
  	struct mipi_dsi_device *dsi_device;
  	u8 type, flags, seq_port;
  	u16 len;
@@ -192,7 +192,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
  		break;
  	}
- if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(i915) < 11)
  		vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@@ -213,7 +213,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
  	return data;
  }
-static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
+static void vlv_exec_gpio(struct drm_i915_private *i915,
  			  u8 gpio_source, u8 gpio_index, bool value)
  {
  	struct gpio_map *map;
@@ -228,7 +228,7 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
  	map = &vlv_gpio_table[gpio_index];
-	if (dev_priv->vbt.dsi.seq_version >= 3) {
+	if (i915->vbt.dsi.seq_version >= 3) {
  		/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
  		port = IOSF_PORT_GPIO_NC;
  	} else {
@@ -246,26 +246,26 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
  	pconf0 = VLV_GPIO_PCONF0(map->base_offset);
  	padval = VLV_GPIO_PAD_VAL(map->base_offset);
- vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+	vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_GPIO));
  	if (!map->init) {
  		/* FIXME: remove constant below */
-		vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
+		vlv_iosf_sb_write(i915, port, pconf0, 0x2000CC00);
  		map->init = true;
  	}
tmp = 0x4 | value;
-	vlv_iosf_sb_write(dev_priv, port, padval, tmp);
-	vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+	vlv_iosf_sb_write(i915, port, padval, tmp);
+	vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_GPIO));
  }
-static void chv_exec_gpio(struct drm_i915_private *dev_priv,
+static void chv_exec_gpio(struct drm_i915_private *i915,
  			  u8 gpio_source, u8 gpio_index, bool value)
  {
  	u16 cfg0, cfg1;
  	u16 family_num;
  	u8 port;
- if (dev_priv->vbt.dsi.seq_version >= 3) {
+	if (i915->vbt.dsi.seq_version >= 3) {
  		if (gpio_index >= CHV_GPIO_IDX_START_SE) {
  			/* XXX: it's unclear whether 255->57 is part of SE. */
  			gpio_index -= CHV_GPIO_IDX_START_SE;
@@ -301,15 +301,15 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
  	cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
  	cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
- vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
-	vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
-	vlv_iosf_sb_write(dev_priv, port, cfg0,
+	vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_GPIO));
+	vlv_iosf_sb_write(i915, port, cfg1, 0);
+	vlv_iosf_sb_write(i915, port, cfg0,
  			  CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
  			  CHV_GPIO_GPIOTXSTATE(value));
-	vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+	vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_GPIO));
  }
-static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
+static void bxt_exec_gpio(struct drm_i915_private *i915,
  			  u8 gpio_source, u8 gpio_index, bool value)
  {
  	/* XXX: this table is a quick ugly hack. */
@@ -317,7 +317,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
  	struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
if (!gpio_desc) {
-		gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
+		gpio_desc = devm_gpiod_get_index(i915->drm.dev,
  						 NULL, gpio_index,
  						 value ? GPIOD_OUT_LOW :
  						 GPIOD_OUT_HIGH);
@@ -334,7 +334,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
  	gpiod_set_value(gpio_desc, value);
  }
-static void icl_exec_gpio(struct drm_i915_private *dev_priv,
+static void icl_exec_gpio(struct drm_i915_private *i915,
  			  u8 gpio_source, u8 gpio_index, bool value)
  {
  	DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
@@ -343,19 +343,19 @@ static void icl_exec_gpio(struct drm_i915_private *dev_priv,
  static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
  {
  	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u8 gpio_source, gpio_index = 0, gpio_number;
  	bool value;
DRM_DEBUG_KMS("\n"); - if (dev_priv->vbt.dsi.seq_version >= 3)
+	if (i915->vbt.dsi.seq_version >= 3)
  		gpio_index = *data++;
  	gpio_number = *data++;
  	/* gpio source in sequence v2 only */
-	if (dev_priv->vbt.dsi.seq_version == 2)
+	if (i915->vbt.dsi.seq_version == 2)
  		gpio_source = (*data >> 1) & 3;
  	else
  		gpio_source = 0;
@@ -363,14 +363,14 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
  	/* pull up/down */
  	value = *data++ & 1;
- if (INTEL_GEN(dev_priv) >= 11)
-		icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
-	else if (IS_VALLEYVIEW(dev_priv))
-		vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
-	else if (IS_CHERRYVIEW(dev_priv))
-		chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
+	if (INTEL_GEN(i915) >= 11)
+		icl_exec_gpio(i915, gpio_source, gpio_index, value);
+	else if (IS_VALLEYVIEW(i915))
+		vlv_exec_gpio(i915, gpio_source, gpio_number, value);
+	else if (IS_CHERRYVIEW(i915))
+		chv_exec_gpio(i915, gpio_source, gpio_number, value);
  	else
-		bxt_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+		bxt_exec_gpio(i915, gpio_source, gpio_index, value);
return data;
  }
@@ -456,14 +456,14 @@ static const char *sequence_name(enum mipi_seq seq_id)
  void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
  				 enum mipi_seq seq_id)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
  	const u8 *data;
  	fn_mipi_elem_exec mipi_elem_exec;
- if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
+	if (WARN_ON(seq_id >= ARRAY_SIZE(i915->vbt.dsi.sequence)))
  		return;
- data = dev_priv->vbt.dsi.sequence[seq_id];
+	data = i915->vbt.dsi.sequence[seq_id];
  	if (!data)
  		return;
@@ -476,7 +476,7 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
  	data++;
/* Skip Size of Sequence. */
-	if (dev_priv->vbt.dsi.seq_version >= 3)
+	if (i915->vbt.dsi.seq_version >= 3)
  		data += 4;
while (1) {
@@ -492,7 +492,7 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
  			mipi_elem_exec = NULL;
/* Size of Operation. */
-		if (dev_priv->vbt.dsi.seq_version >= 3)
+		if (i915->vbt.dsi.seq_version >= 3)
  			operation_size = *data++;
if (mipi_elem_exec) {
@@ -521,10 +521,10 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
-	if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
+	if (is_vid_mode(intel_dsi) && i915->vbt.dsi.seq_version >= 3)
  		return;
msleep(msec);
@@ -571,10 +571,10 @@ void intel_dsi_log_params(struct intel_dsi *intel_dsi)
  bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
  {
  	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
-	struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
-	struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct mipi_config *mipi_config = i915->vbt.dsi.config;
+	struct mipi_pps_data *pps = i915->vbt.dsi.pps;
+	struct drm_display_mode *mode = i915->vbt.lfp_lvds_vbt_mode;
  	u16 burst_mode_ratio;
  	enum port port;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 22666d28f4aa..6ced95b73538 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -133,7 +133,7 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
  static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
  {
  	struct drm_device *dev = connector->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
  	u32 tmp;
@@ -148,7 +148,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
  static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
  				   enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
  	u32 tmp;
@@ -162,7 +162,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
  static void intel_dvo_get_config(struct intel_encoder *encoder,
  				 struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
  	u32 tmp, flags = 0;
@@ -187,7 +187,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
  			      const struct intel_crtc_state *old_crtc_state,
  			      const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
  	i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
  	u32 temp = I915_READ(dvo_reg);
@@ -201,7 +201,7 @@ static void intel_enable_dvo(struct intel_encoder *encoder,
  			     const struct intel_crtc_state *pipe_config,
  			     const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
  	i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
  	u32 temp = I915_READ(dvo_reg);
@@ -276,7 +276,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
  				 const struct intel_crtc_state *pipe_config,
  				 const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
  	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -319,7 +319,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
static int intel_dvo_get_modes(struct drm_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	const struct drm_display_mode *fixed_mode =
  		to_intel_connector(connector)->panel.fixed_mode;
@@ -330,7 +330,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
  	 * that's not the case.
  	 */
  	intel_ddc_get_modes(connector,
-			    intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
+			    intel_gmbus_get_adapter(i915, GMBUS_PIN_DPC));
  	if (!list_empty(&connector->probed_modes))
  		return 1;
@@ -406,7 +406,7 @@ static enum port intel_dvo_port(i915_reg_t dvo_reg)
  		return PORT_C;
  }
-void intel_dvo_init(struct drm_i915_private *dev_priv)
+void intel_dvo_init(struct drm_i915_private *i915)
  {
  	struct intel_encoder *intel_encoder;
  	struct intel_dvo *intel_dvo;
@@ -452,7 +452,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
  		 * special cases, but otherwise default to what's defined
  		 * in the spec.
  		 */
-		if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
+		if (intel_gmbus_is_valid_pin(i915, dvo->gpio))
  			gpio = dvo->gpio;
  		else if (dvo->type == INTEL_DVO_CHIP_LVDS)
  			gpio = GMBUS_PIN_SSC;
@@ -464,7 +464,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
  		 * It appears that everything is on GPIOE except for panels
  		 * on i830 laptops, which are on GPIOB (DVOA).
  		 */
-		i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+		i2c = intel_gmbus_get_adapter(i915, gpio);
  		intel_dvo->dev = *dvo;
@@ -480,7 +480,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
  		 * have the clock enabled before we attempt to
  		 * initialize the device.
  		 */
-		for_each_pipe(dev_priv, pipe) {
+		for_each_pipe(i915, pipe) {
  			dpll[pipe] = I915_READ(DPLL(pipe));
  			I915_WRITE(DPLL(pipe), dpll[pipe] | DPLL_DVO_2X_MODE);
  		}
@@ -488,7 +488,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
  		dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
/* restore the DVO 2x clock state to original */
-		for_each_pipe(dev_priv, pipe) {
+		for_each_pipe(i915, pipe) {
  			I915_WRITE(DPLL(pipe), dpll[pipe]);
  		}
@@ -498,7 +498,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
  			continue;
port = intel_dvo_port(dvo->dvo_reg);
-		drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+		drm_encoder_init(&i915->drm, &intel_encoder->base,
  				 &intel_dvo_enc_funcs, encoder_type,
  				 "DVO %c", port_name(port));
@@ -511,14 +511,14 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
  		case INTEL_DVO_CHIP_TMDS:
  			intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
  				(1 << INTEL_OUTPUT_DVO);
-			drm_connector_init(&dev_priv->drm, connector,
+			drm_connector_init(&i915->drm, connector,
  					   &intel_dvo_connector_funcs,
  					   DRM_MODE_CONNECTOR_DVII);
  			encoder_type = DRM_MODE_ENCODER_TMDS;
  			break;
  		case INTEL_DVO_CHIP_LVDS:
  			intel_encoder->cloneable = 0;
-			drm_connector_init(&dev_priv->drm, connector,
+			drm_connector_init(&i915->drm, connector,
  					   &intel_dvo_connector_funcs,
  					   DRM_MODE_CONNECTOR_LVDS);
  			encoder_type = DRM_MODE_ENCODER_LVDS;
diff --git a/drivers/gpu/drm/i915/intel_dvo.h b/drivers/gpu/drm/i915/intel_dvo.h
index 3ed0fdf8efff..d865cd439bb7 100644
--- a/drivers/gpu/drm/i915/intel_dvo.h
+++ b/drivers/gpu/drm/i915/intel_dvo.h
@@ -8,6 +8,6 @@
  struct drm_i915_private;
-void intel_dvo_init(struct drm_i915_private *dev_priv);
+void intel_dvo_init(struct drm_i915_private *i915);
#endif /* __INTEL_DVO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo_dev.h b/drivers/gpu/drm/i915/intel_dvo_dev.h
index 94a6ae1e0292..1d184dc7922b 100644
--- a/drivers/gpu/drm/i915/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/intel_dvo_dev.h
@@ -40,7 +40,7 @@ struct intel_dvo_device {
  	int slave_addr;
const struct intel_dvo_dev_ops *dev_ops;
-	void *dev_priv;
+	void *i915;
  	struct i2c_adapter *i2c_bus;
  };
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 5679f2fffb7c..627fed2a55d2 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -45,14 +45,14 @@
  #include "intel_fbc.h"
  #include "intel_frontbuffer.h"
-static inline bool fbc_supported(struct drm_i915_private *dev_priv)
+static inline bool fbc_supported(struct drm_i915_private *i915)
  {
-	return HAS_FBC(dev_priv);
+	return HAS_FBC(i915);
  }
-static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
+static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *i915)
  {
-	return INTEL_GEN(dev_priv) <= 3;
+	return INTEL_GEN(i915) <= 3;
  }
/*
@@ -82,22 +82,22 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
  		*height = cache->plane.src_h;
  }
-static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
+static int intel_fbc_calculate_cfb_size(struct drm_i915_private *i915,
  					struct intel_fbc_state_cache *cache)
  {
  	int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
-	if (IS_GEN(dev_priv, 7))
+	if (IS_GEN(i915, 7))
  		lines = min(lines, 2048);
-	else if (INTEL_GEN(dev_priv) >= 8)
+	else if (INTEL_GEN(i915) >= 8)
  		lines = min(lines, 2560);
/* Hardware needs the full buffer stride, not just the active area. */
  	return lines * cache->fb.stride;
  }
-static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
+static void i8xx_fbc_deactivate(struct drm_i915_private *i915)
  {
  	u32 fbc_ctl;
@@ -110,7 +110,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
  	I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    FBC_STATUS, FBC_STAT_COMPRESSING, 0,
  				    10)) {
  		DRM_DEBUG_KMS("FBC idle timed out\n");
@@ -118,9 +118,9 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
  	}
  }
-static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
+static void i8xx_fbc_activate(struct drm_i915_private *i915)
  {
-	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+	struct intel_fbc_reg_params *params = &i915->fbc.params;
  	int cfb_pitch;
  	int i;
  	u32 fbc_ctl;
@@ -131,7 +131,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
  		cfb_pitch = params->fb.stride;
/* FBC_CTL wants 32B or 64B units */
-	if (IS_GEN(dev_priv, 2))
+	if (IS_GEN(i915, 2))
  		cfb_pitch = (cfb_pitch / 32) - 1;
  	else
  		cfb_pitch = (cfb_pitch / 64) - 1;
@@ -140,7 +140,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
  	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
  		I915_WRITE(FBC_TAG(i), 0);
- if (IS_GEN(dev_priv, 4)) {
+	if (IS_GEN(i915, 4)) {
  		u32 fbc_ctl2;
/* Set it up... */
@@ -154,21 +154,21 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
  	fbc_ctl = I915_READ(FBC_CONTROL);
  	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
  	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
-	if (IS_I945GM(dev_priv))
+	if (IS_I945GM(i915))
  		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
  	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
  	fbc_ctl |= params->vma->fence->id;
  	I915_WRITE(FBC_CONTROL, fbc_ctl);
  }
-static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
+static bool i8xx_fbc_is_active(struct drm_i915_private *i915)
  {
  	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
  }
-static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
+static void g4x_fbc_activate(struct drm_i915_private *i915)
  {
-	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+	struct intel_fbc_reg_params *params = &i915->fbc.params;
  	u32 dpfc_ctl;
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
@@ -188,7 +188,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
  	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
  }
-static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
+static void g4x_fbc_deactivate(struct drm_i915_private *i915)
  {
  	u32 dpfc_ctl;
@@ -200,23 +200,23 @@ static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
  	}
  }
-static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
+static bool g4x_fbc_is_active(struct drm_i915_private *i915)
  {
  	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
  }
/* This function forces a CFB recompression through the nuke operation. */
-static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+static void intel_fbc_recompress(struct drm_i915_private *i915)
  {
  	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
  	POSTING_READ(MSG_FBC_REND_STATE);
  }
-static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
+static void ilk_fbc_activate(struct drm_i915_private *i915)
  {
-	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+	struct intel_fbc_reg_params *params = &i915->fbc.params;
  	u32 dpfc_ctl;
-	int threshold = dev_priv->fbc.threshold;
+	int threshold = i915->fbc.threshold;
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
  	if (params->fb.format->cpp[0] == 2)
@@ -237,9 +237,9 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
if (params->flags & PLANE_HAS_FENCE) {
  		dpfc_ctl |= DPFC_CTL_FENCE_EN;
-		if (IS_GEN(dev_priv, 5))
+		if (IS_GEN(i915, 5))
  			dpfc_ctl |= params->vma->fence->id;
-		if (IS_GEN(dev_priv, 6)) {
+		if (IS_GEN(i915, 6)) {
  			I915_WRITE(SNB_DPFC_CTL_SA,
  				   SNB_CPU_FENCE_ENABLE |
  				   params->vma->fence->id);
@@ -247,7 +247,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
  				   params->crtc.fence_y_offset);
  		}
  	} else {
-		if (IS_GEN(dev_priv, 6)) {
+		if (IS_GEN(i915, 6)) {
  			I915_WRITE(SNB_DPFC_CTL_SA, 0);
  			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
  		}
@@ -259,10 +259,10 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
  	/* enable it... */
  	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
- intel_fbc_recompress(dev_priv);
+	intel_fbc_recompress(i915);
  }
-static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
+static void ilk_fbc_deactivate(struct drm_i915_private *i915)
  {
  	u32 dpfc_ctl;
@@ -274,19 +274,19 @@ static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
  	}
  }
-static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
+static bool ilk_fbc_is_active(struct drm_i915_private *i915)
  {
  	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
  }
-static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
+static void gen7_fbc_activate(struct drm_i915_private *i915)
  {
-	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+	struct intel_fbc_reg_params *params = &i915->fbc.params;
  	u32 dpfc_ctl;
-	int threshold = dev_priv->fbc.threshold;
+	int threshold = i915->fbc.threshold;
/* Display WA #0529: skl, kbl, bxt. */
-	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
+	if (IS_GEN(i915, 9) && !IS_GEMINILAKE(i915)) {
  		u32 val = I915_READ(CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
@@ -299,7 +299,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
  	}
dpfc_ctl = 0;
-	if (IS_IVYBRIDGE(dev_priv))
+	if (IS_IVYBRIDGE(i915))
  		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
if (params->fb.format->cpp[0] == 2)
@@ -329,15 +329,15 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
  		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
  	}
- if (dev_priv->fbc.false_color)
+	if (i915->fbc.false_color)
  		dpfc_ctl |= FBC_CTL_FALSE_COLOR;
- if (IS_IVYBRIDGE(dev_priv)) {
+	if (IS_IVYBRIDGE(i915)) {
  		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
  		I915_WRITE(ILK_DISPLAY_CHICKEN1,
  			   I915_READ(ILK_DISPLAY_CHICKEN1) |
  			   ILK_FBCQ_DIS);
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
  		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
  		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
  			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
@@ -346,72 +346,72 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
  	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-	intel_fbc_recompress(dev_priv);
+	intel_fbc_recompress(i915);
  }
-static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
+static bool intel_fbc_hw_is_active(struct drm_i915_private *i915)
  {
-	if (INTEL_GEN(dev_priv) >= 5)
-		return ilk_fbc_is_active(dev_priv);
-	else if (IS_GM45(dev_priv))
-		return g4x_fbc_is_active(dev_priv);
+	if (INTEL_GEN(i915) >= 5)
+		return ilk_fbc_is_active(i915);
+	else if (IS_GM45(i915))
+		return g4x_fbc_is_active(i915);
  	else
-		return i8xx_fbc_is_active(dev_priv);
+		return i8xx_fbc_is_active(i915);
  }
-static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
+static void intel_fbc_hw_activate(struct drm_i915_private *i915)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
  	fbc->active = true;
-	if (INTEL_GEN(dev_priv) >= 7)
-		gen7_fbc_activate(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 5)
-		ilk_fbc_activate(dev_priv);
-	else if (IS_GM45(dev_priv))
-		g4x_fbc_activate(dev_priv);
+	if (INTEL_GEN(i915) >= 7)
+		gen7_fbc_activate(i915);
+	else if (INTEL_GEN(i915) >= 5)
+		ilk_fbc_activate(i915);
+	else if (IS_GM45(i915))
+		g4x_fbc_activate(i915);
  	else
-		i8xx_fbc_activate(dev_priv);
+		i8xx_fbc_activate(i915);
  }
-static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
+static void intel_fbc_hw_deactivate(struct drm_i915_private *i915)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
  	fbc->active = false;
-	if (INTEL_GEN(dev_priv) >= 5)
-		ilk_fbc_deactivate(dev_priv);
-	else if (IS_GM45(dev_priv))
-		g4x_fbc_deactivate(dev_priv);
+	if (INTEL_GEN(i915) >= 5)
+		ilk_fbc_deactivate(i915);
+	else if (IS_GM45(i915))
+		g4x_fbc_deactivate(i915);
  	else
-		i8xx_fbc_deactivate(dev_priv);
+		i8xx_fbc_deactivate(i915);
  }
/**
   * intel_fbc_is_active - Is FBC active?
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This function is used to verify the current state of FBC.
   *
   * FIXME: This should be tracked in the plane config eventually
   * instead of queried at runtime for most callers.
   */
-bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
+bool intel_fbc_is_active(struct drm_i915_private *i915)
  {
-	return dev_priv->fbc.active;
+	return i915->fbc.active;
  }
-static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
+static void intel_fbc_deactivate(struct drm_i915_private *i915,
  				 const char *reason)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
  	WARN_ON(!mutex_is_locked(&fbc->lock));
  	if (fbc->active)
-		intel_fbc_hw_deactivate(dev_priv);
+		intel_fbc_hw_deactivate(i915);
fbc->no_fbc_reason = reason;
  }
@@ -419,12 +419,12 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
  static bool multiple_pipes_ok(struct intel_crtc *crtc,
  			      struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
  	enum pipe pipe = crtc->pipe;
/* Don't even bother tracking anything we don't need. */
-	if (!no_fbc_on_multiple_pipes(dev_priv))
+	if (!no_fbc_on_multiple_pipes(i915))
  		return true;
if (plane_state->base.visible)
@@ -435,7 +435,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc,
  	return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
  }
-static int find_compression_threshold(struct drm_i915_private *dev_priv,
+static int find_compression_threshold(struct drm_i915_private *i915,
  				      struct drm_mm_node *node,
  				      int size,
  				      int fb_cpp)
@@ -448,8 +448,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
  	 * reserved range size, so it always assumes the maximum (8mb) is used.
  	 * If we enable FBC using a CFB on that memory range we'll get FIFO
  	 * underruns, even if that range is not reserved by the BIOS. */
-	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
-		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
+	if (IS_BROADWELL(i915) || IS_GEN9_BC(i915))
+		end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
  	else
  		end = U64_MAX;
@@ -461,7 +461,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
  	 */
/* Try to over-allocate to reduce reallocations and fragmentation. */
-	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
+	ret = i915_gem_stolen_insert_node_in_range(i915, node, size <<= 1,
  						   4096, 0, end);
  	if (ret == 0)
  		return compression_threshold;
@@ -472,9 +472,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
  	    (fb_cpp == 2 && compression_threshold == 2))
  		return 0;
- ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
+	ret = i915_gem_stolen_insert_node_in_range(i915, node, size >>= 1,
  						   4096, 0, end);
-	if (ret && INTEL_GEN(dev_priv) <= 4) {
+	if (ret && INTEL_GEN(i915) <= 4) {
  		return 0;
  	} else if (ret) {
  		compression_threshold <<= 1;
@@ -486,17 +486,17 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
  	struct drm_mm_node *uninitialized_var(compressed_llb);
  	int size, fb_cpp, ret;
  	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
-	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
+	size = intel_fbc_calculate_cfb_size(i915, &fbc->state_cache);
  	fb_cpp = fbc->state_cache.fb.format->cpp[0];
- ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
+	ret = find_compression_threshold(i915, &fbc->compressed_fb,
  					 size, fb_cpp);
  	if (!ret)
  		goto err_llb;
@@ -507,32 +507,32 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
  	fbc->threshold = ret;
-	if (INTEL_GEN(dev_priv) >= 5)
+	if (INTEL_GEN(i915) >= 5)
  		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
-	else if (IS_GM45(dev_priv)) {
+	else if (IS_GM45(i915)) {
  		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
  	} else {
  		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
  		if (!compressed_llb)
  			goto err_fb;
- ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
+		ret = i915_gem_stolen_insert_node(i915, compressed_llb,
  						  4096, 4096);
  		if (ret)
  			goto err_fb;
  	fbc->compressed_llb = compressed_llb;
-		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
+		GEM_BUG_ON(range_overflows_t(u64, i915->dsm.start,
  					     fbc->compressed_fb.start,
  					     U32_MAX));
-		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
+		GEM_BUG_ON(range_overflows_t(u64, i915->dsm.start,
  					     fbc->compressed_llb->start,
  					     U32_MAX));
  		I915_WRITE(FBC_CFB_BASE,
-			   dev_priv->dsm.start + fbc->compressed_fb.start);
+			   i915->dsm.start + fbc->compressed_fb.start);
  		I915_WRITE(FBC_LL_BASE,
-			   dev_priv->dsm.start + compressed_llb->start);
+			   i915->dsm.start + compressed_llb->start);
  	}
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
@@ -542,39 +542,39 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
err_fb:
  	kfree(compressed_llb);
-	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+	i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
  err_llb:
-	if (drm_mm_initialized(&dev_priv->mm.stolen))
+	if (drm_mm_initialized(&i915->mm.stolen))
  		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
  	return -ENOSPC;
  }
-static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+static void __intel_fbc_cleanup_cfb(struct drm_i915_private *i915)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
if (drm_mm_node_allocated(&fbc->compressed_fb))
-		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+		i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
if (fbc->compressed_llb) {
-		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
+		i915_gem_stolen_remove_node(i915, fbc->compressed_llb);
  		kfree(fbc->compressed_llb);
  	}
  }
-void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+void intel_fbc_cleanup_cfb(struct drm_i915_private *i915)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
- if (!fbc_supported(dev_priv))
+	if (!fbc_supported(i915))
  		return;
mutex_lock(&fbc->lock);
-	__intel_fbc_cleanup_cfb(dev_priv);
+	__intel_fbc_cleanup_cfb(i915);
  	mutex_unlock(&fbc->lock);
  }
-static bool stride_is_valid(struct drm_i915_private *dev_priv,
+static bool stride_is_valid(struct drm_i915_private *i915,
  			    unsigned int stride)
  {
  	/* This should have been caught earlier. */
@@ -585,10 +585,10 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
  	if (stride < 512)
  		return false;
- if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
+	if (IS_GEN(i915, 2) || IS_GEN(i915, 3))
  		return stride == 4096 || stride == 8192;
- if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
+	if (IS_GEN(i915, 4) && !IS_G4X(i915) && stride < 2048)
  		return false;
if (stride > 16384)
@@ -597,7 +597,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
  	return true;
  }
-static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
+static bool pixel_format_is_valid(struct drm_i915_private *i915,
  				  u32 pixel_format)
  {
  	switch (pixel_format) {
@@ -607,10 +607,10 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
  	case DRM_FORMAT_XRGB1555:
  	case DRM_FORMAT_RGB565:
  		/* 16bpp not supported on gen2 */
-		if (IS_GEN(dev_priv, 2))
+		if (IS_GEN(i915, 2))
  			return false;
  		/* WaFbcOnly1to1Ratio:ctg */
-		if (IS_G4X(dev_priv))
+		if (IS_G4X(i915))
  			return false;
  		return true;
  	default:
@@ -626,17 +626,17 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
   */
  static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
  	unsigned int effective_w, effective_h, max_w, max_h;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915)) {
  		max_w = 5120;
  		max_h = 4096;
-	} else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
+	} else if (INTEL_GEN(i915) >= 8 || IS_HASWELL(i915)) {
  		max_w = 4096;
  		max_h = 4096;
-	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+	} else if (IS_G4X(i915) || INTEL_GEN(i915) >= 5) {
  		max_w = 4096;
  		max_h = 2048;
  	} else {
@@ -656,8 +656,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
  					 struct intel_crtc_state *crtc_state,
  					 struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
  	struct intel_fbc_state_cache *cache = &fbc->state_cache;
  	struct drm_framebuffer *fb = plane_state->base.fb;
@@ -665,7 +665,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
  	cache->flags = 0;
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
cache->plane.rotation = plane_state->base.rotation;
@@ -697,8 +697,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
  	struct intel_fbc_state_cache *cache = &fbc->state_cache;
/* We don't need to use a state cache here since this information is
@@ -741,18 +741,18 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
  		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
  		return false;
  	}
-	if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
+	if (INTEL_GEN(i915) <= 4 && !IS_G4X(i915) &&
  	    cache->plane.rotation != DRM_MODE_ROTATE_0) {
  		fbc->no_fbc_reason = "rotation unsupported";
  		return false;
  	}
- if (!stride_is_valid(dev_priv, cache->fb.stride)) {
+	if (!stride_is_valid(i915, cache->fb.stride)) {
  		fbc->no_fbc_reason = "framebuffer stride not supported";
  		return false;
  	}
- if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
+	if (!pixel_format_is_valid(i915, cache->fb.format->format)) {
  		fbc->no_fbc_reason = "pixel format is invalid";
  		return false;
  	}
@@ -764,8 +764,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
  	}
/* WaFbcExceedCdClockThreshold:hsw,bdw */
-	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
-	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
+	if ((IS_HASWELL(i915) || IS_BROADWELL(i915)) &&
+	    cache->crtc.hsw_bdw_pixel_rate >= i915->cdclk.hw.cdclk * 95 / 100) {
  		fbc->no_fbc_reason = "pixel rate is too big";
  		return false;
  	}
@@ -780,7 +780,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
  	 * we didn't get any invalidate/deactivate calls, but this would require
  	 * a lot of tracking just for a specific case. If we conclude it's an
  	 * important case, we can implement it later. */
-	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+	if (intel_fbc_calculate_cfb_size(i915, &fbc->state_cache) >
  	    fbc->compressed_fb.size * fbc->threshold) {
  		fbc->no_fbc_reason = "CFB requirements changed";
  		return false;
@@ -791,7 +791,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
  	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
  	 * and screen flicker.
  	 */
-	if (IS_GEN_RANGE(dev_priv, 9, 10) &&
+	if (IS_GEN_RANGE(i915, 9, 10) &&
  	    (fbc->state_cache.plane.adjusted_y & 3)) {
  		fbc->no_fbc_reason = "plane Y offset is misaligned";
  		return false;
@@ -800,11 +800,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
  	return true;
  }
-static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+static bool intel_fbc_can_enable(struct drm_i915_private *i915)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
- if (intel_vgpu_active(dev_priv)) {
+	if (intel_vgpu_active(i915)) {
  		fbc->no_fbc_reason = "VGPU is active";
  		return false;
  	}
@@ -825,8 +825,8 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
  static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
  				     struct intel_fbc_reg_params *params)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
  	struct intel_fbc_state_cache *cache = &fbc->state_cache;
/* Since all our fields are integer types, use memset here so the
@@ -844,9 +844,9 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
  	params->fb.format = cache->fb.format;
  	params->fb.stride = cache->fb.stride;
- params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
+	params->cfb_size = intel_fbc_calculate_cfb_size(i915, cache);
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
+	if (IS_GEN(i915, 9) && !IS_GEMINILAKE(i915))
  		params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
  						32 * fbc->threshold) * 8;
  }
@@ -855,11 +855,11 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
  			  struct intel_crtc_state *crtc_state,
  			  struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
  	const char *reason = "update pending";
- if (!fbc_supported(dev_priv))
+	if (!fbc_supported(i915))
  		return;
mutex_lock(&fbc->lock);
@@ -876,21 +876,21 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
  	fbc->flip_pending = true;
deactivate:
-	intel_fbc_deactivate(dev_priv, reason);
+	intel_fbc_deactivate(i915, reason);
  unlock:
  	mutex_unlock(&fbc->lock);
  }
/**
   * __intel_fbc_disable - disable FBC
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This is the low level function that actually disables FBC. Callers should
   * grab the FBC lock.
   */
-static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+static void __intel_fbc_disable(struct drm_i915_private *i915)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
  	struct intel_crtc *crtc = fbc->crtc;
WARN_ON(!mutex_is_locked(&fbc->lock));
@@ -899,7 +899,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe)); - __intel_fbc_cleanup_cfb(dev_priv);
+	__intel_fbc_cleanup_cfb(i915);
fbc->enabled = false;
  	fbc->crtc = NULL;
@@ -907,8 +907,8 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
static void __intel_fbc_post_update(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
  	WARN_ON(!mutex_is_locked(&fbc->lock));
@@ -919,8 +919,8 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
  	WARN_ON(fbc->active);
if (!i915_modparams.enable_fbc) {
-		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
-		__intel_fbc_disable(dev_priv);
+		intel_fbc_deactivate(i915, "disabled at runtime per module param");
+		__intel_fbc_disable(i915);
return;
  	}
@@ -931,18 +931,18 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
  		return;
if (!fbc->busy_bits) {
-		intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
-		intel_fbc_hw_activate(dev_priv);
+		intel_fbc_deactivate(i915, "FBC enabled (active or scheduled)");
+		intel_fbc_hw_activate(i915);
  	} else
-		intel_fbc_deactivate(dev_priv, "frontbuffer write");
+		intel_fbc_deactivate(i915, "frontbuffer write");
  }
void intel_fbc_post_update(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
- if (!fbc_supported(dev_priv))
+	if (!fbc_supported(i915))
  		return;
mutex_lock(&fbc->lock);
@@ -958,13 +958,13 @@ static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
  		return fbc->possible_framebuffer_bits;
  }
-void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+void intel_fbc_invalidate(struct drm_i915_private *i915,
  			  unsigned int frontbuffer_bits,
  			  enum fb_op_origin origin)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
- if (!fbc_supported(dev_priv))
+	if (!fbc_supported(i915))
  		return;
if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
@@ -975,17 +975,17 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
  	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
if (fbc->enabled && fbc->busy_bits)
-		intel_fbc_deactivate(dev_priv, "frontbuffer write");
+		intel_fbc_deactivate(i915, "frontbuffer write");
mutex_unlock(&fbc->lock);
  }
-void intel_fbc_flush(struct drm_i915_private *dev_priv,
+void intel_fbc_flush(struct drm_i915_private *i915,
  		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
- if (!fbc_supported(dev_priv))
+	if (!fbc_supported(i915))
  		return;
mutex_lock(&fbc->lock);
@@ -998,7 +998,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
  	if (!fbc->busy_bits && fbc->enabled &&
  	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
  		if (fbc->active)
-			intel_fbc_recompress(dev_priv);
+			intel_fbc_recompress(i915);
  		else if (!fbc->flip_pending)
  			__intel_fbc_post_update(fbc->crtc);
  	}
@@ -1009,7 +1009,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
/**
   * intel_fbc_choose_crtc - select a CRTC to enable FBC on
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @state: the atomic state structure
   *
   * This function looks at the proposed state for CRTCs and planes, then chooses
@@ -1017,12 +1017,12 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
   * true.
   *
   * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
- * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
+ * enable FBC for the chosen CRTC. If it does, it will set i915->fbc.crtc.
   */
-void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+void intel_fbc_choose_crtc(struct drm_i915_private *i915,
  			   struct intel_atomic_state *state)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
  	struct intel_plane *plane;
  	struct intel_plane_state *plane_state;
  	bool crtc_chosen = false;
@@ -1035,7 +1035,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
  	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
  		goto out;
- if (!intel_fbc_can_enable(dev_priv))
+	if (!intel_fbc_can_enable(i915))
  		goto out;
/* Simply choose the first CRTC that is compatible and has a visible
@@ -1081,10 +1081,10 @@ void intel_fbc_enable(struct intel_crtc *crtc,
  		      struct intel_crtc_state *crtc_state,
  		      struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
- if (!fbc_supported(dev_priv))
+	if (!fbc_supported(i915))
  		return;
mutex_lock(&fbc->lock);
@@ -1127,44 +1127,44 @@ void intel_fbc_enable(struct intel_crtc *crtc,
   */
  void intel_fbc_disable(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &i915->fbc;
- if (!fbc_supported(dev_priv))
+	if (!fbc_supported(i915))
  		return;
mutex_lock(&fbc->lock);
  	if (fbc->crtc == crtc)
-		__intel_fbc_disable(dev_priv);
+		__intel_fbc_disable(i915);
  	mutex_unlock(&fbc->lock);
  }
/**
   * intel_fbc_global_disable - globally disable FBC
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This function disables FBC regardless of which CRTC is associated with it.
   */
-void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
+void intel_fbc_global_disable(struct drm_i915_private *i915)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
- if (!fbc_supported(dev_priv))
+	if (!fbc_supported(i915))
  		return;
mutex_lock(&fbc->lock);
  	if (fbc->enabled) {
  		WARN_ON(fbc->crtc->active);
-		__intel_fbc_disable(dev_priv);
+		__intel_fbc_disable(i915);
  	}
  	mutex_unlock(&fbc->lock);
  }
static void intel_fbc_underrun_work_fn(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		container_of(work, struct drm_i915_private, fbc.underrun_work);
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
  	mutex_lock(&fbc->lock);
@@ -1175,42 +1175,42 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
  	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
  	fbc->underrun_detected = true;
- intel_fbc_deactivate(dev_priv, "FIFO underrun");
+	intel_fbc_deactivate(i915, "FIFO underrun");
  out:
  	mutex_unlock(&fbc->lock);
  }
/*
   * intel_fbc_reset_underrun - reset FBC fifo underrun status.
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
   * want to re-enable FBC after an underrun to increase test coverage.
   */
-int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
+int intel_fbc_reset_underrun(struct drm_i915_private *i915)
  {
  	int ret;
- cancel_work_sync(&dev_priv->fbc.underrun_work);
+	cancel_work_sync(&i915->fbc.underrun_work);
- ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
+	ret = mutex_lock_interruptible(&i915->fbc.lock);
  	if (ret)
  		return ret;
- if (dev_priv->fbc.underrun_detected) {
+	if (i915->fbc.underrun_detected) {
  		DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
-		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
+		i915->fbc.no_fbc_reason = "FIFO underrun cleared";
  	}
- dev_priv->fbc.underrun_detected = false;
-	mutex_unlock(&dev_priv->fbc.lock);
+	i915->fbc.underrun_detected = false;
+	mutex_unlock(&i915->fbc.lock);
return 0;
  }
/**
   * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * Without FBC, most underruns are harmless and don't really cause too many
   * problems, except for an annoying message on dmesg. With FBC, underruns can
@@ -1222,11 +1222,11 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
   *
   * This function is called from the IRQ handler.
   */
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
- if (!fbc_supported(dev_priv))
+	if (!fbc_supported(i915))
  		return;
/* There's no guarantee that underrun_detected won't be set to true
@@ -1243,24 +1243,24 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
/**
   * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * The FBC code needs to track CRTC visibility since the older platforms can't
   * have FBC enabled while multiple pipes are used. This function does the
   * initial setup at driver load to make sure FBC is matching the real hardware.
   */
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
+void intel_fbc_init_pipe_state(struct drm_i915_private *i915)
  {
  	struct intel_crtc *crtc;
/* Don't even bother tracking anything if we don't need. */
-	if (!no_fbc_on_multiple_pipes(dev_priv))
+	if (!no_fbc_on_multiple_pipes(i915))
  		return;
- for_each_intel_crtc(&dev_priv->drm, crtc)
+	for_each_intel_crtc(&i915->drm, crtc)
  		if (intel_crtc_active(crtc) &&
  		    crtc->base.primary->state->visible)
-			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
+			i915->fbc.visible_pipes_mask |= (1 << crtc->pipe);
  }
/*
@@ -1272,29 +1272,29 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
   * space to change the value during runtime without sanitizing it again. IGT
   * relies on being able to change i915.enable_fbc at runtime.
   */
-static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
+static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
  {
  	if (i915_modparams.enable_fbc >= 0)
  		return !!i915_modparams.enable_fbc;
- if (!HAS_FBC(dev_priv))
+	if (!HAS_FBC(i915))
  		return 0;
/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
-	if (IS_GEMINILAKE(dev_priv))
+	if (IS_GEMINILAKE(i915))
  		return 0;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
+	if (IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9)
  		return 1;
return 0;
  }
-static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
+static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
  {
  	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
  	if (intel_vtd_active() &&
-	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
+	    (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
  		DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
  		return true;
  	}
@@ -1304,38 +1304,38 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
/**
   * intel_fbc_init - Initialize FBC
- * @dev_priv: the i915 device
+ * @i915: the i915 device
   *
   * This function might be called during PM init process.
   */
-void intel_fbc_init(struct drm_i915_private *dev_priv)
+void intel_fbc_init(struct drm_i915_private *i915)
  {
-	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc *fbc = &i915->fbc;
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
  	mutex_init(&fbc->lock);
  	fbc->enabled = false;
  	fbc->active = false;
- if (need_fbc_vtd_wa(dev_priv))
-		mkwrite_device_info(dev_priv)->display.has_fbc = false;
+	if (need_fbc_vtd_wa(i915))
+		mkwrite_device_info(i915)->display.has_fbc = false;
- i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+	i915_modparams.enable_fbc = intel_sanitize_fbc_option(i915);
  	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
  		      i915_modparams.enable_fbc);
- if (!HAS_FBC(dev_priv)) {
+	if (!HAS_FBC(i915)) {
  		fbc->no_fbc_reason = "unsupported by this chipset";
  		return;
  	}
/* This value was pulled out of someone's hat */
-	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
+	if (INTEL_GEN(i915) <= 4 && !IS_GM45(i915))
  		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
/* We still don't have any sort of hardware state readout for FBC, so
  	 * deactivate it in case the BIOS activated it to make sure software
  	 * matches the hardware state. */
-	if (intel_fbc_hw_is_active(dev_priv))
-		intel_fbc_hw_deactivate(dev_priv);
+	if (intel_fbc_hw_is_active(i915))
+		intel_fbc_hw_deactivate(i915);
  }
diff --git a/drivers/gpu/drm/i915/intel_fbc.h b/drivers/gpu/drm/i915/intel_fbc.h
index 50272eda8d43..b7dc6cb43497 100644
--- a/drivers/gpu/drm/i915/intel_fbc.h
+++ b/drivers/gpu/drm/i915/intel_fbc.h
@@ -16,27 +16,27 @@ struct intel_crtc;
  struct intel_crtc_state;
  struct intel_plane_state;
-void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+void intel_fbc_choose_crtc(struct drm_i915_private *i915,
  			   struct intel_atomic_state *state);
-bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+bool intel_fbc_is_active(struct drm_i915_private *i915);
  void intel_fbc_pre_update(struct intel_crtc *crtc,
  			  struct intel_crtc_state *crtc_state,
  			  struct intel_plane_state *plane_state);
  void intel_fbc_post_update(struct intel_crtc *crtc);
-void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
+void intel_fbc_init(struct drm_i915_private *i915);
+void intel_fbc_init_pipe_state(struct drm_i915_private *i915);
  void intel_fbc_enable(struct intel_crtc *crtc,
  		      struct intel_crtc_state *crtc_state,
  		      struct intel_plane_state *plane_state);
  void intel_fbc_disable(struct intel_crtc *crtc);
-void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
-void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+void intel_fbc_global_disable(struct drm_i915_private *i915);
+void intel_fbc_invalidate(struct drm_i915_private *i915,
  			  unsigned int frontbuffer_bits,
  			  enum fb_op_origin origin);
-void intel_fbc_flush(struct drm_i915_private *dev_priv,
+void intel_fbc_flush(struct drm_i915_private *i915,
  		     unsigned int frontbuffer_bits, enum fb_op_origin origin);
-void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
-int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *i915);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915);
+int intel_fbc_reset_underrun(struct drm_i915_private *i915);
#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 0d3a6fa674e6..93dee9ff5a58 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -117,7 +117,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
  		container_of(helper, struct intel_fbdev, helper);
  	struct drm_framebuffer *fb;
  	struct drm_device *dev = helper->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_mode_fb_cmd2 mode_cmd = {};
  	struct drm_i915_gem_object *obj;
  	int size, ret;
@@ -141,10 +141,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
  	 * important and we should probably use that space with FBC or other
  	 * features. */
  	obj = NULL;
-	if (size * 2 < dev_priv->stolen_usable_size)
-		obj = i915_gem_object_create_stolen(dev_priv, size);
+	if (size * 2 < i915->stolen_usable_size)
+		obj = i915_gem_object_create_stolen(i915, size);
  	if (obj == NULL)
-		obj = i915_gem_object_create_shmem(dev_priv, size);
+		obj = i915_gem_object_create_shmem(i915, size);
  	if (IS_ERR(obj)) {
  		DRM_ERROR("failed to allocate framebuffer\n");
  		ret = PTR_ERR(obj);
@@ -174,9 +174,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
  		container_of(helper, struct intel_fbdev, helper);
  	struct intel_framebuffer *intel_fb = ifbdev->fb;
  	struct drm_device *dev = helper->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct pci_dev *pdev = dev_priv->drm.pdev;
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct pci_dev *pdev = i915->drm.pdev;
+	struct i915_ggtt *ggtt = &i915->ggtt;
  	const struct i915_ggtt_view view = {
  		.type = I915_GGTT_VIEW_NORMAL,
  	};
@@ -213,7 +213,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
  	}
mutex_lock(&dev->struct_mutex);
-	wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
/* Pin the GGTT vma for our access via info->screen_base.
  	 * This also validates that any existing fb inherited from the
@@ -272,7 +272,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
  	ifbdev->vma = vma;
  	ifbdev->vma_flags = flags;
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
  	mutex_unlock(&dev->struct_mutex);
  	vga_switcheroo_client_fb_set(pdev, info);
  	return 0;
@@ -280,7 +280,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
  out_unpin:
  	intel_unpin_fb_vma(vma, flags);
  out_unlock:
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
  	mutex_unlock(&dev->struct_mutex);
  	return ret;
  }
@@ -447,11 +447,11 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
int intel_fbdev_init(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_fbdev *ifbdev;
  	int ret;
- if (WARN_ON(!HAS_DISPLAY(dev_priv)))
+	if (WARN_ON(!HAS_DISPLAY(i915)))
  		return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -470,8 +470,8 @@ int intel_fbdev_init(struct drm_device *dev)
  		return ret;
  	}
- dev_priv->fbdev = ifbdev;
-	INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
+	i915->fbdev = ifbdev;
+	INIT_WORK(&i915->fbdev_suspend_work, intel_fbdev_suspend_worker);
  	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
@@ -508,23 +508,23 @@ static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
  	ifbdev->cookie = 0;
  }
-void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
+void intel_fbdev_unregister(struct drm_i915_private *i915)
  {
-	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+	struct intel_fbdev *ifbdev = i915->fbdev;
if (!ifbdev)
  		return;
- cancel_work_sync(&dev_priv->fbdev_suspend_work);
+	cancel_work_sync(&i915->fbdev_suspend_work);
  	if (!current_is_async())
  		intel_fbdev_sync(ifbdev);
drm_fb_helper_unregister_fbi(&ifbdev->helper);
  }
-void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+void intel_fbdev_fini(struct drm_i915_private *i915)
  {
-	struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev);
+	struct intel_fbdev *ifbdev = fetch_and_zero(&i915->fbdev);
if (!ifbdev)
  		return;
@@ -554,8 +554,8 @@ static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_fbdev *ifbdev = i915->fbdev;
  	struct fb_info *info;
if (!ifbdev || !ifbdev->vma)
@@ -572,7 +572,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
  		 * ourselves, so only flush outstanding work upon suspend!
  		 */
  		if (state != FBINFO_STATE_RUNNING)
-			flush_work(&dev_priv->fbdev_suspend_work);
+			flush_work(&i915->fbdev_suspend_work);
console_lock();
  	} else {
@@ -586,7 +586,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
  			/* Don't block our own workqueue as this can
  			 * be run in parallel with other i915.ko tasks.
  			 */
-			schedule_work(&dev_priv->fbdev_suspend_work);
+			schedule_work(&i915->fbdev_suspend_work);
  			return;
  		}
  	}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.h b/drivers/gpu/drm/i915/intel_fbdev.h
index de7c84250eb5..4d0c8c851a5e 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/intel_fbdev.h
@@ -14,8 +14,8 @@ struct drm_i915_private;
  #ifdef CONFIG_DRM_FBDEV_EMULATION
  int intel_fbdev_init(struct drm_device *dev);
  void intel_fbdev_initial_config_async(struct drm_device *dev);
-void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
-void intel_fbdev_fini(struct drm_i915_private *dev_priv);
+void intel_fbdev_unregister(struct drm_i915_private *i915);
+void intel_fbdev_fini(struct drm_i915_private *i915);
  void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
  void intel_fbdev_output_poll_changed(struct drm_device *dev);
  void intel_fbdev_restore_mode(struct drm_device *dev);
@@ -29,11 +29,11 @@ static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
  {
  }
-static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
+static inline void intel_fbdev_unregister(struct drm_i915_private *i915)
  {
  }
-static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+static inline void intel_fbdev_fini(struct drm_i915_private *i915)
  {
  }
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 8545ad32bb50..08f092addae2 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -52,14 +52,14 @@
static bool ivb_can_enable_err_int(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *crtc;
  	enum pipe pipe;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
- for_each_pipe(dev_priv, pipe) {
-		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	for_each_pipe(i915, pipe) {
+		crtc = intel_get_crtc_for_pipe(i915, pipe);
if (crtc->cpu_fifo_underrun_disabled)
  			return false;
@@ -70,14 +70,14 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
static bool cpt_can_enable_serr_int(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum pipe pipe;
  	struct intel_crtc *crtc;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
- for_each_pipe(dev_priv, pipe) {
-		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	for_each_pipe(i915, pipe) {
+		crtc = intel_get_crtc_for_pipe(i915, pipe);
if (crtc->pch_fifo_underrun_disabled)
  			return false;
@@ -88,20 +88,20 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	i915_reg_t reg = PIPESTAT(crtc->pipe);
  	u32 enable_mask;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
  		return;
- enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
+	enable_mask = i915_pipestat_enable_mask(i915, crtc->pipe);
  	I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
  	POSTING_READ(reg);
- trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
+	trace_intel_cpu_fifo_underrun(i915, crtc->pipe);
  	DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
  }
@@ -109,13 +109,13 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
  					     enum pipe pipe,
  					     bool enable, bool old)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	i915_reg_t reg = PIPESTAT(pipe);
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
if (enable) {
-		u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+		u32 enable_mask = i915_pipestat_enable_mask(i915, pipe);
I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
  		POSTING_READ(reg);
@@ -128,23 +128,23 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
  static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
  						 enum pipe pipe, bool enable)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 bit = (pipe == PIPE_A) ?
  		DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
if (enable)
-		ilk_enable_display_irq(dev_priv, bit);
+		ilk_enable_display_irq(i915, bit);
  	else
-		ilk_disable_display_irq(dev_priv, bit);
+		ilk_disable_display_irq(i915, bit);
  }
static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	u32 err_int = I915_READ(GEN7_ERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
  		return;
@@ -152,7 +152,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
  	I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
  	POSTING_READ(GEN7_ERR_INT);
- trace_intel_cpu_fifo_underrun(dev_priv, pipe);
+	trace_intel_cpu_fifo_underrun(i915, pipe);
  	DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
  }
@@ -160,16 +160,16 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
  						  enum pipe pipe,
  						  bool enable, bool old)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	if (enable) {
  		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
if (!ivb_can_enable_err_int(dev))
  			return;
- ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+		ilk_enable_display_irq(i915, DE_ERR_INT_IVB);
  	} else {
-		ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+		ilk_disable_display_irq(i915, DE_ERR_INT_IVB);
if (old &&
  		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@@ -182,35 +182,35 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
  static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
  						  enum pipe pipe, bool enable)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
if (enable)
-		bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+		bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_FIFO_UNDERRUN);
  	else
-		bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+		bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_FIFO_UNDERRUN);
  }
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
  					    enum pipe pch_transcoder,
  					    bool enable)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 bit = (pch_transcoder == PIPE_A) ?
  		SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
-		ibx_enable_display_interrupt(dev_priv, bit);
+		ibx_enable_display_interrupt(i915, bit);
  	else
-		ibx_disable_display_interrupt(dev_priv, bit);
+		ibx_disable_display_interrupt(i915, bit);
  }
static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pch_transcoder = crtc->pipe;
  	u32 serr_int = I915_READ(SERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
  		return;
@@ -218,7 +218,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
  	I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
  	POSTING_READ(SERR_INT);
- trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
+	trace_intel_pch_fifo_underrun(i915, pch_transcoder);
  	DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
  		  pipe_name(pch_transcoder));
  }
@@ -227,7 +227,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
  					    enum pipe pch_transcoder,
  					    bool enable, bool old)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
if (enable) {
  		I915_WRITE(SERR_INT,
@@ -236,9 +236,9 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
  		if (!cpt_can_enable_serr_int(dev))
  			return;
- ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+		ibx_enable_display_interrupt(i915, SDE_ERROR_CPT);
  	} else {
-		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+		ibx_disable_display_interrupt(i915, SDE_ERROR_CPT);
if (old && I915_READ(SERR_INT) &
  		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
@@ -251,22 +251,22 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
  static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
  						    enum pipe pipe, bool enable)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
  	bool old;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
old = !crtc->cpu_fifo_underrun_disabled;
  	crtc->cpu_fifo_underrun_disabled = !enable;
- if (HAS_GMCH(dev_priv))
+	if (HAS_GMCH(i915))
  		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
-	else if (IS_GEN_RANGE(dev_priv, 5, 6))
+	else if (IS_GEN_RANGE(i915, 5, 6))
  		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
-	else if (IS_GEN(dev_priv, 7))
+	else if (IS_GEN(i915, 7))
  		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
-	else if (INTEL_GEN(dev_priv) >= 8)
+	else if (INTEL_GEN(i915) >= 8)
  		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
return old;
@@ -274,7 +274,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
/**
   * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrrun reporting state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @pipe: (CPU) pipe to set state for
   * @enable: whether underruns should be reported or not
   *
@@ -288,23 +288,23 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
   *
   * Returns the previous state of underrun reporting.
   */
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *i915,
  					   enum pipe pipe, bool enable)
  {
  	unsigned long flags;
  	bool ret;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
+	spin_lock_irqsave(&i915->irq_lock, flags);
+	ret = __intel_set_cpu_fifo_underrun_reporting(&i915->drm, pipe,
  						      enable);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_unlock_irqrestore(&i915->irq_lock, flags);
return ret;
  }
/**
   * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
   * @enable: whether underruns should be reported or not
   *
@@ -316,12 +316,12 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
   *
   * Returns the previous state of underrun reporting.
   */
-bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *i915,
  					   enum pipe pch_transcoder,
  					   bool enable)
  {
  	struct intel_crtc *crtc =
-		intel_get_crtc_for_pipe(dev_priv, pch_transcoder);
+		intel_get_crtc_for_pipe(i915, pch_transcoder);
  	unsigned long flags;
  	bool old;
@@ -334,71 +334,71 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
  	 * crtc on LPT won't cause issues.
  	 */
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	spin_lock_irqsave(&i915->irq_lock, flags);
old = !crtc->pch_fifo_underrun_disabled;
  	crtc->pch_fifo_underrun_disabled = !enable;
- if (HAS_PCH_IBX(dev_priv))
-		ibx_set_fifo_underrun_reporting(&dev_priv->drm,
+	if (HAS_PCH_IBX(i915))
+		ibx_set_fifo_underrun_reporting(&i915->drm,
  						pch_transcoder,
  						enable);
  	else
-		cpt_set_fifo_underrun_reporting(&dev_priv->drm,
+		cpt_set_fifo_underrun_reporting(&i915->drm,
  						pch_transcoder,
  						enable, old);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_unlock_irqrestore(&i915->irq_lock, flags);
  	return old;
  }
/**
   * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @pipe: (CPU) pipe to set state for
   *
   * This handles a CPU fifo underrun interrupt, generating an underrun warning
   * into dmesg if underrun reporting is enabled and then disables the underrun
   * interrupt to avoid an irq storm.
   */
-void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *i915,
  					 enum pipe pipe)
  {
-	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
/* We may be called too early in init, thanks BIOS! */
  	if (crtc == NULL)
  		return;
/* GMCH can't disable fifo underruns, filter them. */
-	if (HAS_GMCH(dev_priv) &&
+	if (HAS_GMCH(i915) &&
  	    crtc->cpu_fifo_underrun_disabled)
  		return;
- if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
-		trace_intel_cpu_fifo_underrun(dev_priv, pipe);
+	if (intel_set_cpu_fifo_underrun_reporting(i915, pipe, false)) {
+		trace_intel_cpu_fifo_underrun(i915, pipe);
  		DRM_ERROR("CPU pipe %c FIFO underrun\n",
  			  pipe_name(pipe));
  	}
- intel_fbc_handle_fifo_underrun_irq(dev_priv);
+	intel_fbc_handle_fifo_underrun_irq(i915);
  }
/**
   * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
   *
   * This handles a PCH fifo underrun interrupt, generating an underrun warning
   * into dmesg if underrun reporting is enabled and then disables the underrun
   * interrupt to avoid an irq storm.
   */
-void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *i915,
  					 enum pipe pch_transcoder)
  {
-	if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
+	if (intel_set_pch_fifo_underrun_reporting(i915, pch_transcoder,
  						  false)) {
-		trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
+		trace_intel_pch_fifo_underrun(i915, pch_transcoder);
  		DRM_ERROR("PCH transcoder %c FIFO underrun\n",
  			  pipe_name(pch_transcoder));
  	}
@@ -406,53 +406,53 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
/**
   * intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared
   * error interrupt may have been disabled, and so CPU fifo underruns won't
   * necessarily raise an interrupt, and on GMCH platforms where underruns never
   * raise an interrupt.
   */
-void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
+void intel_check_cpu_fifo_underruns(struct drm_i915_private *i915)
  {
  	struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		if (crtc->cpu_fifo_underrun_disabled)
  			continue;
- if (HAS_GMCH(dev_priv))
+		if (HAS_GMCH(i915))
  			i9xx_check_fifo_underruns(crtc);
-		else if (IS_GEN(dev_priv, 7))
+		else if (IS_GEN(i915, 7))
  			ivybridge_check_fifo_underruns(crtc);
  	}
- spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
  }
/**
   * intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared
   * error interrupt may have been disabled, and so PCH fifo underruns won't
   * necessarily raise an interrupt.
   */
-void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
+void intel_check_pch_fifo_underruns(struct drm_i915_private *i915)
  {
  	struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		if (crtc->pch_fifo_underrun_disabled)
  			continue;
- if (HAS_PCH_CPT(dev_priv))
+		if (HAS_PCH_CPT(i915))
  			cpt_check_pch_fifo_underruns(crtc);
  	}
- spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
  }
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.h b/drivers/gpu/drm/i915/intel_fifo_underrun.h
index e04f22ac1f49..290909a67de8 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.h
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.h
@@ -12,16 +12,16 @@
  struct drm_i915_private;
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *i915,
  					   enum pipe pipe, bool enable);
-bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *i915,
  					   enum pipe pch_transcoder,
  					   bool enable);
-void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *i915,
  					 enum pipe pipe);
-void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *i915,
  					 enum pipe pch_transcoder);
-void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
-void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
+void intel_check_cpu_fifo_underruns(struct drm_i915_private *i915);
+void intel_check_pch_fifo_underruns(struct drm_i915_private *i915);
#endif /* __INTEL_FIFO_UNDERRUN_H__ */
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index d6036b9ad16a..161139de3f28 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -66,24 +66,24 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  			       enum fb_op_origin origin,
  			       unsigned int frontbuffer_bits)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
if (origin == ORIGIN_CS) {
-		spin_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-		spin_unlock(&dev_priv->fb_tracking.lock);
+		spin_lock(&i915->fb_tracking.lock);
+		i915->fb_tracking.busy_bits |= frontbuffer_bits;
+		i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+		spin_unlock(&i915->fb_tracking.lock);
  	}
might_sleep();
-	intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
-	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
-	intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
+	intel_psr_invalidate(i915, frontbuffer_bits, origin);
+	intel_edp_drrs_invalidate(i915, frontbuffer_bits);
+	intel_fbc_invalidate(i915, frontbuffer_bits, origin);
  }
/**
   * intel_frontbuffer_flush - flush frontbuffer
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @frontbuffer_bits: frontbuffer plane tracking bits
   * @origin: which operation caused the flush
   *
@@ -93,45 +93,45 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
   *
   * Can be called without any locks held.
   */
-static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
+static void intel_frontbuffer_flush(struct drm_i915_private *i915,
  				    unsigned frontbuffer_bits,
  				    enum fb_op_origin origin)
  {
  	/* Delay flushing when rings are still busy.*/
-	spin_lock(&dev_priv->fb_tracking.lock);
-	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-	spin_unlock(&dev_priv->fb_tracking.lock);
+	spin_lock(&i915->fb_tracking.lock);
+	frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
+	spin_unlock(&i915->fb_tracking.lock);
if (!frontbuffer_bits)
  		return;
might_sleep();
-	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
-	intel_psr_flush(dev_priv, frontbuffer_bits, origin);
-	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
+	intel_edp_drrs_flush(i915, frontbuffer_bits);
+	intel_psr_flush(i915, frontbuffer_bits, origin);
+	intel_fbc_flush(i915, frontbuffer_bits, origin);
  }
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
  			  enum fb_op_origin origin,
  			  unsigned int frontbuffer_bits)
  {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
if (origin == ORIGIN_CS) {
-		spin_lock(&dev_priv->fb_tracking.lock);
+		spin_lock(&i915->fb_tracking.lock);
  		/* Filter out new bits since rendering started. */
-		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-		spin_unlock(&dev_priv->fb_tracking.lock);
+		frontbuffer_bits &= i915->fb_tracking.busy_bits;
+		i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+		spin_unlock(&i915->fb_tracking.lock);
  	}
if (frontbuffer_bits)
-		intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
+		intel_frontbuffer_flush(i915, frontbuffer_bits, origin);
  }
/**
   * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @frontbuffer_bits: frontbuffer plane tracking bits
   *
   * This function gets called after scheduling a flip on @obj. The actual
@@ -141,19 +141,19 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
   *
   * Can be called without any locks held.
   */
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
  				    unsigned frontbuffer_bits)
  {
-	spin_lock(&dev_priv->fb_tracking.lock);
-	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+	spin_lock(&i915->fb_tracking.lock);
+	i915->fb_tracking.flip_bits |= frontbuffer_bits;
  	/* Remove stale busy bits due to the old buffer. */
-	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	spin_unlock(&dev_priv->fb_tracking.lock);
+	i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+	spin_unlock(&i915->fb_tracking.lock);
  }
/**
   * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @frontbuffer_bits: frontbuffer plane tracking bits
   *
   * This function gets called after the flip has been latched and will complete
@@ -161,23 +161,23 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
   *
   * Can be called without any locks held.
   */
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
  				     unsigned frontbuffer_bits)
  {
-	spin_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&i915->fb_tracking.lock);
  	/* Mask any cancelled flips. */
-	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
-	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-	spin_unlock(&dev_priv->fb_tracking.lock);
+	frontbuffer_bits &= i915->fb_tracking.flip_bits;
+	i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+	spin_unlock(&i915->fb_tracking.lock);
if (frontbuffer_bits)
-		intel_frontbuffer_flush(dev_priv,
+		intel_frontbuffer_flush(i915,
  					frontbuffer_bits, ORIGIN_FLIP);
  }
/**
   * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @frontbuffer_bits: frontbuffer plane tracking bits
   *
   * This function gets called after scheduling a flip on @obj. This is for
@@ -186,13 +186,13 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
   *
   * Can be called without any locks held.
   */
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
  			    unsigned frontbuffer_bits)
  {
-	spin_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&i915->fb_tracking.lock);
  	/* Remove stale busy bits due to the old buffer. */
-	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	spin_unlock(&dev_priv->fb_tracking.lock);
+	i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+	spin_unlock(&i915->fb_tracking.lock);
- intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
+	intel_frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
  }
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 5727320c8084..55c42efee7ce 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -37,11 +37,11 @@ enum fb_op_origin {
  	ORIGIN_DIRTYFB,
  };
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
  				    unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
  				     unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
  			    unsigned frontbuffer_bits);
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_gmbus.c b/drivers/gpu/drm/i915/intel_gmbus.c
index aa88e6e7cc65..f1b9844b591e 100644
--- a/drivers/gpu/drm/i915/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/intel_gmbus.c
@@ -89,42 +89,42 @@ static const struct gmbus_pin gmbus_pins_icp[] = {
  };
/* pin is expected to be valid */
-static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
+static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
  					     unsigned int pin)
  {
-	if (HAS_PCH_ICP(dev_priv))
+	if (HAS_PCH_ICP(i915))
  		return &gmbus_pins_icp[pin];
-	else if (HAS_PCH_CNP(dev_priv))
+	else if (HAS_PCH_CNP(i915))
  		return &gmbus_pins_cnp[pin];
-	else if (IS_GEN9_LP(dev_priv))
+	else if (IS_GEN9_LP(i915))
  		return &gmbus_pins_bxt[pin];
-	else if (IS_GEN9_BC(dev_priv))
+	else if (IS_GEN9_BC(i915))
  		return &gmbus_pins_skl[pin];
-	else if (IS_BROADWELL(dev_priv))
+	else if (IS_BROADWELL(i915))
  		return &gmbus_pins_bdw[pin];
  	else
  		return &gmbus_pins[pin];
  }
-bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+bool intel_gmbus_is_valid_pin(struct drm_i915_private *i915,
  			      unsigned int pin)
  {
  	unsigned int size;
- if (HAS_PCH_ICP(dev_priv))
+	if (HAS_PCH_ICP(i915))
  		size = ARRAY_SIZE(gmbus_pins_icp);
-	else if (HAS_PCH_CNP(dev_priv))
+	else if (HAS_PCH_CNP(i915))
  		size = ARRAY_SIZE(gmbus_pins_cnp);
-	else if (IS_GEN9_LP(dev_priv))
+	else if (IS_GEN9_LP(i915))
  		size = ARRAY_SIZE(gmbus_pins_bxt);
-	else if (IS_GEN9_BC(dev_priv))
+	else if (IS_GEN9_BC(i915))
  		size = ARRAY_SIZE(gmbus_pins_skl);
-	else if (IS_BROADWELL(dev_priv))
+	else if (IS_BROADWELL(i915))
  		size = ARRAY_SIZE(gmbus_pins_bdw);
  	else
  		size = ARRAY_SIZE(gmbus_pins);
- return pin < size && get_gmbus_pin(dev_priv, pin)->name;
+	return pin < size && get_gmbus_pin(i915, pin)->name;
  }
/* Intel GPIO access functions */
@@ -138,13 +138,13 @@ to_intel_gmbus(struct i2c_adapter *i2c)
  }
void
-intel_gmbus_reset(struct drm_i915_private *dev_priv)
+intel_gmbus_reset(struct drm_i915_private *i915)
  {
  	I915_WRITE(GMBUS0, 0);
  	I915_WRITE(GMBUS4, 0);
  }
-static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void pnv_gmbus_clock_gating(struct drm_i915_private *i915,
  				   bool enable)
  {
  	u32 val;
@@ -158,7 +158,7 @@ static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
  	I915_WRITE(DSPCLK_GATE_D, val);
  }
-static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void pch_gmbus_clock_gating(struct drm_i915_private *i915,
  				   bool enable)
  {
  	u32 val;
@@ -171,7 +171,7 @@ static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
  	I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
  }
-static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
  				   bool enable)
  {
  	u32 val;
@@ -186,7 +186,7 @@ static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
static u32 get_reserved(struct intel_gmbus *bus)
  {
-	struct drm_i915_private *i915 = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
  	struct intel_uncore *uncore = &i915->uncore;
  	u32 reserved = 0;
@@ -202,7 +202,7 @@ static u32 get_reserved(struct intel_gmbus *bus)
  static int get_clock(void *data)
  {
  	struct intel_gmbus *bus = data;
-	struct intel_uncore *uncore = &bus->dev_priv->uncore;
+	struct intel_uncore *uncore = &bus->i915->uncore;
  	u32 reserved = get_reserved(bus);
intel_uncore_write_notrace(uncore,
@@ -217,7 +217,7 @@ static int get_clock(void *data)
  static int get_data(void *data)
  {
  	struct intel_gmbus *bus = data;
-	struct intel_uncore *uncore = &bus->dev_priv->uncore;
+	struct intel_uncore *uncore = &bus->i915->uncore;
  	u32 reserved = get_reserved(bus);
intel_uncore_write_notrace(uncore,
@@ -232,7 +232,7 @@ static int get_data(void *data)
  static void set_clock(void *data, int state_high)
  {
  	struct intel_gmbus *bus = data;
-	struct intel_uncore *uncore = &bus->dev_priv->uncore;
+	struct intel_uncore *uncore = &bus->i915->uncore;
  	u32 reserved = get_reserved(bus);
  	u32 clock_bits;
@@ -251,7 +251,7 @@ static void set_clock(void *data, int state_high)
  static void set_data(void *data, int state_high)
  {
  	struct intel_gmbus *bus = data;
-	struct intel_uncore *uncore = &bus->dev_priv->uncore;
+	struct intel_uncore *uncore = &bus->i915->uncore;
  	u32 reserved = get_reserved(bus);
  	u32 data_bits;
@@ -271,12 +271,12 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
  	struct intel_gmbus *bus = container_of(adapter,
  					       struct intel_gmbus,
  					       adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
- intel_gmbus_reset(dev_priv);
+	intel_gmbus_reset(i915);
- if (IS_PINEVIEW(dev_priv))
-		pnv_gmbus_clock_gating(dev_priv, false);
+	if (IS_PINEVIEW(i915))
+		pnv_gmbus_clock_gating(i915, false);
set_data(bus, 1);
  	set_clock(bus, 1);
@@ -290,24 +290,24 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
  	struct intel_gmbus *bus = container_of(adapter,
  					       struct intel_gmbus,
  					       adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
set_data(bus, 1);
  	set_clock(bus, 1);
- if (IS_PINEVIEW(dev_priv))
-		pnv_gmbus_clock_gating(dev_priv, true);
+	if (IS_PINEVIEW(i915))
+		pnv_gmbus_clock_gating(i915, true);
  }
static void
  intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
  {
-	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
  	struct i2c_algo_bit_data *algo;
 	algo = &bus->bit_algo;

-	bus->gpio_reg = GPIO(get_gmbus_pin(dev_priv, pin)->gpio);
+	bus->gpio_reg = GPIO(get_gmbus_pin(i915, pin)->gpio);
  	bus->adapter.algo_data = algo;
  	algo->setsda = set_data;
  	algo->setscl = set_clock;
@@ -320,7 +320,7 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
  	algo->data = bus;
  }
-static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
+static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en)
  {
  	DEFINE_WAIT(wait);
  	u32 gmbus2;
@@ -330,10 +330,10 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
  	 * we also need to check for NAKs besides the hw ready/idle signal, we
  	 * need to wake up periodically and check that ourselves.
  	 */
-	if (!HAS_GMBUS_IRQ(dev_priv))
+	if (!HAS_GMBUS_IRQ(i915))
  		irq_en = 0;
- add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+	add_wait_queue(&i915->gmbus_wait_queue, &wait);
  	I915_WRITE_FW(GMBUS4, irq_en);
status |= GMBUS_SATOER;
@@ -342,7 +342,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
  		ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50);
I915_WRITE_FW(GMBUS4, 0);
-	remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+	remove_wait_queue(&i915->gmbus_wait_queue, &wait);
if (gmbus2 & GMBUS_SATOER)
  		return -ENXIO;
@@ -351,7 +351,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
  }
static int
-gmbus_wait_idle(struct drm_i915_private *dev_priv)
+gmbus_wait_idle(struct drm_i915_private *i915)
  {
  	DEFINE_WAIT(wait);
  	u32 irq_enable;
@@ -359,36 +359,36 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
/* Important: The hw handles only the first bit, so set only one! */
  	irq_enable = 0;
-	if (HAS_GMBUS_IRQ(dev_priv))
+	if (HAS_GMBUS_IRQ(i915))
  		irq_enable = GMBUS_IDLE_EN;
- add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+	add_wait_queue(&i915->gmbus_wait_queue, &wait);
  	I915_WRITE_FW(GMBUS4, irq_enable);
- ret = intel_wait_for_register_fw(&dev_priv->uncore,
+	ret = intel_wait_for_register_fw(&i915->uncore,
  					 GMBUS2, GMBUS_ACTIVE, 0,
  					 10);
I915_WRITE_FW(GMBUS4, 0);
-	remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+	remove_wait_queue(&i915->gmbus_wait_queue, &wait);
return ret;
  }
static inline
-unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915)
  {
-	return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+	return INTEL_GEN(i915) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
  	       GMBUS_BYTE_COUNT_MAX;
  }
static int
-gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
+gmbus_xfer_read_chunk(struct drm_i915_private *i915,
  		      unsigned short addr, u8 *buf, unsigned int len,
  		      u32 gmbus0_reg, u32 gmbus1_index)
  {
  	unsigned int size = len;
-	bool burst_read = len > gmbus_max_xfer_size(dev_priv);
+	bool burst_read = len > gmbus_max_xfer_size(i915);
  	bool extra_byte_added = false;
if (burst_read) {
@@ -414,7 +414,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
  		int ret;
  		u32 val, loop = 0;
- ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+		ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
  		if (ret)
  			return ret;
@@ -446,7 +446,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
  #define INTEL_GMBUS_BURST_READ_MAX_LEN		767U
static int
-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg,
  		u32 gmbus0_reg, u32 gmbus1_index)
  {
  	u8 *buf = msg->buf;
@@ -455,12 +455,12 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
  	int ret;
do {
-		if (HAS_GMBUS_BURST_READ(dev_priv))
+		if (HAS_GMBUS_BURST_READ(i915))
  			len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
  		else
-			len = min(rx_size, gmbus_max_xfer_size(dev_priv));
+			len = min(rx_size, gmbus_max_xfer_size(i915));
- ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len,
+		ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len,
  					    gmbus0_reg, gmbus1_index);
  		if (ret)
  			return ret;
@@ -473,7 +473,7 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
  }
static int
-gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
+gmbus_xfer_write_chunk(struct drm_i915_private *i915,
  		       unsigned short addr, u8 *buf, unsigned int len,
  		       u32 gmbus1_index)
  {
@@ -502,7 +502,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GMBUS3, val); - ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+		ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
  		if (ret)
  			return ret;
  	}
@@ -511,7 +511,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
  }
static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg,
  		 u32 gmbus1_index)
  {
  	u8 *buf = msg->buf;
@@ -520,9 +520,9 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
  	int ret;
do {
-		len = min(tx_size, gmbus_max_xfer_size(dev_priv));
+		len = min(tx_size, gmbus_max_xfer_size(i915));
- ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
+		ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len,
  					     gmbus1_index);
  		if (ret)
  			return ret;
@@ -549,7 +549,7 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
  }
static int
-gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
+gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs,
  		 u32 gmbus0_reg)
  {
  	u32 gmbus1_index = 0;
@@ -568,10 +568,10 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
  		I915_WRITE_FW(GMBUS5, gmbus5);
if (msgs[1].flags & I2C_M_RD)
-		ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
+		ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg,
  				      gmbus1_index);
  	else
-		ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);
+		ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index);
/* Clear GMBUS5 after each index transfer */
  	if (gmbus5)
@@ -587,15 +587,15 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
  	struct intel_gmbus *bus = container_of(adapter,
  					       struct intel_gmbus,
  					       adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
  	int i = 0, inc, try = 0;
  	int ret = 0;
/* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
-	if (IS_GEN9_LP(dev_priv))
-		bxt_gmbus_clock_gating(dev_priv, false);
-	else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
-		pch_gmbus_clock_gating(dev_priv, false);
+	if (IS_GEN9_LP(i915))
+		bxt_gmbus_clock_gating(i915, false);
+	else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+		pch_gmbus_clock_gating(i915, false);
retry:
  	I915_WRITE_FW(GMBUS0, gmbus0_source | bus->reg0);
@@ -603,18 +603,18 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
  	for (; i < num; i += inc) {
  		inc = 1;
  		if (gmbus_is_index_xfer(msgs, i, num)) {
-			ret = gmbus_index_xfer(dev_priv, &msgs[i],
+			ret = gmbus_index_xfer(i915, &msgs[i],
  					       gmbus0_source | bus->reg0);
  			inc = 2; /* an index transmission is two msgs */
  		} else if (msgs[i].flags & I2C_M_RD) {
-			ret = gmbus_xfer_read(dev_priv, &msgs[i],
+			ret = gmbus_xfer_read(i915, &msgs[i],
  					      gmbus0_source | bus->reg0, 0);
  		} else {
-			ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
+			ret = gmbus_xfer_write(i915, &msgs[i], 0);
  		}
if (!ret)
-			ret = gmbus_wait(dev_priv,
+			ret = gmbus_wait(i915,
  					 GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
  		if (ret == -ETIMEDOUT)
  			goto timeout;
@@ -632,7 +632,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
  	 * We will re-enable it at the start of the next xfer,
  	 * till then let it sleep.
  	 */
-	if (gmbus_wait_idle(dev_priv)) {
+	if (gmbus_wait_idle(i915)) {
  		DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
  			 adapter->name);
  		ret = -ETIMEDOUT;
@@ -656,7 +656,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
  	 * it's slow responding and only answers on the 2nd retry.
  	 */
  	ret = -ENXIO;
-	if (gmbus_wait_idle(dev_priv)) {
+	if (gmbus_wait_idle(i915)) {
  		DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
  			      adapter->name);
  		ret = -ETIMEDOUT;
@@ -701,10 +701,10 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
out:
  	/* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
-	if (IS_GEN9_LP(dev_priv))
-		bxt_gmbus_clock_gating(dev_priv, true);
-	else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
-		pch_gmbus_clock_gating(dev_priv, true);
+	if (IS_GEN9_LP(i915))
+		bxt_gmbus_clock_gating(i915, true);
+	else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+		pch_gmbus_clock_gating(i915, true);
return ret;
  }
@@ -714,11 +714,11 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
  {
  	struct intel_gmbus *bus =
  		container_of(adapter, struct intel_gmbus, adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
  	intel_wakeref_t wakeref;
  	int ret;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+	wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
if (bus->force_bit) {
  		ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
@@ -730,7 +730,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
  			bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
  	}
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
return ret;
  }
@@ -739,7 +739,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
  {
  	struct intel_gmbus *bus =
  		container_of(adapter, struct intel_gmbus, adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
  	u8 cmd = DRM_HDCP_DDC_AKSV;
  	u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
  	struct i2c_msg msgs[] = {
@@ -759,8 +759,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
  	intel_wakeref_t wakeref;
  	int ret;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
-	mutex_lock(&dev_priv->gmbus_mutex);
+	wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
+	mutex_lock(&i915->gmbus_mutex);
/*
  	 * In order to output Aksv to the receiver, use an indexed write to
@@ -769,8 +769,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
  	 */
  	ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
- mutex_unlock(&dev_priv->gmbus_mutex);
-	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+	mutex_unlock(&i915->gmbus_mutex);
+	intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
return ret;
  }
@@ -793,27 +793,27 @@ static void gmbus_lock_bus(struct i2c_adapter *adapter,
  			   unsigned int flags)
  {
  	struct intel_gmbus *bus = to_intel_gmbus(adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
- mutex_lock(&dev_priv->gmbus_mutex);
+	mutex_lock(&i915->gmbus_mutex);
  }
static int gmbus_trylock_bus(struct i2c_adapter *adapter,
  			     unsigned int flags)
  {
  	struct intel_gmbus *bus = to_intel_gmbus(adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
- return mutex_trylock(&dev_priv->gmbus_mutex);
+	return mutex_trylock(&i915->gmbus_mutex);
  }
static void gmbus_unlock_bus(struct i2c_adapter *adapter,
  			     unsigned int flags)
  {
  	struct intel_gmbus *bus = to_intel_gmbus(adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
- mutex_unlock(&dev_priv->gmbus_mutex);
+	mutex_unlock(&i915->gmbus_mutex);
  }
static const struct i2c_lock_operations gmbus_lock_ops = {
@@ -824,45 +824,45 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
/**
   * intel_gmbus_setup - instantiate all Intel i2c GMBuses
- * @dev_priv: i915 device private
+ * @i915: i915 device private
   */
-int intel_gmbus_setup(struct drm_i915_private *dev_priv)
+int intel_gmbus_setup(struct drm_i915_private *i915)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	struct intel_gmbus *bus;
  	unsigned int pin;
  	int ret;
- if (!HAS_DISPLAY(dev_priv))
+	if (!HAS_DISPLAY(i915))
  		return 0;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
-	else if (!HAS_GMCH(dev_priv))
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		i915->gpio_mmio_base = VLV_DISPLAY_BASE;
+	else if (!HAS_GMCH(i915))
  		/*
  		 * Broxton uses the same PCH offsets for South Display Engine,
  		 * even though it doesn't have a PCH.
  		 */
-		dev_priv->gpio_mmio_base = PCH_DISPLAY_BASE;
+		i915->gpio_mmio_base = PCH_DISPLAY_BASE;
- mutex_init(&dev_priv->gmbus_mutex);
-	init_waitqueue_head(&dev_priv->gmbus_wait_queue);
+	mutex_init(&i915->gmbus_mutex);
+	init_waitqueue_head(&i915->gmbus_wait_queue);
- for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
-		if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+	for (pin = 0; pin < ARRAY_SIZE(i915->gmbus); pin++) {
+		if (!intel_gmbus_is_valid_pin(i915, pin))
  			continue;
- bus = &dev_priv->gmbus[pin];
+		bus = &i915->gmbus[pin];
bus->adapter.owner = THIS_MODULE;
  		bus->adapter.class = I2C_CLASS_DDC;
  		snprintf(bus->adapter.name,
  			 sizeof(bus->adapter.name),
  			 "i915 gmbus %s",
-			 get_gmbus_pin(dev_priv, pin)->name);
+			 get_gmbus_pin(i915, pin)->name);
bus->adapter.dev.parent = &pdev->dev;
-		bus->dev_priv = dev_priv;
+		bus->i915 = i915;
bus->adapter.algo = &gmbus_algorithm;
  		bus->adapter.lock_ops = &gmbus_lock_ops;
@@ -877,7 +877,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
  		bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
-		if (IS_I830(dev_priv))
+		if (IS_I830(i915))
  			bus->force_bit = 1;
intel_gpio_setup(bus, pin);
@@ -887,28 +887,28 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
  			goto err;
  	}
- intel_gmbus_reset(dev_priv);
+	intel_gmbus_reset(i915);
 	return 0;

 err:
  	while (pin--) {
-		if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+		if (!intel_gmbus_is_valid_pin(i915, pin))
  			continue;
- bus = &dev_priv->gmbus[pin];
+		bus = &i915->gmbus[pin];
  		i2c_del_adapter(&bus->adapter);
  	}
  	return ret;
  }
-struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915,
  					    unsigned int pin)
  {
-	if (WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)))
+	if (WARN_ON(!intel_gmbus_is_valid_pin(i915, pin)))
  		return NULL;
- return &dev_priv->gmbus[pin].adapter;
+	return &i915->gmbus[pin].adapter;
  }
void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
@@ -921,16 +921,16 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
  void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
  {
  	struct intel_gmbus *bus = to_intel_gmbus(adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_i915_private *i915 = bus->i915;
- mutex_lock(&dev_priv->gmbus_mutex);
+	mutex_lock(&i915->gmbus_mutex);
bus->force_bit += force_bit ? 1 : -1;
  	DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
  		      force_bit ? "en" : "dis", adapter->name,
  		      bus->force_bit);
- mutex_unlock(&dev_priv->gmbus_mutex);
+	mutex_unlock(&i915->gmbus_mutex);
  }
bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -940,16 +940,16 @@ bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
  	return bus->force_bit;
  }
-void intel_gmbus_teardown(struct drm_i915_private *dev_priv)
+void intel_gmbus_teardown(struct drm_i915_private *i915)
  {
  	struct intel_gmbus *bus;
  	unsigned int pin;
- for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
-		if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+	for (pin = 0; pin < ARRAY_SIZE(i915->gmbus); pin++) {
+		if (!intel_gmbus_is_valid_pin(i915, pin))
  			continue;
- bus = &dev_priv->gmbus[pin];
+		bus = &i915->gmbus[pin];
  		i2c_del_adapter(&bus->adapter);
  	}
  }
diff --git a/drivers/gpu/drm/i915/intel_gmbus.h b/drivers/gpu/drm/i915/intel_gmbus.h
index d989085b8d22..4a4196ca8343 100644
--- a/drivers/gpu/drm/i915/intel_gmbus.h
+++ b/drivers/gpu/drm/i915/intel_gmbus.h
@@ -11,17 +11,17 @@
  struct drm_i915_private;
  struct i2c_adapter;
-int intel_gmbus_setup(struct drm_i915_private *dev_priv);
-void intel_gmbus_teardown(struct drm_i915_private *dev_priv);
-bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+int intel_gmbus_setup(struct drm_i915_private *i915);
+void intel_gmbus_teardown(struct drm_i915_private *i915);
+bool intel_gmbus_is_valid_pin(struct drm_i915_private *i915,
  			      unsigned int pin);
  int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
struct i2c_adapter *
-intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
+intel_gmbus_get_adapter(struct drm_i915_private *i915, unsigned int pin);
  void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
  void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
  bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter);
-void intel_gmbus_reset(struct drm_i915_private *dev_priv);
+void intel_gmbus_reset(struct drm_i915_private *i915);
#endif /* __INTEL_GMBUS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index c40a6efdd33a..f08ac29c2a4f 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -29,14 +29,14 @@
static void gen8_guc_raise_irq(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
  }
static void gen11_guc_raise_irq(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0);
  }
@@ -52,11 +52,11 @@ static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
void intel_guc_init_send_regs(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	enum forcewake_domains fw_domains = 0;
  	unsigned int i;
- if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
  		guc->send_regs.base =
  				i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
  		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
@@ -67,7 +67,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
  	}
for (i = 0; i < guc->send_regs.count; i++) {
-		fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
+		fw_domains |= intel_uncore_forcewake_for_reg(&i915->uncore,
  					guc_send_reg(guc, i),
  					FW_REG_READ | FW_REG_WRITE);
  	}
@@ -101,7 +101,7 @@ void intel_guc_init_early(struct intel_guc *guc)
static int guc_init_wq(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
/*
  	 * GuC log buffer flush work item has to do register access to
@@ -135,8 +135,8 @@ static int guc_init_wq(struct intel_guc *guc)
  	 * to make sure we're always sending a single preemption request with a
  	 * single workitem.
  	 */
-	if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
-	    USES_GUC_SUBMISSION(dev_priv)) {
+	if (HAS_LOGICAL_RING_PREEMPTION(i915) &&
+	    USES_GUC_SUBMISSION(i915)) {
  		guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
  							  WQ_HIGHPRI);
  		if (!guc->preempt_wq) {
@@ -211,7 +211,7 @@ static void guc_shared_data_destroy(struct intel_guc *guc)
int intel_guc_init(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	int ret;
ret = intel_uc_fw_init(&guc->fw);
@@ -237,7 +237,7 @@ int intel_guc_init(struct intel_guc *guc)
  		goto err_ads;
/* We need to notify the guc whenever we change the GGTT */
-	i915_ggtt_enable_guc(dev_priv);
+	i915_ggtt_enable_guc(i915);
 	return 0;

@@ -256,9 +256,9 @@ int intel_guc_init(struct intel_guc *guc)

 void intel_guc_fini(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
- i915_ggtt_disable_guc(dev_priv);
+	i915_ggtt_disable_guc(i915);
 	intel_guc_ct_fini(&guc->ct);

@@ -366,7 +366,7 @@ static u32 guc_ctl_ads_flags(struct intel_guc *guc)
   */
  void intel_guc_init_params(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	u32 params[GUC_CTL_MAX_DWORDS];
  	int i;
@@ -386,14 +386,14 @@ void intel_guc_init_params(struct intel_guc *guc)
  	 * they are power context saved so it's ok to release forcewake
  	 * when we are done here and take it again at xfer time.
  	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_BLITTER);
 	I915_WRITE(SOFT_SCRATCH(0), 0);

 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
  		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_BLITTER);
  }
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
@@ -414,8 +414,8 @@ void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
  int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
  			u32 *response_buf, u32 response_buf_size)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+	struct intel_uncore *uncore = &i915->uncore;
  	u32 status;
  	int i;
  	int ret;
@@ -497,12 +497,12 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
int intel_guc_sample_forcewake(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	u32 action[2];
action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
  	/* WaRsDisableCoarsePowerGating:skl,cnl */
-	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+	if (!HAS_RC6(i915) || NEEDS_WaRsDisableCoarsePowerGating(i915))
  		action[1] = 0;
  	else
  		/* bit 0 and 1 are for Render and Media domain separately */
@@ -538,7 +538,7 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
   */
  int intel_guc_suspend(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	int ret;
  	u32 status;
  	u32 action[] = {
@@ -562,7 +562,7 @@ int intel_guc_suspend(struct intel_guc *guc)
  	if (ret)
  		return ret;
- ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
+	ret = __intel_wait_for_register(&i915->uncore, SOFT_SCRATCH(14),
  					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
  					0, 0, 10, &status);
  	if (ret)
@@ -658,17 +658,17 @@ int intel_guc_resume(struct intel_guc *guc)
   */
  struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	struct drm_i915_gem_object *obj;
  	struct i915_vma *vma;
  	u64 flags;
  	int ret;
- obj = i915_gem_object_create_shmem(dev_priv, size);
+	obj = i915_gem_object_create_shmem(i915, size);
  	if (IS_ERR(obj))
  		return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
  	if (IS_ERR(vma))
  		goto err;
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index ecb69fc94218..4ac9ed2dd467 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -85,7 +85,7 @@ struct __guc_ads_blob {
static int __guc_ads_init(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	struct __guc_ads_blob *blob;
  	const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
  	u32 base;
@@ -115,18 +115,18 @@ static int __guc_ads_init(struct intel_guc *guc)
  		 */
  		blob->ads.golden_context_lrca[engine_class] = 0;
  		blob->ads.eng_state_size[engine_class] =
-			intel_engine_context_size(dev_priv, engine_class) -
+			intel_engine_context_size(i915, engine_class) -
  			skipped_size;
  	}
/* System info */
-	blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
+	blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(i915)->sseu.slice_mask);
  	blob->system_info.rcs_enabled = 1;
  	blob->system_info.bcs_enabled = 1;
- blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv);
-	blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv);
-	blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+	blob->system_info.vdbox_enable_mask = VDBOX_MASK(i915);
+	blob->system_info.vebox_enable_mask = VEBOX_MASK(i915);
+	blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(i915)->vdbox_sfc_access;
 	base = intel_guc_ggtt_offset(guc, guc->ads_vma);

diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 72cdafd9636a..2beb90c62a40 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -125,7 +125,7 @@ void intel_guc_fw_init_early(struct intel_guc *guc)
static void guc_prepare_xfer(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
/* Must program this register before loading the ucode with DMA */
  	I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES |
@@ -135,12 +135,12 @@ static void guc_prepare_xfer(struct intel_guc *guc)
  				     GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
  				     GUC_ENABLE_MIA_CLOCK_GATING);
- if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
  		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
  	else
  		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- if (IS_GEN(dev_priv, 9)) {
+	if (IS_GEN(i915, 9)) {
  		/* DOP Clock Gating Enable for GuC clocks */
  		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
  					    I915_READ(GEN7_MISCCPCTL)));
@@ -153,7 +153,7 @@ static void guc_prepare_xfer(struct intel_guc *guc)
  /* Copy RSA signature from the fw image to HW for verification */
  static void guc_xfer_rsa(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	struct intel_uc_fw *fw = &guc->fw;
  	struct sg_table *pages = fw->obj->mm.pages;
  	u32 rsa[UOS_RSA_SCRATCH_COUNT];
@@ -168,7 +168,7 @@ static void guc_xfer_rsa(struct intel_guc *guc)
static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
/* Did we complete the xfer? */
  	*status = I915_READ(DMA_CTRL);
@@ -186,7 +186,7 @@ static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
   */
  static inline bool guc_ready(struct intel_guc *guc, u32 *status)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	u32 val = I915_READ(GUC_STATUS);
  	u32 uk_val = val & GS_UKERNEL_MASK;
@@ -234,7 +234,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
   */
  static int guc_xfer_ucode(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	struct intel_uc_fw *guc_fw = &guc->fw;
  	unsigned long offset;
@@ -267,12 +267,12 @@ static int guc_xfer_ucode(struct intel_guc *guc)
  static int guc_fw_xfer(struct intel_uc_fw *guc_fw)
  {
  	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	int ret;
 	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 	guc_prepare_xfer(guc);

@@ -285,7 +285,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw)

 	ret = guc_xfer_ucode(guc);

-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
return ret;
  }
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 67eadc82c396..300ed940f0b7 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -390,7 +390,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
  static int guc_log_relay_create(struct intel_guc_log *log)
  {
  	struct intel_guc *guc = log_to_guc(log);
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	struct rchan *guc_log_relay_chan;
  	size_t n_subbufs, subbuf_size;
  	int ret;
@@ -409,9 +409,9 @@ static int guc_log_relay_create(struct intel_guc_log *log)
  	n_subbufs = 8;
guc_log_relay_chan = relay_open("guc_log",
-					dev_priv->drm.primary->debugfs_root,
+					i915->drm.primary->debugfs_root,
  					subbuf_size, n_subbufs,
-					&relay_callbacks, dev_priv);
+					&relay_callbacks, i915);
  	if (!guc_log_relay_chan) {
  		DRM_ERROR("Couldn't create relay chan for GuC logging\n");
@@ -436,7 +436,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
  static void guc_log_capture_logs(struct intel_guc_log *log)
  {
  	struct intel_guc *guc = log_to_guc(log);
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	intel_wakeref_t wakeref;
guc_read_update_log_buffer(log);
@@ -445,7 +445,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
  	 * Generally device is expected to be active only at this
  	 * time, so get/put should be really quick.
  	 */
-	with_intel_runtime_pm(dev_priv, wakeref)
+	with_intel_runtime_pm(i915, wakeref)
  		guc_action_flush_log_complete(guc);
  }
@@ -505,7 +505,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
  int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
  {
  	struct intel_guc *guc = log_to_guc(log);
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	intel_wakeref_t wakeref;
  	int ret = 0;
@@ -519,12 +519,12 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
  	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
  		return -EINVAL;
- mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
if (log->level == level)
  		goto out_unlock;
- with_intel_runtime_pm(dev_priv, wakeref)
+	with_intel_runtime_pm(i915, wakeref)
  		ret = guc_action_control_log(guc,
  					     GUC_LOG_LEVEL_IS_VERBOSE(level),
  					     GUC_LOG_LEVEL_IS_ENABLED(level),
@@ -537,7 +537,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
  	log->level = level;
out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
return ret;
  }
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 97f6970d8da8..013d85278270 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -203,7 +203,7 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
  	return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
@@ -1014,7 +1014,7 @@ static bool guc_verify_doorbells(struct intel_guc *guc)
/**
   * guc_client_alloc() - Allocate an intel_guc_client
- * @dev_priv:	driver private data structure
+ * @i915:	driver private data structure
   * @engines:	The set of engines to enable for this client
   * @priority:	four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
   *		The kernel client to replace ExecList submission is created with
@@ -1026,13 +1026,13 @@ static bool guc_verify_doorbells(struct intel_guc *guc)
   * Return:	An intel_guc_client object if success, else NULL.
   */
  static struct intel_guc_client *
-guc_client_alloc(struct drm_i915_private *dev_priv,
+guc_client_alloc(struct drm_i915_private *i915,
  		 u32 engines,
  		 u32 priority,
  		 struct i915_gem_context *ctx)
  {
  	struct intel_guc_client *client;
-	struct intel_guc *guc = &dev_priv->guc;
+	struct intel_guc *guc = &i915->guc;
  	struct i915_vma *vma;
  	void *vaddr;
  	int ret;
@@ -1129,27 +1129,27 @@ static inline bool ctx_save_restore_disabled(struct intel_context *ce)
static int guc_clients_create(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	struct intel_guc_client *client;
GEM_BUG_ON(guc->execbuf_client);
  	GEM_BUG_ON(guc->preempt_client);
- client = guc_client_alloc(dev_priv,
-				  INTEL_INFO(dev_priv)->engine_mask,
+	client = guc_client_alloc(i915,
+				  INTEL_INFO(i915)->engine_mask,
  				  GUC_CLIENT_PRIORITY_KMD_NORMAL,
-				  dev_priv->kernel_context);
+				  i915->kernel_context);
  	if (IS_ERR(client)) {
  		DRM_ERROR("Failed to create GuC client for submission!\n");
  		return PTR_ERR(client);
  	}
  	guc->execbuf_client = client;
- if (dev_priv->preempt_context) {
-		client = guc_client_alloc(dev_priv,
-					  INTEL_INFO(dev_priv)->engine_mask,
+	if (i915->preempt_context) {
+		client = guc_client_alloc(i915,
+					  INTEL_INFO(i915)->engine_mask,
  					  GUC_CLIENT_PRIORITY_KMD_HIGH,
-					  dev_priv->preempt_context);
+					  i915->preempt_context);
  		if (IS_ERR(client)) {
  			DRM_ERROR("Failed to create GuC client for preemption!\n");
  			guc_client_free(guc->execbuf_client);
@@ -1244,7 +1244,7 @@ static void guc_clients_disable(struct intel_guc *guc)
   */
  int intel_guc_submission_init(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
  	int ret;
@@ -1266,7 +1266,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
  	if (ret)
  		goto err_pool;
- for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		guc->preempt_work[id].engine = engine;
  		INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
  	}
@@ -1280,11 +1280,11 @@ int intel_guc_submission_init(struct intel_guc *guc)
void intel_guc_submission_fini(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		cancel_work_sync(&guc->preempt_work[id].work);
guc_clients_destroy(guc);
@@ -1294,9 +1294,9 @@ void intel_guc_submission_fini(struct intel_guc *guc)
  		guc_stage_desc_pool_destroy(guc);
  }
-static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
+static void guc_interrupts_capture(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
  	int irqs;
@@ -1305,7 +1305,7 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
  	 * to GuC
  	 */
  	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
-	for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
@@ -1339,9 +1339,9 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
  	rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
  }
-static void guc_interrupts_release(struct drm_i915_private *dev_priv)
+static void guc_interrupts_release(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
  	int irqs;
@@ -1352,7 +1352,7 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
  	 */
  	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
  	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
-	for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route all GT interrupts to the host */
@@ -1408,7 +1408,7 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
int intel_guc_submission_enable(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
  	int err;
@@ -1433,9 +1433,9 @@ int intel_guc_submission_enable(struct intel_guc *guc)
  		return err;
/* Take over from manual control of ELSP (execlists) */
-	guc_interrupts_capture(dev_priv);
+	guc_interrupts_capture(i915);
- for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
  		engine->set_default_submission = guc_set_default_submission;
  		engine->set_default_submission(engine);
  	}
@@ -1445,11 +1445,11 @@ int intel_guc_submission_enable(struct intel_guc *guc)
void intel_guc_submission_disable(struct intel_guc *guc)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
- GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
+	GEM_BUG_ON(i915->gt.awake); /* GT should be parked first */
- guc_interrupts_release(dev_priv);
+	guc_interrupts_release(i915);
  	guc_clients_disable(guc);
  }
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 1d7d26e4cf14..b50d31a2e095 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -39,17 +39,17 @@
   * doc is available on https://01.org/group/2230/documentation-list.
   */
-static bool is_supported_device(struct drm_i915_private *dev_priv)
+static bool is_supported_device(struct drm_i915_private *i915)
  {
-	if (IS_BROADWELL(dev_priv))
+	if (IS_BROADWELL(i915))
  		return true;
-	if (IS_SKYLAKE(dev_priv))
+	if (IS_SKYLAKE(i915))
  		return true;
-	if (IS_KABYLAKE(dev_priv))
+	if (IS_KABYLAKE(i915))
  		return true;
-	if (IS_BROXTON(dev_priv))
+	if (IS_BROXTON(i915))
  		return true;
-	if (IS_COFFEELAKE(dev_priv))
+	if (IS_COFFEELAKE(i915))
  		return true;
return false;
@@ -57,21 +57,21 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
/**
   * intel_gvt_sanitize_options - sanitize GVT related options
- * @dev_priv: drm i915 private data
+ * @i915: drm i915 private data
   *
   * This function is called at the i915 options sanitize stage.
   */
-void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
+void intel_gvt_sanitize_options(struct drm_i915_private *i915)
  {
  	if (!i915_modparams.enable_gvt)
  		return;
- if (intel_vgpu_active(dev_priv)) {
+	if (intel_vgpu_active(i915)) {
  		DRM_INFO("GVT-g is disabled for guest\n");
  		goto bail;
  	}
- if (!is_supported_device(dev_priv)) {
+	if (!is_supported_device(i915)) {
  		DRM_INFO("Unsupported device. GVT-g is disabled\n");
  		goto bail;
  	}
@@ -83,7 +83,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
/**
   * intel_gvt_init - initialize GVT components
- * @dev_priv: drm i915 private data
+ * @i915: drm i915 private data
   *
   * This function is called at the initialization stage to create a GVT device.
   *
@@ -91,7 +91,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
   * Zero on success, negative error code if failed.
   *
   */
-int intel_gvt_init(struct drm_i915_private *dev_priv)
+int intel_gvt_init(struct drm_i915_private *i915)
  {
  	int ret;
@@ -103,12 +103,12 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
  		return 0;
  	}
- if (USES_GUC_SUBMISSION(dev_priv)) {
+	if (USES_GUC_SUBMISSION(i915)) {
  		DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
  		return -EIO;
  	}
- ret = intel_gvt_init_device(dev_priv);
+	ret = intel_gvt_init_device(i915);
  	if (ret) {
  		DRM_DEBUG_DRIVER("Fail to init GVT device\n");
  		goto bail;
@@ -123,15 +123,15 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
/**
   * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading
- * @dev_priv: drm i915 private *
+ * @i915: drm i915 private *
   *
   * This function is called at the i915 driver unloading stage, to shutdown
   * GVT components and release the related resources.
   */
-void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+void intel_gvt_cleanup(struct drm_i915_private *i915)
  {
-	if (!intel_gvt_active(dev_priv))
+	if (!intel_gvt_active(i915))
  		return;
- intel_gvt_clean_device(dev_priv);
+	intel_gvt_clean_device(i915);
  }
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 61b246470282..6f81e26cb9ad 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -27,22 +27,22 @@
  struct intel_gvt;
#ifdef CONFIG_DRM_I915_GVT
-int intel_gvt_init(struct drm_i915_private *dev_priv);
-void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
-int intel_gvt_init_device(struct drm_i915_private *dev_priv);
-void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
+int intel_gvt_init(struct drm_i915_private *i915);
+void intel_gvt_cleanup(struct drm_i915_private *i915);
+int intel_gvt_init_device(struct drm_i915_private *i915);
+void intel_gvt_clean_device(struct drm_i915_private *i915);
  int intel_gvt_init_host(void);
-void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
+void intel_gvt_sanitize_options(struct drm_i915_private *i915);
  #else
-static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
+static inline int intel_gvt_init(struct drm_i915_private *i915)
  {
  	return 0;
  }
-static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+static inline void intel_gvt_cleanup(struct drm_i915_private *i915)
  {
  }
-static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
+static inline void intel_gvt_sanitize_options(struct drm_i915_private *i915)
  {
  }
  #endif
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index bc3a94d491c4..7df5f19b52d5 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -81,7 +81,7 @@ bool intel_hdcp_capable(struct intel_connector *connector)
  /* Is HDCP2.2 capable on Platform and Sink */
  bool intel_hdcp2_capable(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
  	struct intel_hdcp *hdcp = &connector->hdcp;
  	bool capable = false;
@@ -91,12 +91,12 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
  		return false;
/* MEI interface is solid */
-	mutex_lock(&dev_priv->hdcp_comp_mutex);
-	if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_lock(&i915->hdcp_comp_mutex);
+	if (!i915->hdcp_comp_added ||  !i915->hdcp_master) {
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return false;
  	}
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
/* Sink's capability for HDCP2.2 */
  	hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
@@ -106,7 +106,7 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
static inline bool intel_hdcp_in_use(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	enum port port = connector->encoder->port;
  	u32 reg;
@@ -116,7 +116,7 @@ static inline bool intel_hdcp_in_use(struct intel_connector *connector)

 static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	enum port port = connector->encoder->port;
  	u32 reg;
@@ -145,9 +145,9 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
  	return 0;
  }
-static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
+static bool hdcp_key_loadable(struct drm_i915_private *i915)
  {
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_domains *power_domains = &i915->power_domains;
  	struct i915_power_well *power_well;
  	enum i915_power_well_id id;
  	bool enabled = false;
@@ -156,7 +156,7 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
  	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
  	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
  	 */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		id = HSW_DISP_PW_GLOBAL;
  	else
  		id = SKL_DISP_PW_1;
@@ -164,9 +164,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
  	mutex_lock(&power_domains->lock);
/* PG1 (power well #1) needs to be enabled */
-	for_each_power_well(dev_priv, power_well) {
+	for_each_power_well(i915, power_well) {
  		if (power_well->desc->id == id) {
-			enabled = power_well->desc->ops->is_enabled(dev_priv,
+			enabled = power_well->desc->ops->is_enabled(i915,
  								    power_well);
  			break;
  		}
@@ -182,14 +182,14 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
  	return enabled;
  }
-static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
+static void intel_hdcp_clear_keys(struct drm_i915_private *i915)
  {
  	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
  	I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
  		   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
  }
-static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
+static int intel_hdcp_load_keys(struct drm_i915_private *i915)
  {
  	int ret;
  	u32 val;
@@ -202,7 +202,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
  	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
  	 * out of reset. So if Key is not already loaded, its an error state.
  	 */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
  			return -ENXIO;
@@ -213,8 +213,8 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
  	 * platforms except BXT and GLK, differ in the key load trigger process
  	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
  	 */
-	if (IS_GEN9_BC(dev_priv)) {
-		ret = sandybridge_pcode_write(dev_priv,
+	if (IS_GEN9_BC(i915)) {
+		ret = sandybridge_pcode_write(i915,
  					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
  		if (ret) {
  			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
@@ -226,7 +226,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
  	}
/* Wait for the keys to load (500us) */
-	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
+	ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS,
  					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
  					10, 1, &val);
  	if (ret)
@@ -241,10 +241,10 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
  }
/* Returns updated SHA-1 index */
-static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
+static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text)
  {
  	I915_WRITE(HDCP_SHA_TEXT, sha_text);
-	if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
+	if (intel_wait_for_register(&i915->uncore, HDCP_REP_CTL,
  				    HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
  		DRM_ERROR("Timed out waiting for SHA1 ready\n");
  		return -ETIMEDOUT;
@@ -279,11 +279,11 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
  				const struct intel_hdcp_shim *shim,
  				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
  {
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
  	u32 vprime, sha_text, sha_leftovers, rep_ctl;
  	int ret, i, j, sha_idx;
- dev_priv = intel_dig_port->base.base.dev->dev_private;
+	i915 = intel_dig_port->base.base.dev->dev_private;
/* Process V' values from the receiver */
  	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
@@ -317,7 +317,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
  		for (j = 0; j < sha_empty; j++)
  			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
- ret = intel_write_sha_text(dev_priv, sha_text);
+		ret = intel_write_sha_text(i915, sha_text);
  		if (ret < 0)
  			return ret;
@@ -340,7 +340,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
  		if (sizeof(sha_text) > sha_leftovers)
  			continue;
- ret = intel_write_sha_text(dev_priv, sha_text);
+		ret = intel_write_sha_text(i915, sha_text);
  		if (ret < 0)
  			return ret;
  		sha_leftovers = 0;
@@ -357,7 +357,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
  	if (sha_leftovers == 0) {
  		/* Write 16 bits of text, 16 bits of M0 */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
-		ret = intel_write_sha_text(dev_priv,
+		ret = intel_write_sha_text(i915,
  					   bstatus[0] << 8 | bstatus[1]);
  		if (ret < 0)
  			return ret;
@@ -365,14 +365,14 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Write 32 bits of M0 */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
-		ret = intel_write_sha_text(dev_priv, 0);
+		ret = intel_write_sha_text(i915, 0);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
/* Write 16 bits of M0 */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
-		ret = intel_write_sha_text(dev_priv, 0);
+		ret = intel_write_sha_text(i915, 0);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
@@ -383,21 +383,21 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
  		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
  		/* Only 24-bits of data, must be in the LSB */
  		sha_text = (sha_text & 0xffffff00) >> 8;
-		ret = intel_write_sha_text(dev_priv, sha_text);
+		ret = intel_write_sha_text(i915, sha_text);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
-		ret = intel_write_sha_text(dev_priv, 0);
+		ret = intel_write_sha_text(i915, 0);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
/* Write 24 bits of M0 */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
-		ret = intel_write_sha_text(dev_priv, 0);
+		ret = intel_write_sha_text(i915, 0);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
@@ -406,7 +406,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
  		/* Write 32 bits of text */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
  		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
-		ret = intel_write_sha_text(dev_priv, sha_text);
+		ret = intel_write_sha_text(i915, sha_text);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
@@ -414,7 +414,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
  		/* Write 64 bits of M0 */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
  		for (i = 0; i < 2; i++) {
-			ret = intel_write_sha_text(dev_priv, 0);
+			ret = intel_write_sha_text(i915, 0);
  			if (ret < 0)
  				return ret;
  			sha_idx += sizeof(sha_text);
@@ -423,28 +423,28 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
  		/* Write 32 bits of text */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
  		sha_text |= bstatus[0] << 24;
-		ret = intel_write_sha_text(dev_priv, sha_text);
+		ret = intel_write_sha_text(i915, sha_text);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
/* Write 8 bits of text, 24 bits of M0 */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
-		ret = intel_write_sha_text(dev_priv, bstatus[1]);
+		ret = intel_write_sha_text(i915, bstatus[1]);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
-		ret = intel_write_sha_text(dev_priv, 0);
+		ret = intel_write_sha_text(i915, 0);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
/* Write 8 bits of M0 */
  		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
-		ret = intel_write_sha_text(dev_priv, 0);
+		ret = intel_write_sha_text(i915, 0);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
@@ -457,7 +457,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
  	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
  	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
  	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
-		ret = intel_write_sha_text(dev_priv, 0);
+		ret = intel_write_sha_text(i915, 0);
  		if (ret < 0)
  			return ret;
  		sha_idx += sizeof(sha_text);
@@ -469,13 +469,13 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
  	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
  	 */
  	sha_text = (num_downstream * 5 + 10) * 8;
-	ret = intel_write_sha_text(dev_priv, sha_text);
+	ret = intel_write_sha_text(i915, sha_text);
  	if (ret < 0)
  		return ret;
/* Tell the HW we're done with the hash and wait for it to ACK */
  	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
-	if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
+	if (intel_wait_for_register(&i915->uncore, HDCP_REP_CTL,
  				    HDCP_SHA1_COMPLETE,
  				    HDCP_SHA1_COMPLETE, 1)) {
  		DRM_ERROR("Timed out waiting for SHA1 complete\n");
@@ -571,7 +571,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
  	struct intel_hdcp *hdcp = &connector->hdcp;
  	struct drm_device *dev = connector->base.dev;
  	const struct intel_hdcp_shim *shim = hdcp->shim;
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
  	enum port port;
  	unsigned long r0_prime_gen_start;
  	int ret, i, tries = 2;
@@ -589,7 +589,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
  	} ri;
  	bool repeater_present, hdcp_capable;
- dev_priv = intel_dig_port->base.base.dev->dev_private;
+	i915 = intel_dig_port->base.base.dev->dev_private;
port = intel_dig_port->base.port;
@@ -615,7 +615,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
  	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
-	if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
+	if (intel_wait_for_register(&i915->uncore, PORT_HDCP_STATUS(port),
  				    HDCP_STATUS_AN_READY,
  				    HDCP_STATUS_AN_READY, 1)) {
  		DRM_ERROR("Timed out waiting for An\n");
@@ -701,7 +701,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
  	}
/* Wait for encryption confirmation */
-	if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
+	if (intel_wait_for_register(&i915->uncore, PORT_HDCP_STATUS(port),
  				    HDCP_STATUS_ENC, HDCP_STATUS_ENC,
  				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
  		DRM_ERROR("Timed out waiting for encryption\n");
@@ -723,7 +723,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
  static int _intel_hdcp_disable(struct intel_connector *connector)
  {
  	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+	struct drm_i915_private *i915 = connector->base.dev->dev_private;
  	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
  	enum port port = intel_dig_port->base.port;
  	int ret;
@@ -733,7 +733,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
hdcp->hdcp_encrypted = false;
  	I915_WRITE(PORT_HDCP_CONF(port), 0);
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    PORT_HDCP_STATUS(port), ~0, 0,
  				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
  		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
@@ -753,22 +753,22 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
  static int _intel_hdcp_enable(struct intel_connector *connector)
  {
  	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+	struct drm_i915_private *i915 = connector->base.dev->dev_private;
  	int i, ret, tries = 3;
DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
  		      connector->base.name, connector->base.base.id);
- if (!hdcp_key_loadable(dev_priv)) {
+	if (!hdcp_key_loadable(i915)) {
  		DRM_ERROR("HDCP key Load is not possible\n");
  		return -ENXIO;
  	}
for (i = 0; i < KEY_LOAD_TRIES; i++) {
-		ret = intel_hdcp_load_keys(dev_priv);
+		ret = intel_hdcp_load_keys(i915);
  		if (!ret)
  			break;
-		intel_hdcp_clear_keys(dev_priv);
+		intel_hdcp_clear_keys(i915);
  	}
  	if (ret) {
  		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
@@ -803,7 +803,7 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
  static int intel_hdcp_check_link(struct intel_connector *connector)
  {
  	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+	struct drm_i915_private *i915 = connector->base.dev->dev_private;
  	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
  	enum port port = intel_dig_port->base.port;
  	int ret = 0;
@@ -884,10 +884,10 @@ static void intel_hdcp_prop_work(struct work_struct *work)
  	drm_modeset_unlock(&dev->mode_config.connection_mutex);
  }
-bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
+bool is_hdcp_supported(struct drm_i915_private *i915, enum port port)
  {
  	/* PORT E doesn't have HDCP, and PORT F is disabled */
-	return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
+	return INTEL_GEN(i915) >= 9 && port < PORT_E;
  }
static int
@@ -895,22 +895,22 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
  		       struct hdcp2_ake_init *ake_data)
  {
  	struct hdcp_port_data *data = &connector->hdcp.port_data;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
  	if (ret)
  		DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
@@ -923,15 +923,15 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
  				size_t *msg_sz)
  {
  	struct hdcp_port_data *data = &connector->hdcp.port_data;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
@@ -940,7 +940,7 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
  							 ek_pub_km, msg_sz);
  	if (ret < 0)
  		DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
@@ -949,22 +949,22 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
  			       struct hdcp2_ake_send_hprime *rx_hprime)
  {
  	struct hdcp_port_data *data = &connector->hdcp.port_data;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
  	if (ret < 0)
  		DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
@@ -974,22 +974,22 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
  			 struct hdcp2_ake_send_pairing_info *pairing_info)
  {
  	struct hdcp_port_data *data = &connector->hdcp.port_data;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
  	if (ret < 0)
  		DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
@@ -999,22 +999,22 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
  		      struct hdcp2_lc_init *lc_init)
  {
  	struct hdcp_port_data *data = &connector->hdcp.port_data;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
  	if (ret < 0)
  		DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
@@ -1024,22 +1024,22 @@ hdcp2_verify_lprime(struct intel_connector *connector,
  		    struct hdcp2_lc_send_lprime *rx_lprime)
  {
  	struct hdcp_port_data *data = &connector->hdcp.port_data;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
  	if (ret < 0)
  		DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
@@ -1048,22 +1048,22 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
  			      struct hdcp2_ske_send_eks *ske_data)
  {
  	struct hdcp_port_data *data = &connector->hdcp.port_data;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
  	if (ret < 0)
  		DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
@@ -1075,15 +1075,15 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
  				      struct hdcp2_rep_send_ack *rep_send_ack)
  {
  	struct hdcp_port_data *data = &connector->hdcp.port_data;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
@@ -1092,7 +1092,7 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
  							 rep_send_ack);
  	if (ret < 0)
  		DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
@@ -1102,22 +1102,22 @@ hdcp2_verify_mprime(struct intel_connector *connector,
  		    struct hdcp2_rep_stream_ready *stream_ready)
  {
  	struct hdcp_port_data *data = &connector->hdcp.port_data;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
  	if (ret < 0)
  		DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
@@ -1125,43 +1125,43 @@ hdcp2_verify_mprime(struct intel_connector *connector,
  static int hdcp2_authenticate_port(struct intel_connector *connector)
  {
  	struct hdcp_port_data *data = &connector->hdcp.port_data;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
  	if (ret < 0)
  		DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
static int hdcp2_close_mei_session(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct i915_hdcp_comp_master *comp;
  	int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	comp = dev_priv->hdcp_master;
+	mutex_lock(&i915->hdcp_comp_mutex);
+	comp = i915->hdcp_master;
if (!comp || !comp->ops) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return -EINVAL;
  	}
ret = comp->ops->close_hdcp_session(comp->mei_dev,
  					     &connector->hdcp.port_data);
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_unlock(&i915->hdcp_comp_mutex);
return ret;
  }
@@ -1488,7 +1488,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
  static int hdcp2_enable_encryption(struct intel_connector *connector)
  {
  	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_hdcp *hdcp = &connector->hdcp;
  	enum port port = connector->encoder->port;
  	int ret;
@@ -1511,7 +1511,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
  			   CTL_LINK_ENCRYPTION_REQ);
  	}
- ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
+	ret = intel_wait_for_register(&i915->uncore, HDCP2_STATUS_DDI(port),
  				      LINK_ENCRYPTION_STATUS,
  				      LINK_ENCRYPTION_STATUS,
  				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
@@ -1522,7 +1522,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
  static int hdcp2_disable_encryption(struct intel_connector *connector)
  {
  	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_hdcp *hdcp = &connector->hdcp;
  	enum port port = connector->encoder->port;
  	int ret;
@@ -1532,7 +1532,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
  	I915_WRITE(HDCP2_CTL_DDI(port),
  		   I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
- ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
+	ret = intel_wait_for_register(&i915->uncore, HDCP2_STATUS_DDI(port),
  				      LINK_ENCRYPTION_STATUS, 0x0,
  				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
  	if (ret == -ETIMEDOUT)
@@ -1628,7 +1628,7 @@ static int _intel_hdcp2_disable(struct intel_connector *connector)
  static int intel_hdcp2_check_link(struct intel_connector *connector)
  {
  	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_hdcp *hdcp = &connector->hdcp;
  	enum port port = connector->encoder->port;
  	int ret = 0;
@@ -1721,13 +1721,13 @@ static void intel_hdcp_check_work(struct work_struct *work)
  static int i915_hdcp_component_bind(struct device *i915_kdev,
  				    struct device *mei_kdev, void *data)
  {
-	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
DRM_DEBUG("I915 HDCP comp bind\n");
-	mutex_lock(&dev_priv->hdcp_comp_mutex);
-	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
-	dev_priv->hdcp_master->mei_dev = mei_kdev;
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_lock(&i915->hdcp_comp_mutex);
+	i915->hdcp_master = (struct i915_hdcp_comp_master *)data;
+	i915->hdcp_master->mei_dev = mei_kdev;
+	mutex_unlock(&i915->hdcp_comp_mutex);
return 0;
  }
@@ -1735,12 +1735,12 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
  static void i915_hdcp_component_unbind(struct device *i915_kdev,
  				       struct device *mei_kdev, void *data)
  {
-	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
DRM_DEBUG("I915 HDCP comp unbind\n");
-	mutex_lock(&dev_priv->hdcp_comp_mutex);
-	dev_priv->hdcp_master = NULL;
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_lock(&i915->hdcp_comp_mutex);
+	i915->hdcp_master = NULL;
+	mutex_unlock(&i915->hdcp_comp_mutex);
  }
static const struct component_ops i915_hdcp_component_ops = {
@@ -1773,34 +1773,34 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector)
  	return 0;
  }
-static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
+static bool is_hdcp2_supported(struct drm_i915_private *i915)
  {
  	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
  		return false;
- return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
-		IS_KABYLAKE(dev_priv));
+	return (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915) ||
+		IS_KABYLAKE(i915));
  }
-void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
+void intel_hdcp_component_init(struct drm_i915_private *i915)
  {
  	int ret;
- if (!is_hdcp2_supported(dev_priv))
+	if (!is_hdcp2_supported(i915))
  		return;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
-	WARN_ON(dev_priv->hdcp_comp_added);
+	mutex_lock(&i915->hdcp_comp_mutex);
+	WARN_ON(i915->hdcp_comp_added);
- dev_priv->hdcp_comp_added = true;
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
-	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
+	i915->hdcp_comp_added = true;
+	mutex_unlock(&i915->hdcp_comp_mutex);
+	ret = component_add_typed(i915->drm.dev, &i915_hdcp_component_ops,
  				  I915_COMPONENT_HDCP);
  	if (ret < 0) {
  		DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
-		mutex_lock(&dev_priv->hdcp_comp_mutex);
-		dev_priv->hdcp_comp_added = false;
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		mutex_lock(&i915->hdcp_comp_mutex);
+		i915->hdcp_comp_added = false;
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return;
  	}
  }
@@ -1822,7 +1822,7 @@ static void intel_hdcp2_init(struct intel_connector *connector)
  int intel_hdcp_init(struct intel_connector *connector,
  		    const struct intel_hdcp_shim *shim)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_hdcp *hdcp = &connector->hdcp;
  	int ret;
@@ -1838,7 +1838,7 @@ int intel_hdcp_init(struct intel_connector *connector,
  	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
  	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
- if (is_hdcp2_supported(dev_priv))
+	if (is_hdcp2_supported(i915))
  		intel_hdcp2_init(connector);
  	init_waitqueue_head(&hdcp->cp_irq_queue);
@@ -1905,18 +1905,18 @@ int intel_hdcp_disable(struct intel_connector *connector)
  	return ret;
  }
-void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
+void intel_hdcp_component_fini(struct drm_i915_private *i915)
  {
-	mutex_lock(&dev_priv->hdcp_comp_mutex);
-	if (!dev_priv->hdcp_comp_added) {
-		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	mutex_lock(&i915->hdcp_comp_mutex);
+	if (!i915->hdcp_comp_added) {
+		mutex_unlock(&i915->hdcp_comp_mutex);
  		return;
  	}
- dev_priv->hdcp_comp_added = false;
-	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	i915->hdcp_comp_added = false;
+	mutex_unlock(&i915->hdcp_comp_mutex);
- component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
+	component_del(i915->drm.dev, &i915_hdcp_component_ops);
  }
void intel_hdcp_cleanup(struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_hdcp.h b/drivers/gpu/drm/i915/intel_hdcp.h
index be8da85c866a..24079dda1ac4 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/intel_hdcp.h
@@ -23,11 +23,11 @@ int intel_hdcp_init(struct intel_connector *connector,
  		    const struct intel_hdcp_shim *hdcp_shim);
  int intel_hdcp_enable(struct intel_connector *connector);
  int intel_hdcp_disable(struct intel_connector *connector);
-bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool is_hdcp_supported(struct drm_i915_private *i915, enum port port);
  bool intel_hdcp_capable(struct intel_connector *connector);
  bool intel_hdcp2_capable(struct intel_connector *connector);
-void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
-void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
+void intel_hdcp_component_init(struct drm_i915_private *i915);
+void intel_hdcp_component_fini(struct drm_i915_private *i915);
  void intel_hdcp_cleanup(struct intel_connector *connector);
  void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 187a2b828b97..c5be41ba36da 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -67,17 +67,17 @@ static void
  assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
  {
  	struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 enabled_bits;
- enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+	enabled_bits = HAS_DDI(i915) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
  	     "HDMI port enabled, expecting disabled\n");
  }
static void
-assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
+assert_hdmi_transcoder_func_disabled(struct drm_i915_private *i915,
  				     enum transcoder cpu_transcoder)
  {
  	WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
@@ -163,7 +163,7 @@ static u32 hsw_infoframe_enable(unsigned int type)
  }
static i915_reg_t
-hsw_dip_data_reg(struct drm_i915_private *dev_priv,
+hsw_dip_data_reg(struct drm_i915_private *i915,
  		 enum transcoder cpu_transcoder,
  		 unsigned int type,
  		 int i)
@@ -207,7 +207,7 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
  				const void *frame, ssize_t len)
  {
  	const u32 *data = frame;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 val = I915_READ(VIDEO_DIP_CTL);
  	int i;
@@ -241,7 +241,7 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
  			       unsigned int type,
  			       void *frame, ssize_t len)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 val, *data = frame;
  	int i;
@@ -259,7 +259,7 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
  static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
  				  const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 val = I915_READ(VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -278,7 +278,7 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
  				const void *frame, ssize_t len)
  {
  	const u32 *data = frame;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
  	u32 val = I915_READ(reg);
@@ -314,7 +314,7 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
  			       unsigned int type,
  			       void *frame, ssize_t len)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	u32 val, *data = frame;
  	int i;
@@ -333,7 +333,7 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
  static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
  				  const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
  	i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
  	u32 val = I915_READ(reg);
@@ -355,7 +355,7 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
  				const void *frame, ssize_t len)
  {
  	const u32 *data = frame;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
  	u32 val = I915_READ(reg);
@@ -394,7 +394,7 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
  			       unsigned int type,
  			       void *frame, ssize_t len)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	u32 val, *data = frame;
  	int i;
@@ -413,7 +413,7 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
  static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
  				  const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
  	u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
@@ -431,7 +431,7 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
  				const void *frame, ssize_t len)
  {
  	const u32 *data = frame;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
  	u32 val = I915_READ(reg);
@@ -467,7 +467,7 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
  			       unsigned int type,
  			       void *frame, ssize_t len)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	u32 val, *data = frame;
  	int i;
@@ -486,7 +486,7 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
  static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
  				  const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
  	u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
@@ -507,7 +507,7 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
  				const void *frame, ssize_t len)
  {
  	const u32 *data = frame;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
  	int data_size;
@@ -520,13 +520,13 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
  	I915_WRITE(ctl_reg, val);
for (i = 0; i < len; i += 4) {
-		I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
+		I915_WRITE(hsw_dip_data_reg(i915, cpu_transcoder,
  					    type, i >> 2), *data);
  		data++;
  	}
  	/* Write every possible data byte to force correct ECC calculation. */
  	for (; i < data_size; i += 4)
-		I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
+		I915_WRITE(hsw_dip_data_reg(i915, cpu_transcoder,
  					    type, i >> 2), 0);
val |= hsw_infoframe_enable(type);
@@ -539,7 +539,7 @@ static void hsw_read_infoframe(struct intel_encoder *encoder,
  			       unsigned int type,
  			       void *frame, ssize_t len)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	u32 val, *data = frame;
  	int i;
@@ -547,14 +547,14 @@ static void hsw_read_infoframe(struct intel_encoder *encoder,
  	val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder));
for (i = 0; i < len; i += 4)
-		*data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder,
+		*data++ = I915_READ(hsw_dip_data_reg(i915, cpu_transcoder,
  						     type, i >> 2));
  }
static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
  				  const struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
  	u32 mask;
@@ -562,7 +562,7 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
  		VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
  		VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		mask |= VIDEO_DIP_ENABLE_DRM_GLK;
return val & mask;
@@ -593,7 +593,7 @@ u32 intel_hdmi_infoframe_enable(unsigned int type)
  u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
  				  const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
  	u32 val, ret = 0;
  	int i;
@@ -604,7 +604,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
  	for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
  		unsigned int type = infoframe_type_to_idx[i];
- if (HAS_DDI(dev_priv)) {
+		if (HAS_DDI(i915)) {
  			if (val & hsw_infoframe_enable(type))
  				ret |= BIT(i);
  		} else {
@@ -804,10 +804,10 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
  				 struct drm_connector_state *conn_state)
  {
  	struct hdmi_drm_infoframe *frame = &crtc_state->infoframes.drm.drm;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	int ret;
- if (!(INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
+	if (!(INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915)))
  		return true;
if (!crtc_state->has_infoframe)
@@ -837,7 +837,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *crtc_state,
  			       const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
  	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
  	i915_reg_t reg = VIDEO_DIP_CTL;
@@ -947,7 +947,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
  					 const struct intel_crtc_state *crtc_state,
  					 const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	i915_reg_t reg;
@@ -955,11 +955,11 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
  	     intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
  		return false;
- if (HAS_DDI(dev_priv))
+	if (HAS_DDI(i915))
  		reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
-	else if (HAS_PCH_SPLIT(dev_priv))
+	else if (HAS_PCH_SPLIT(i915))
  		reg = TVIDEO_DIP_GCP(crtc->pipe);
  	else
  		return false;
@@ -972,7 +972,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
  void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
  				   struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	i915_reg_t reg;
@@ -980,11 +980,11 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
  	     intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
  		return;
- if (HAS_DDI(dev_priv))
+	if (HAS_DDI(i915))
  		reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
  		reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
-	else if (HAS_PCH_SPLIT(dev_priv))
+	else if (HAS_PCH_SPLIT(i915))
  		reg = TVIDEO_DIP_GCP(crtc->pipe);
  	else
  		return;
@@ -996,9 +996,9 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
  					     struct intel_crtc_state *crtc_state,
  					     struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+	if (IS_G4X(i915) || !crtc_state->has_infoframe)
  		return;
crtc_state->infoframes.enable |=
@@ -1019,7 +1019,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *crtc_state,
  			       const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
  	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
@@ -1078,7 +1078,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *crtc_state,
  			       const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
  	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -1127,7 +1127,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *crtc_state,
  			       const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
  	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -1185,11 +1185,11 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *crtc_state,
  			       const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
  	u32 val = I915_READ(reg);
- assert_hdmi_transcoder_func_disabled(dev_priv,
+	assert_hdmi_transcoder_func_disabled(i915,
  					     crtc_state->cpu_transcoder);
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -1225,9 +1225,9 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
+	struct drm_i915_private *i915 = to_i915(intel_hdmi_to_dev(hdmi));
  	struct i2c_adapter *adapter =
-		intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+		intel_gmbus_get_adapter(i915, hdmi->ddc_bus);
if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
  		return;
@@ -1243,9 +1243,9 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
  				unsigned int offset, void *buffer, size_t size)
  {
  	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		intel_dig_port->base.base.dev->dev_private;
-	struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+	struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
  							      hdmi->ddc_bus);
  	int ret;
  	u8 start = offset & 0xff;
@@ -1273,9 +1273,9 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
  				 unsigned int offset, void *buffer, size_t size)
  {
  	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		intel_dig_port->base.base.dev->dev_private;
-	struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+	struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
  							      hdmi->ddc_bus);
  	int ret;
  	u8 *write_buf;
@@ -1308,9 +1308,9 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
  				  u8 *an)
  {
  	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		intel_dig_port->base.base.dev->dev_private;
-	struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+	struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
  							      hdmi->ddc_bus);
  	int ret;
@@ -1428,7 +1428,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
  static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
  	struct drm_crtc *crtc = connector->base.state->crtc;
  	struct intel_crtc *intel_crtc = container_of(crtc,
@@ -1463,7 +1463,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
  {
  	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
  	struct intel_connector *connector = hdmi->attached_connector;
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	int ret;
if (!enable)
@@ -1480,7 +1480,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
  	 * WA: To fix incorrect positioning of the window of
  	 * opportunity and enc_en signalling in KABYLAKE.
  	 */
-	if (IS_KABYLAKE(dev_priv) && enable)
+	if (IS_KABYLAKE(i915) && enable)
  		return kbl_repositioning_enc_en_signal(connector);
return 0;
@@ -1489,7 +1489,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
  static
  bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		intel_dig_port->base.base.dev->dev_private;
  	enum port port = intel_dig_port->base.port;
  	int ret;
@@ -1720,7 +1720,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *crtc_state)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
  	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
@@ -1729,7 +1729,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
  	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
-	if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
+	if (!HAS_PCH_SPLIT(i915) && crtc_state->limited_color_range)
  		hdmi_val |= HDMI_COLOR_RANGE_16_235;
  	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
  		hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -1744,9 +1744,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
  	if (crtc_state->has_hdmi_sink)
  		hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (HAS_PCH_CPT(dev_priv))
+	if (HAS_PCH_CPT(i915))
  		hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
-	else if (IS_CHERRYVIEW(dev_priv))
+	else if (IS_CHERRYVIEW(i915))
  		hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
  	else
  		hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
@@ -1758,19 +1758,19 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
  static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
  				    enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
  	intel_wakeref_t wakeref;
  	bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     encoder->power_domain);
  	if (!wakeref)
  		return false;
- ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
+	ret = intel_sdvo_port_enabled(i915, intel_hdmi->hdmi_reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+	intel_display_power_put(i915, encoder->power_domain, wakeref);
return ret;
  }
@@ -1780,7 +1780,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
  {
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 tmp, flags = 0;
  	int dotclock;
@@ -1810,7 +1810,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
  	if (tmp & HDMI_AUDIO_ENABLE)
  		pipe_config->has_audio = true;
- if (!HAS_PCH_SPLIT(dev_priv) &&
+	if (!HAS_PCH_SPLIT(i915) &&
  	    tmp & HDMI_COLOR_RANGE_16_235)
  		pipe_config->limited_color_range = true;
@@ -1858,7 +1858,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
  			    const struct drm_connector_state *conn_state)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
  	u32 temp;
@@ -1880,7 +1880,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
  			    const struct drm_connector_state *conn_state)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
  	u32 temp;
@@ -1930,7 +1930,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
  			    const struct drm_connector_state *conn_state)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
  	enum pipe pipe = crtc->pipe;
@@ -1991,7 +1991,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
  			       const struct drm_connector_state *old_conn_state)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
  	struct intel_digital_port *intel_dig_port =
  		hdmi_to_dig_port(intel_hdmi);
@@ -2009,13 +2009,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
  	 * to transcoder A after disabling it to allow the
  	 * matching DP port to be enabled on transcoder A.
  	 */
-	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+	if (HAS_PCH_IBX(i915) && crtc->pipe == PIPE_B) {
  		/*
  		 * We get CPU/PCH FIFO underruns on the other pipe when
  		 * doing the workaround. Sweep them under the rug.
  		 */
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+		intel_set_cpu_fifo_underrun_reporting(i915, PIPE_A, false);
+		intel_set_pch_fifo_underrun_reporting(i915, PIPE_A, false);
temp &= ~SDVO_PIPE_SEL_MASK;
  		temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
@@ -2032,9 +2032,9 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
  		I915_WRITE(intel_hdmi->hdmi_reg, temp);
  		POSTING_READ(intel_hdmi->hdmi_reg);
- intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
-		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+		intel_wait_for_vblank_if_active(i915, PIPE_A);
+		intel_set_cpu_fifo_underrun_reporting(i915, PIPE_A, true);
+		intel_set_pch_fifo_underrun_reporting(i915, PIPE_A, true);
  	}
intel_dig_port->set_infoframes(encoder,
@@ -2073,16 +2073,16 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder,
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	const struct ddi_vbt_port_info *info =
-		&dev_priv->vbt.ddi_port_info[encoder->port];
+		&i915->vbt.ddi_port_info[encoder->port];
  	int max_tmds_clock;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		max_tmds_clock = 594000;
-	else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+	else if (INTEL_GEN(i915) >= 8 || IS_HASWELL(i915))
  		max_tmds_clock = 300000;
-	else if (INTEL_GEN(dev_priv) >= 5)
+	else if (INTEL_GEN(i915) >= 5)
  		max_tmds_clock = 225000;
  	else
  		max_tmds_clock = 165000;
@@ -2123,7 +2123,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
  		      int clock, bool respect_downstream_limits,
  		      bool force_dvi)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
+	struct drm_i915_private *i915 = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
  		return MODE_CLOCK_LOW;
@@ -2131,11 +2131,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
  		return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
-	if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
+	if (IS_GEN9_LP(i915) && clock > 223333 && clock < 240000)
  		return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
-	if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
+	if (IS_CHERRYVIEW(i915) && clock > 216000 && clock < 240000)
  		return MODE_CLOCK_RANGE;
return MODE_OK;
@@ -2147,7 +2147,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
  {
  	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
  	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum drm_mode_status status;
  	int clock;
  	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -2176,12 +2176,12 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
if (hdmi->has_hdmi_sink && !force_dvi) {
  		/* if we can't do 8bpc we may still be able to do 12bpc */
-		if (status != MODE_OK && !HAS_GMCH(dev_priv))
+		if (status != MODE_OK && !HAS_GMCH(i915))
  			status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
  						       true, force_dvi);
/* if we can't do 8,12bpc we may still be able to do 10bpc */
-		if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
+		if (status != MODE_OK && INTEL_GEN(i915) >= 11)
  			status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
  						       true, force_dvi);
  	}
@@ -2192,7 +2192,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
  static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
  				     int bpc)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(crtc_state->base.crtc->dev);
  	struct drm_atomic_state *state = crtc_state->base.state;
  	struct drm_connector_state *connector_state;
@@ -2201,10 +2201,10 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
  		&crtc_state->base.adjusted_mode;
  	int i;
- if (HAS_GMCH(dev_priv))
+	if (HAS_GMCH(i915))
  		return false;
- if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
+	if (bpc == 10 && INTEL_GEN(i915) < 11)
  		return false;
if (crtc_state->pipe_bpp < bpc * 3)
@@ -2246,13 +2246,13 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
  	}
/* Display WA #1139: glk */
-	if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
+	if (bpc == 12 && IS_GLK_REVID(i915, 0, GLK_REVID_A1) &&
  	    adjusted_mode->htotal > 5460)
  		return false;
/* Display Wa_1405510057:icl */
  	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
-	    bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
+	    bpc == 10 && INTEL_GEN(i915) >= 11 &&
  	    (adjusted_mode->crtc_hblank_end -
  	     adjusted_mode->crtc_hblank_start) % 8 == 2)
  		return false;
@@ -2297,7 +2297,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
  			      struct drm_connector_state *conn_state)
  {
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  	struct drm_connector *connector = conn_state->connector;
  	struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
@@ -2345,7 +2345,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
  		}
  	}
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
+	if (HAS_PCH_SPLIT(i915) && !HAS_DDI(i915))
  		pipe_config->has_pch_encoder = true;
if (pipe_config->has_hdmi_sink) {
@@ -2399,8 +2399,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
-	if (scdc->scrambling.supported && (INTEL_GEN(dev_priv) >= 10 ||
-					   IS_GEMINILAKE(dev_priv))) {
+	if (scdc->scrambling.supported && (INTEL_GEN(i915) >= 10 ||
+					   IS_GEMINILAKE(i915))) {
  		if (scdc->scrambling.low_rates)
  			pipe_config->hdmi_scrambling = true;
@@ -2453,11 +2453,11 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
  static void
  intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
  	enum port port = hdmi_to_dig_port(hdmi)->base.port;
  	struct i2c_adapter *adapter =
-		intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+		intel_gmbus_get_adapter(i915, hdmi->ddc_bus);
  	enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
/*
@@ -2477,7 +2477,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
  		 * Make sure not to set limits for that port.
  		 */
  		if (has_edid && !connector->override_edid &&
-		    intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
+		    intel_bios_is_port_dp_dual_mode(i915, port)) {
  			DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
  			type = DRM_DP_DUAL_MODE_TYPE1_DVI;
  		} else {
@@ -2500,16 +2500,16 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
  static bool
  intel_hdmi_set_edid(struct drm_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
  	intel_wakeref_t wakeref;
  	struct edid *edid;
  	bool connected = false;
  	struct i2c_adapter *i2c;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+	wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
- i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+	i2c = intel_gmbus_get_adapter(i915, intel_hdmi->ddc_bus);
edid = drm_get_edid(connector, i2c);
@@ -2522,7 +2522,7 @@ intel_hdmi_set_edid(struct drm_connector *connector)
	intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
-	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
to_intel_connector(connector)->detect_edid = edid;
  	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -2541,7 +2541,7 @@ static enum drm_connector_status
  intel_hdmi_detect(struct drm_connector *connector, bool force)
  {
  	enum drm_connector_status status = connector_status_disconnected;
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
  	struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
  	intel_wakeref_t wakeref;
@@ -2549,9 +2549,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
  	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
  		      connector->base.id, connector->name);
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+	wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
- if (INTEL_GEN(dev_priv) >= 11 &&
+	if (INTEL_GEN(i915) >= 11 &&
  	    !intel_digital_port_connected(encoder))
  		goto out;
@@ -2561,7 +2561,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
  		status = connector_status_connected;
out:
-	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+	intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
if (status != connector_status_connected)
  		cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
@@ -2613,7 +2613,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
  				const struct drm_connector_state *conn_state)
  {
  	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -2627,7 +2627,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
	g4x_enable_hdmi(encoder, pipe_config, conn_state);
-	vlv_wait_port_ready(dev_priv, dport, 0x0);
+	vlv_wait_port_ready(i915, dport, 0x0);
  }
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
@@ -2668,14 +2668,14 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
  				  const struct drm_connector_state *old_conn_state)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
- vlv_dpio_get(dev_priv);
+	vlv_dpio_get(i915);
/* Assert data lane reset */
  	chv_data_lane_soft_reset(encoder, old_crtc_state, true);
- vlv_dpio_put(dev_priv);
+	vlv_dpio_put(i915);
  }
static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
@@ -2684,7 +2684,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
  {
  	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	chv_phy_pre_encoder_enable(encoder, pipe_config);

@@ -2698,7 +2698,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
  	g4x_enable_hdmi(encoder, pipe_config, conn_state);

-	vlv_wait_port_ready(dev_priv, dport, 0x0);
+	vlv_wait_port_ready(i915, dport, 0x0);
/* Second common lane will stay alive on its own now */
  	chv_phy_release_cl2_override(encoder);
@@ -2707,10 +2707,10 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
  static struct i2c_adapter *
  intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
- return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+	return intel_gmbus_get_adapter(i915, intel_hdmi->ddc_bus);
  }
static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
@@ -2791,7 +2791,7 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
  static void
  intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	struct intel_digital_port *intel_dig_port =
  				hdmi_to_dig_port(intel_hdmi);
@@ -2810,11 +2810,11 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
  	drm_connector_attach_content_type_property(connector);
  	connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		drm_object_attach_property(&connector->base,
  			connector->dev->mode_config.hdr_output_metadata_property, 0);
- if (!HAS_GMCH(dev_priv))
+	if (!HAS_GMCH(i915))
  		drm_connector_attach_max_bpc_property(connector, 8, 12);
  }
@@ -2841,12 +2841,12 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
  				       bool high_tmds_clock_ratio,
  				       bool scrambling)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
  	struct drm_scrambling *sink_scrambling =
  		&connector->display_info.hdmi.scdc.scrambling;
  	struct i2c_adapter *adapter =
-		intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+		intel_gmbus_get_adapter(i915, intel_hdmi->ddc_bus);
if (!sink_scrambling->supported)
  		return true;
@@ -2861,7 +2861,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
  		drm_scdc_set_scrambling(adapter, scrambling);
  }
-static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 chv_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
  {
  	u8 ddc_pin;
@@ -2883,7 +2883,7 @@ static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
  	return ddc_pin;
  }
-static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 bxt_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
  {
  	u8 ddc_pin;
@@ -2902,7 +2902,7 @@ static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
  	return ddc_pin;
  }
-static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
+static u8 cnp_port_to_ddc_pin(struct drm_i915_private *i915,
  			      enum port port)
  {
  	u8 ddc_pin;
@@ -2928,7 +2928,7 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
  	return ddc_pin;
  }
-static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 icl_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
  {
  	u8 ddc_pin;
@@ -2959,7 +2959,7 @@ static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
  	return ddc_pin;
  }
-static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
+static u8 g4x_port_to_ddc_pin(struct drm_i915_private *i915,
  			      enum port port)
  {
  	u8 ddc_pin;
@@ -2982,11 +2982,11 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
  	return ddc_pin;
  }
-static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+static u8 intel_hdmi_ddc_pin(struct drm_i915_private *i915,
  			     enum port port)
  {
  	const struct ddi_vbt_port_info *info =
-		&dev_priv->vbt.ddi_port_info[port];
+		&i915->vbt.ddi_port_info[port];
  	u8 ddc_pin;
if (info->alternate_ddc_pin) {
@@ -2995,16 +2995,16 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
  		return info->alternate_ddc_pin;
  	}
- if (HAS_PCH_ICP(dev_priv))
-		ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
-	else if (HAS_PCH_CNP(dev_priv))
-		ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
-	else if (IS_GEN9_LP(dev_priv))
-		ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
-	else if (IS_CHERRYVIEW(dev_priv))
-		ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
+	if (HAS_PCH_ICP(i915))
+		ddc_pin = icl_port_to_ddc_pin(i915, port);
+	else if (HAS_PCH_CNP(i915))
+		ddc_pin = cnp_port_to_ddc_pin(i915, port);
+	else if (IS_GEN9_LP(i915))
+		ddc_pin = bxt_port_to_ddc_pin(i915, port);
+	else if (IS_CHERRYVIEW(i915))
+		ddc_pin = chv_port_to_ddc_pin(i915, port);
  	else
-		ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
+		ddc_pin = g4x_port_to_ddc_pin(i915, port);
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
  		      ddc_pin, port_name(port));
@@ -3014,20 +3014,20 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(intel_dig_port->base.base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		intel_dig_port->write_infoframe = vlv_write_infoframe;
  		intel_dig_port->read_infoframe = vlv_read_infoframe;
  		intel_dig_port->set_infoframes = vlv_set_infoframes;
  		intel_dig_port->infoframes_enabled = vlv_infoframes_enabled;
-	} else if (IS_G4X(dev_priv)) {
+	} else if (IS_G4X(i915)) {
  		intel_dig_port->write_infoframe = g4x_write_infoframe;
  		intel_dig_port->read_infoframe = g4x_read_infoframe;
  		intel_dig_port->set_infoframes = g4x_set_infoframes;
  		intel_dig_port->infoframes_enabled = g4x_infoframes_enabled;
-	} else if (HAS_DDI(dev_priv)) {
+	} else if (HAS_DDI(i915)) {
  		if (intel_dig_port->lspcon.active) {
  			intel_dig_port->write_infoframe = lspcon_write_infoframe;
  			intel_dig_port->read_infoframe = lspcon_read_infoframe;
@@ -3039,7 +3039,7 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
  			intel_dig_port->set_infoframes = hsw_set_infoframes;
  			intel_dig_port->infoframes_enabled = hsw_infoframes_enabled;
  		}
-	} else if (HAS_PCH_IBX(dev_priv)) {
+	} else if (HAS_PCH_IBX(i915)) {
  		intel_dig_port->write_infoframe = ibx_write_infoframe;
  		intel_dig_port->read_infoframe = ibx_read_infoframe;
  		intel_dig_port->set_infoframes = ibx_set_infoframes;
@@ -3059,7 +3059,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
  	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
  	struct intel_encoder *intel_encoder = &intel_dig_port->base;
  	struct drm_device *dev = intel_encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum port port = intel_encoder->port;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
@@ -3078,16 +3078,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
  	connector->doublescan_allowed = 0;
  	connector->stereo_allowed = 1;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		connector->ycbcr_420_allowed = true;
- intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+	intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(i915, port);
if (WARN_ON(port == PORT_A))
  		return;
-	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+	intel_encoder->hpd_pin = intel_hpd_pin_default(i915, port);
- if (HAS_DDI(dev_priv))
+	if (HAS_DDI(i915))
  		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
  	else
  		intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -3097,7 +3097,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
  	intel_connector_attach_encoder(intel_connector, intel_encoder);
  	intel_hdmi->attached_connector = intel_connector;
- if (is_hdcp_supported(dev_priv, port)) {
+	if (is_hdcp_supported(i915, port)) {
  		int ret = intel_hdcp_init(intel_connector,
  					  &intel_hdmi_hdcp_shim);
  		if (ret)
@@ -3108,7 +3108,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
  	 * 0xd.  Failure to do so will result in spurious interrupts being
  	 * generated on the port when a cable is not attached.
  	 */
-	if (IS_G45(dev_priv)) {
+	if (IS_G45(i915)) {
  		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
  		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
  	}
@@ -3119,7 +3119,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
  		DRM_DEBUG_KMS("CEC notifier get failed\n");
  }
-void intel_hdmi_init(struct drm_i915_private *dev_priv,
+void intel_hdmi_init(struct drm_i915_private *i915,
  		     i915_reg_t hdmi_reg, enum port port)
  {
  	struct intel_digital_port *intel_dig_port;
@@ -3138,13 +3138,13 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
  	intel_encoder = &intel_dig_port->base;

-	drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+	drm_encoder_init(&i915->drm, &intel_encoder->base,
  			 &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
  			 "HDMI %c", port_name(port));
intel_encoder->hotplug = intel_encoder_hotplug;
  	intel_encoder->compute_config = intel_hdmi_compute_config;
-	if (HAS_PCH_SPLIT(dev_priv)) {
+	if (HAS_PCH_SPLIT(i915)) {
  		intel_encoder->disable = pch_disable_hdmi;
  		intel_encoder->post_disable = pch_post_disable_hdmi;
  	} else {
@@ -3152,22 +3152,22 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
  	}
  	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
  	intel_encoder->get_config = intel_hdmi_get_config;
-	if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
  		intel_encoder->pre_enable = chv_hdmi_pre_enable;
  		intel_encoder->enable = vlv_enable_hdmi;
  		intel_encoder->post_disable = chv_hdmi_post_disable;
  		intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
-	} else if (IS_VALLEYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915)) {
  		intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
  		intel_encoder->pre_enable = vlv_hdmi_pre_enable;
  		intel_encoder->enable = vlv_enable_hdmi;
  		intel_encoder->post_disable = vlv_hdmi_post_disable;
  	} else {
  		intel_encoder->pre_enable = intel_hdmi_pre_enable;
-		if (HAS_PCH_CPT(dev_priv))
+		if (HAS_PCH_CPT(i915))
  			intel_encoder->enable = cpt_enable_hdmi;
-		else if (HAS_PCH_IBX(dev_priv))
+		else if (HAS_PCH_IBX(i915))
  			intel_encoder->enable = ibx_enable_hdmi;
  		else
  			intel_encoder->enable = g4x_enable_hdmi;
@@ -3176,7 +3176,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
  	intel_encoder->type = INTEL_OUTPUT_HDMI;
  	intel_encoder->power_domain = intel_port_to_power_domain(port);
  	intel_encoder->port = port;
-	if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		if (port == PORT_D)
  			intel_encoder->crtc_mask = 1 << 2;
  		else
@@ -3190,7 +3190,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
  	 * to work on real hardware. And since g4x can send infoframes to
  	 * only one port anyway, nothing is lost by allowing it.
  	 */
-	if (IS_G4X(dev_priv))
+	if (IS_G4X(i915))
  		intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
@@ -3199,6 +3199,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
  	intel_infoframe_init(intel_dig_port);

-	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+	intel_dig_port->aux_ch = intel_bios_port_aux_ch(i915, port);
  	intel_hdmi_init_connector(intel_dig_port, intel_connector);
  }
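
Side note for anyone following the rename through intel_hdmi.c: the DDC pin lookup touched above is the typical shape this series hits everywhere -- one device-private pointer threaded through a VBT override check and a ladder of platform checks. A rough, self-contained userspace sketch of that dispatch shape follows; every name in it (xx_device, ddc_pin_for_port, the pin numbers) is invented for illustration and is not the driver's code:

/* Simplified sketch of the ddc-pin dispatch pattern above: the VBT
 * override wins, otherwise a per-platform default is chosen.  All
 * names and pin values are hypothetical.
 */
#include <stdio.h>

enum xx_platform { PLAT_G4X, PLAT_CHV, PLAT_BXT, PLAT_CNP, PLAT_ICP };

struct xx_device {
	enum xx_platform platform;
	unsigned char vbt_alternate_pin[8];	/* 0 means "no override" */
};

static unsigned char icp_pin(int port) { return 0x9 + port; }	/* made-up table */
static unsigned char cnp_pin(int port) { return 0x4 + port; }
static unsigned char g4x_pin(int port) { return 0x1 + port; }

static unsigned char ddc_pin_for_port(const struct xx_device *xx, int port)
{
	/* mirrors the info->alternate_ddc_pin check above */
	if (xx->vbt_alternate_pin[port])
		return xx->vbt_alternate_pin[port];

	switch (xx->platform) {	/* platform default, like the if/else ladder */
	case PLAT_ICP: return icp_pin(port);
	case PLAT_CNP: return cnp_pin(port);
	default:       return g4x_pin(port);
	}
}

int main(void)
{
	struct xx_device xx = { .platform = PLAT_ICP };

	xx.vbt_alternate_pin[2] = 0x3;	/* pretend VBT overrides port C */
	printf("port B -> pin 0x%x\n", ddc_pin_for_port(&xx, 1));
	printf("port C -> pin 0x%x\n", ddc_pin_for_port(&xx, 2));
	return 0;
}

Whatever the final name of the pointer ends up being, only the first argument of each helper changes; the dispatch itself is untouched by the sed.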
diff --git a/drivers/gpu/drm/i915/intel_hdmi.h b/drivers/gpu/drm/i915/intel_hdmi.h
index 106c2e0bc3c9..46f03356b54b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/intel_hdmi.h
@@ -24,7 +24,7 @@ struct intel_hdmi;
  struct drm_connector_state;
  union hdmi_infoframe;
-void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
+void intel_hdmi_init(struct drm_i915_private *i915, i915_reg_t hdmi_reg,
  		     enum port port);
  void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
  			       struct intel_connector *intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index ff9eb3c855d3..a551b9ab2e17 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -78,7 +78,7 @@
/**
   * intel_hpd_pin_default - return default pin associated with certain port.
- * @dev_priv: private driver data pointer
+ * @i915: private driver data pointer
   * @port: the hpd port to get associated pin
   *
   * It is only valid and used by digital port encoder.
@@ -86,7 +86,7 @@
  * Return pin that is associated with @port and HPD_NONE if no pin is
   * hard associated with that @port.
   */
-enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *i915,
  				   enum port port)
  {
  	switch (port) {
@@ -101,7 +101,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
  	case PORT_E:
  		return HPD_PORT_E;
  	case PORT_F:
-		if (IS_CNL_WITH_PORT_F(dev_priv))
+		if (IS_CNL_WITH_PORT_F(i915))
  			return HPD_PORT_E;
  		return HPD_PORT_F;
  	default:
@@ -115,7 +115,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
/**
   * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
- * @dev_priv: private driver data pointer
+ * @i915: private driver data pointer
   * @pin: the pin to gather stats on
   * @long_hpd: whether the HPD IRQ was long or short
   *
@@ -124,13 +124,13 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
   * responsible for further action.
   *
   * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
- * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
+ * stored in @i915->hotplug.hpd_storm_threshold which defaults to
   * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
   * short IRQs count as +1. If this threshold is exceeded, it's considered an
   * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
   *
   * By default, most systems will only count long IRQs towards
- * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
+ * &i915->hotplug.hpd_storm_threshold. However, some older systems also
   * suffer from short IRQ storms and must also track these. Because short IRQ
   * storms are naturally caused by sideband interactions with DP MST devices,
   * short IRQ detection is only enabled for systems without DP MST support.
@@ -142,10 +142,10 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
   *
   * Return true if an IRQ storm was detected on @pin.
   */
-static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+static bool intel_hpd_irq_storm_detect(struct drm_i915_private *i915,
  				       enum hpd_pin pin, bool long_hpd)
  {
-	struct i915_hotplug *hpd = &dev_priv->hotplug;
+	struct i915_hotplug *hpd = &i915->hotplug;
  	unsigned long start = hpd->stats[pin].last_jiffies;
  	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
  	const int increment = long_hpd ? 10 : 1;
@@ -153,7 +153,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
  	bool storm = false;
if (!threshold ||
-	    (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
+	    (!long_hpd && !i915->hotplug.hpd_short_storm_enabled))
  		return false;
if (!time_in_range(jiffies, start, end)) {
@@ -175,9 +175,9 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
  }
static void
-intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
+intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct intel_connector *intel_connector;
  	struct intel_encoder *intel_encoder;
  	struct drm_connector *connector;
@@ -185,7 +185,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
  	enum hpd_pin pin;
  	bool hpd_disabled = false;
- lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&i915->irq_lock);
drm_connector_list_iter_begin(dev, &conn_iter);
  	drm_for_each_connector_iter(connector, &conn_iter) {
@@ -199,14 +199,14 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
pin = intel_encoder->hpd_pin;
  		if (pin == HPD_NONE ||
-		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+		    i915->hotplug.stats[pin].state != HPD_MARK_DISABLED)
  			continue;
DRM_INFO("HPD interrupt storm detected on connector %s: "
  			 "switching from hotplug detection to polling\n",
  			 connector->name);
- dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+		i915->hotplug.stats[pin].state = HPD_DISABLED;
  		connector->polled = DRM_CONNECTOR_POLL_CONNECT
  			| DRM_CONNECTOR_POLL_DISCONNECT;
  		hpd_disabled = true;
@@ -216,31 +216,31 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
  	/* Enable polling and queue hotplug re-enabling. */
  	if (hpd_disabled) {
  		drm_kms_helper_poll_enable(dev);
-		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+		mod_delayed_work(system_wq, &i915->hotplug.reenable_work,
  				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
  	}
  }
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv),
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915),
  			     hotplug.reenable_work.work);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	intel_wakeref_t wakeref;
  	enum hpd_pin pin;
- wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(i915);
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
  	for_each_hpd_pin(pin) {
  		struct drm_connector *connector;
  		struct drm_connector_list_iter conn_iter;
- if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
+		if (i915->hotplug.stats[pin].state != HPD_DISABLED)
  			continue;
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+		i915->hotplug.stats[pin].state = HPD_ENABLED;
drm_connector_list_iter_begin(dev, &conn_iter);
  		drm_for_each_connector_iter(connector, &conn_iter) {
@@ -259,11 +259,11 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
  		}
  		drm_connector_list_iter_end(&conn_iter);
  	}
-	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	if (i915->display_irqs_enabled && i915->display.hpd_irq_setup)
+		i915->display.hpd_irq_setup(i915);
+	spin_unlock_irq(&i915->irq_lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(i915, wakeref);
  }
bool intel_encoder_hotplug(struct intel_encoder *encoder,
@@ -298,20 +298,20 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
static void i915_digport_work_func(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
  	u32 long_port_mask, short_port_mask;
  	struct intel_encoder *encoder;
  	u32 old_bits = 0;
- spin_lock_irq(&dev_priv->irq_lock);
-	long_port_mask = dev_priv->hotplug.long_port_mask;
-	dev_priv->hotplug.long_port_mask = 0;
-	short_port_mask = dev_priv->hotplug.short_port_mask;
-	dev_priv->hotplug.short_port_mask = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	long_port_mask = i915->hotplug.long_port_mask;
+	i915->hotplug.long_port_mask = 0;
+	short_port_mask = i915->hotplug.short_port_mask;
+	i915->hotplug.short_port_mask = 0;
+	spin_unlock_irq(&i915->irq_lock);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+	for_each_intel_encoder(&i915->drm, encoder) {
  		struct intel_digital_port *dig_port;
  		enum port port = encoder->port;
  		bool long_hpd, short_hpd;
@@ -336,10 +336,10 @@ static void i915_digport_work_func(struct work_struct *work)
  	}
if (old_bits) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		dev_priv->hotplug.event_bits |= old_bits;
-		spin_unlock_irq(&dev_priv->irq_lock);
-		schedule_work(&dev_priv->hotplug.hotplug_work);
+		spin_lock_irq(&i915->irq_lock);
+		i915->hotplug.event_bits |= old_bits;
+		spin_unlock_irq(&i915->irq_lock);
+		schedule_work(&i915->hotplug.hotplug_work);
  	}
  }
@@ -348,9 +348,9 @@ static void i915_digport_work_func(struct work_struct *work)
   */
  static void i915_hotplug_work_func(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		container_of(work, struct drm_i915_private, hotplug.hotplug_work);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct intel_connector *intel_connector;
  	struct intel_encoder *intel_encoder;
  	struct drm_connector *connector;
@@ -361,15 +361,15 @@ static void i915_hotplug_work_func(struct work_struct *work)
  	mutex_lock(&dev->mode_config.mutex);
  	DRM_DEBUG_KMS("running encoder hotplug functions\n");
- spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
- hpd_event_bits = dev_priv->hotplug.event_bits;
-	dev_priv->hotplug.event_bits = 0;
+	hpd_event_bits = i915->hotplug.event_bits;
+	i915->hotplug.event_bits = 0;
/* Enable polling for connectors which had HPD IRQ storms */
-	intel_hpd_irq_storm_switch_to_polling(dev_priv);
+	intel_hpd_irq_storm_switch_to_polling(i915);
- spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
drm_connector_list_iter_begin(dev, &conn_iter);
  	drm_for_each_connector_iter(connector, &conn_iter) {
@@ -395,7 +395,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
/**
   * intel_hpd_irq_handler - main hotplug irq handler
- * @dev_priv: drm_i915_private
+ * @i915: drm_i915_private
   * @pin_mask: a mask of hpd pins that have triggered the irq
   * @long_mask: a mask of hpd pins that may be long hpd pulses
   *
@@ -409,7 +409,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
   * Here, we do hotplug irq storm detection and mitigation, and pass further
   * processing to appropriate bottom halves.
   */
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct drm_i915_private *i915,
  			   u32 pin_mask, u32 long_mask)
  {
  	struct intel_encoder *encoder;
@@ -422,7 +422,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
  	if (!pin_mask)
  		return;
- spin_lock(&dev_priv->irq_lock);
+	spin_lock(&i915->irq_lock);
/*
  	 * Determine whether ->hpd_pulse() exists for each pin, and
@@ -430,7 +430,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
  	 * as each pin may have up to two encoders (HDMI and DP) and
  	 * only the one of them (DP) will have ->hpd_pulse().
  	 */
-	for_each_intel_encoder(&dev_priv->drm, encoder) {
+	for_each_intel_encoder(&i915->drm, encoder) {
  		bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
  		enum port port = encoder->port;
  		bool long_hpd;
@@ -450,10 +450,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (long_hpd) {
  			long_hpd_pulse_mask |= BIT(pin);
-			dev_priv->hotplug.long_port_mask |= BIT(port);
+			i915->hotplug.long_port_mask |= BIT(port);
  		} else {
  			short_hpd_pulse_mask |= BIT(pin);
-			dev_priv->hotplug.short_port_mask |= BIT(port);
+			i915->hotplug.short_port_mask |= BIT(port);
  		}
  	}
@@ -464,19 +464,19 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
  		if (!(BIT(pin) & pin_mask))
  			continue;
- if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
+		if (i915->hotplug.stats[pin].state == HPD_DISABLED) {
  			/*
  			 * On GMCH platforms the interrupt mask bits only
  			 * prevent irq generation, not the setting of the
  			 * hotplug bits itself. So only WARN about unexpected
  			 * interrupts on saner platforms.
  			 */
-			WARN_ONCE(!HAS_GMCH(dev_priv),
+			WARN_ONCE(!HAS_GMCH(i915),
  				  "Received HPD interrupt on pin %d although disabled\n", pin);
  			continue;
  		}
- if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
+		if (i915->hotplug.stats[pin].state != HPD_ENABLED)
  			continue;
/*
@@ -487,13 +487,13 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
  		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
  			long_hpd = long_hpd_pulse_mask & BIT(pin);
  		} else {
-			dev_priv->hotplug.event_bits |= BIT(pin);
+			i915->hotplug.event_bits |= BIT(pin);
  			long_hpd = true;
  			queue_hp = true;
  		}
- if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
-			dev_priv->hotplug.event_bits &= ~BIT(pin);
+		if (intel_hpd_irq_storm_detect(i915, pin, long_hpd)) {
+			i915->hotplug.event_bits &= ~BIT(pin);
  			storm_detected = true;
  			queue_hp = true;
  		}
@@ -503,9 +503,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
  	 * Disable any IRQs that storms were detected on. Polling enablement
  	 * happens later in our hotplug work.
  	 */
-	if (storm_detected && dev_priv->display_irqs_enabled)
-		dev_priv->display.hpd_irq_setup(dev_priv);
-	spin_unlock(&dev_priv->irq_lock);
+	if (storm_detected && i915->display_irqs_enabled)
+		i915->display.hpd_irq_setup(i915);
+	spin_unlock(&i915->irq_lock);
/*
  	 * Our hotplug handler can grab modeset locks (by calling down into the
@@ -514,14 +514,14 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
  	 * deadlock.
  	 */
  	if (queue_dig)
-		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+		queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
  	if (queue_hp)
-		schedule_work(&dev_priv->hotplug.hotplug_work);
+		schedule_work(&i915->hotplug.hotplug_work);
  }
/**
   * intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This function enables the hotplug support. It requires that interrupts have
   * already been enabled with intel_irq_init_hw(). From this point on hotplug and
@@ -533,43 +533,43 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
   *
   * Also see: intel_hpd_poll_init(), which enables connector polling
   */
-void intel_hpd_init(struct drm_i915_private *dev_priv)
+void intel_hpd_init(struct drm_i915_private *i915)
  {
  	int i;
for_each_hpd_pin(i) {
-		dev_priv->hotplug.stats[i].count = 0;
-		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+		i915->hotplug.stats[i].count = 0;
+		i915->hotplug.stats[i].state = HPD_ENABLED;
  	}
- WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
-	schedule_work(&dev_priv->hotplug.poll_init_work);
+	WRITE_ONCE(i915->hotplug.poll_enabled, false);
+	schedule_work(&i915->hotplug.poll_init_work);
/*
  	 * Interrupt setup is already guaranteed to be single-threaded, this is
  	 * just to make the assert_spin_locked checks happy.
  	 */
-	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		if (dev_priv->display_irqs_enabled)
-			dev_priv->display.hpd_irq_setup(dev_priv);
-		spin_unlock_irq(&dev_priv->irq_lock);
+	if (i915->display_irqs_enabled && i915->display.hpd_irq_setup) {
+		spin_lock_irq(&i915->irq_lock);
+		if (i915->display_irqs_enabled)
+			i915->display.hpd_irq_setup(i915);
+		spin_unlock_irq(&i915->irq_lock);
  	}
  }
static void i915_hpd_poll_init_work(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		container_of(work, struct drm_i915_private,
  			     hotplug.poll_init_work);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct drm_connector *connector;
  	struct drm_connector_list_iter conn_iter;
  	bool enabled;
  	mutex_lock(&dev->mode_config.mutex);

-	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+	enabled = READ_ONCE(i915->hotplug.poll_enabled);
drm_connector_list_iter_begin(dev, &conn_iter);
  	drm_for_each_connector_iter(connector, &conn_iter) {
@@ -582,7 +582,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
  		if (intel_connector->mst_port)
  			continue;
- if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
+		if (!connector->polled && I915_HAS_HOTPLUG(i915) &&
  		    intel_connector->encoder->hpd_pin > HPD_NONE) {
  			connector->polled = enabled ?
  				DRM_CONNECTOR_POLL_CONNECT |
@@ -607,7 +607,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
/**
   * intel_hpd_poll_init - enables/disables polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
   *
   * This function enables polling for all connectors, regardless of whether or
   * not they support hotplug detection. Under certain conditions HPD may not be
@@ -621,9 +621,9 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
   *
   * Also see: intel_hpd_init(), which restores hpd handling.
   */
-void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_init(struct drm_i915_private *i915)
  {
-	WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+	WRITE_ONCE(i915->hotplug.poll_enabled, true);
/*
  	 * We might already be holding dev->mode_config.mutex, so do this in a
@@ -631,57 +631,57 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
  	 * As well, there's no issue if we race here since we always reschedule
  	 * this worker anyway
  	 */
-	schedule_work(&dev_priv->hotplug.poll_init_work);
+	schedule_work(&i915->hotplug.poll_init_work);
  }
-void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+void intel_hpd_init_work(struct drm_i915_private *i915)
  {
-	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
-	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
-	INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
-	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+	INIT_WORK(&i915->hotplug.hotplug_work, i915_hotplug_work_func);
+	INIT_WORK(&i915->hotplug.dig_port_work, i915_digport_work_func);
+	INIT_WORK(&i915->hotplug.poll_init_work, i915_hpd_poll_init_work);
+	INIT_DELAYED_WORK(&i915->hotplug.reenable_work,
  			  intel_hpd_irq_storm_reenable_work);
  }
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+void intel_hpd_cancel_work(struct drm_i915_private *i915)
  {
-	spin_lock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
- dev_priv->hotplug.long_port_mask = 0;
-	dev_priv->hotplug.short_port_mask = 0;
-	dev_priv->hotplug.event_bits = 0;
+	i915->hotplug.long_port_mask = 0;
+	i915->hotplug.short_port_mask = 0;
+	i915->hotplug.event_bits = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
- cancel_work_sync(&dev_priv->hotplug.dig_port_work);
-	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
-	cancel_work_sync(&dev_priv->hotplug.poll_init_work);
-	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+	cancel_work_sync(&i915->hotplug.dig_port_work);
+	cancel_work_sync(&i915->hotplug.hotplug_work);
+	cancel_work_sync(&i915->hotplug.poll_init_work);
+	cancel_delayed_work_sync(&i915->hotplug.reenable_work);
  }
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+bool intel_hpd_disable(struct drm_i915_private *i915, enum hpd_pin pin)
  {
  	bool ret = false;
if (pin == HPD_NONE)
  		return false;
- spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
-		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+	spin_lock_irq(&i915->irq_lock);
+	if (i915->hotplug.stats[pin].state == HPD_ENABLED) {
+		i915->hotplug.stats[pin].state = HPD_DISABLED;
  		ret = true;
  	}
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_unlock_irq(&i915->irq_lock);
return ret;
  }
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+void intel_hpd_enable(struct drm_i915_private *i915, enum hpd_pin pin)
  {
  	if (pin == HPD_NONE)
  		return;
- spin_lock_irq(&dev_priv->irq_lock);
-	dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&i915->irq_lock);
+	i915->hotplug.stats[pin].state = HPD_ENABLED;
+	spin_unlock_irq(&i915->irq_lock);
  }
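
For readers skimming intel_hotplug.c above: the storm detection being renamed here is just a weighted counter inside a time window -- long pulses count +10, short ones +1, and crossing hotplug.hpd_storm_threshold within the window flags a storm. A minimal standalone sketch of that accounting, with invented names and a plain millisecond clock instead of jiffies, not the driver's implementation:

/* Minimal sketch of the HPD storm accounting described in the
 * kernel-doc above: long IRQs count +10, short IRQs +1, and exceeding
 * the threshold within the detection window reports a storm.
 */
#include <stdbool.h>
#include <stdio.h>

#define STORM_DETECT_PERIOD_MS	1000
#define STORM_DEFAULT_THRESHOLD	50

struct pin_stats {
	unsigned long window_start_ms;
	unsigned int count;
};

static bool storm_detect(struct pin_stats *s, unsigned long now_ms, bool long_hpd)
{
	const unsigned int increment = long_hpd ? 10 : 1;

	/* outside the window: restart the count rather than accumulate */
	if (now_ms - s->window_start_ms > STORM_DETECT_PERIOD_MS) {
		s->window_start_ms = now_ms;
		s->count = 0;
	}

	s->count += increment;
	return s->count > STORM_DEFAULT_THRESHOLD;
}

int main(void)
{
	struct pin_stats s = { 0 };
	unsigned long t;
	bool storm = false;

	/* 6 long pulses in a burst: 6 * 10 > 50, so a storm is reported */
	for (t = 0; t < 6; t++)
		storm = storm_detect(&s, t, true);

	printf("storm detected: %s\n", storm ? "yes" : "no");
	return 0;
}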
diff --git a/drivers/gpu/drm/i915/intel_hotplug.h b/drivers/gpu/drm/i915/intel_hotplug.h
index 805f897dbb7a..2fcc3148b369 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/intel_hotplug.h
@@ -14,17 +14,17 @@ struct drm_i915_private;
  struct intel_connector;
  struct intel_encoder;
-void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
+void intel_hpd_poll_init(struct drm_i915_private *i915);
  bool intel_encoder_hotplug(struct intel_encoder *encoder,
  			   struct intel_connector *connector);
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct drm_i915_private *i915,
  			   u32 pin_mask, u32 long_mask);
-void intel_hpd_init(struct drm_i915_private *dev_priv);
-void intel_hpd_init_work(struct drm_i915_private *dev_priv);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+void intel_hpd_init(struct drm_i915_private *i915);
+void intel_hpd_init_work(struct drm_i915_private *i915);
+void intel_hpd_cancel_work(struct drm_i915_private *i915);
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *i915,
  				   enum port port);
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+bool intel_hpd_disable(struct drm_i915_private *i915, enum hpd_pin pin);
+void intel_hpd_enable(struct drm_i915_private *i915, enum hpd_pin pin);
#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 8572a0588efc..2b23662698a9 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -167,14 +167,14 @@ int intel_huc_auth(struct intel_huc *huc)
   */
  int intel_huc_check_status(struct intel_huc *huc)
  {
-	struct drm_i915_private *dev_priv = huc_to_i915(huc);
+	struct drm_i915_private *i915 = huc_to_i915(huc);
  	intel_wakeref_t wakeref;
  	bool status = false;
- if (!HAS_HUC(dev_priv))
+	if (!HAS_HUC(i915))
  		return -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref)
+	with_intel_runtime_pm(i915, wakeref)
  		status = (I915_READ(huc->status.reg) & huc->status.mask) ==
  			  huc->status.value;
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 05cbf8338f53..03aed0664726 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -69,34 +69,34 @@ MODULE_FIRMWARE(I915_ICL_HUC_UCODE);
  static void huc_fw_select(struct intel_uc_fw *huc_fw)
  {
  	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
-	struct drm_i915_private *dev_priv = huc_to_i915(huc);
+	struct drm_i915_private *i915 = huc_to_i915(huc);
  	GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);

-	if (!HAS_HUC(dev_priv))
+	if (!HAS_HUC(i915))
  		return;
if (i915_modparams.huc_firmware_path) {
  		huc_fw->path = i915_modparams.huc_firmware_path;
  		huc_fw->major_ver_wanted = 0;
  		huc_fw->minor_ver_wanted = 0;
-	} else if (IS_SKYLAKE(dev_priv)) {
+	} else if (IS_SKYLAKE(i915)) {
  		huc_fw->path = I915_SKL_HUC_UCODE;
  		huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
  		huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
-	} else if (IS_BROXTON(dev_priv)) {
+	} else if (IS_BROXTON(i915)) {
  		huc_fw->path = I915_BXT_HUC_UCODE;
  		huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
  		huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
-	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+	} else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
  		huc_fw->path = I915_KBL_HUC_UCODE;
  		huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
  		huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
-	} else if (IS_GEMINILAKE(dev_priv)) {
+	} else if (IS_GEMINILAKE(i915)) {
  		huc_fw->path = I915_GLK_HUC_UCODE;
  		huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
  		huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
-	} else if (IS_ICELAKE(dev_priv)) {
+	} else if (IS_ICELAKE(i915)) {
  		huc_fw->path = I915_ICL_HUC_UCODE;
  		huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR;
  		huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR;
@@ -135,8 +135,8 @@ static void huc_xfer_rsa(struct intel_huc *huc)
  static int huc_xfer_ucode(struct intel_huc *huc)
  {
  	struct intel_uc_fw *huc_fw = &huc->fw;
-	struct drm_i915_private *dev_priv = huc_to_i915(huc);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = huc_to_i915(huc);
+	struct intel_uncore *uncore = &i915->uncore;
  	unsigned long offset = 0;
  	u32 size;
  	int ret;
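
The HuC firmware selection just above is the usual "explicit override beats a per-platform default table" scheme. A hedged, self-contained sketch of that shape, using plain strings and invented blob names rather than intel_uc_fw:

/* Sketch of the firmware-path selection pattern in huc_fw_select():
 * a user-supplied path wins and skips version checks, otherwise a
 * per-platform default is used.  Blob names are invented.
 */
#include <stdio.h>

enum fw_platform { FW_SKL, FW_BXT, FW_KBL, FW_GLK, FW_ICL };

struct fw_request {
	const char *path;
	int major_wanted;
	int minor_wanted;
};

static void fw_select(struct fw_request *fw, enum fw_platform plat,
		      const char *override_path)
{
	if (override_path) {
		/* user-supplied path: skip version checking entirely */
		fw->path = override_path;
		fw->major_wanted = 0;
		fw->minor_wanted = 0;
		return;
	}

	switch (plat) {
	case FW_SKL: fw->path = "i915/skl_huc_example.bin"; fw->major_wanted = 1; break;
	case FW_ICL: fw->path = "i915/icl_huc_example.bin"; fw->major_wanted = 8; break;
	default:     fw->path = "i915/generic_huc_example.bin"; fw->major_wanted = 0; break;
	}
	fw->minor_wanted = 0;
}

int main(void)
{
	struct fw_request fw = { 0 };

	fw_select(&fw, FW_ICL, NULL);
	printf("default: %s (v%d.%d)\n", fw.path, fw.major_wanted, fw.minor_wanted);

	fw_select(&fw, FW_ICL, "/lib/firmware/custom_huc.bin");
	printf("override: %s\n", fw.path);
	return 0;
}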
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index b19800b58442..d95ae05ccebc 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -73,12 +73,12 @@
  #include "i915_drv.h"
  #include "intel_lpe_audio.h"
-#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
+#define HAS_LPE_AUDIO(i915) ((i915)->lpe_audio.platdev != NULL)
static struct platform_device *
-lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
+lpe_audio_platdev_create(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct platform_device_info pinfo = {};
  	struct resource *rsc;
  	struct platform_device *platdev;
@@ -94,7 +94,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
  		return ERR_PTR(-ENOMEM);
  	}
- rsc[0].start = rsc[0].end = dev_priv->lpe_audio.irq;
+	rsc[0].start    = rsc[0].end = i915->lpe_audio.irq;
  	rsc[0].flags    = IORESOURCE_IRQ;
  	rsc[0].name     = "hdmi-lpe-audio-irq";
@@ -114,8 +114,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
  	pinfo.size_data = sizeof(*pdata);
  	pinfo.dma_mask = DMA_BIT_MASK(32);
- pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes;
-	pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
+	pdata->num_pipes = INTEL_INFO(i915)->num_pipes;
+	pdata->num_ports = IS_CHERRYVIEW(i915) ? 3 : 2; /* B,C,D or B,C */
  	pdata->port[0].pipe = -1;
  	pdata->port[1].pipe = -1;
  	pdata->port[2].pipe = -1;
@@ -135,7 +135,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
  	return platdev;
  }
-static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
+static void lpe_audio_platdev_destroy(struct drm_i915_private *i915)
  {
  	/* XXX Note that platform_device_register_full() allocates a dma_mask
  	 * and never frees it. We can't free it here as we cannot guarantee
@@ -145,7 +145,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
  	 * than us fiddle with its internals.
  	 */
- platform_device_unregister(dev_priv->lpe_audio.platdev);
+	platform_device_unregister(i915->lpe_audio.platdev);
  }
static void lpe_audio_irq_unmask(struct irq_data *d)
@@ -162,24 +162,24 @@ static struct irq_chip lpe_audio_irqchip = {
  	.irq_unmask = lpe_audio_irq_unmask,
  };
-static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
+static int lpe_audio_irq_init(struct drm_i915_private *i915)
  {
-	int irq = dev_priv->lpe_audio.irq;
+	int irq = i915->lpe_audio.irq;
- WARN_ON(!intel_irqs_enabled(dev_priv));
+	WARN_ON(!intel_irqs_enabled(i915));
  	irq_set_chip_and_handler_name(irq,
  				&lpe_audio_irqchip,
  				handle_simple_irq,
  				"hdmi_lpe_audio_irq_handler");
- return irq_set_chip_data(irq, dev_priv);
+	return irq_set_chip_data(irq, i915);
  }
-static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
+static bool lpe_audio_detect(struct drm_i915_private *i915)
  {
  	int lpe_present = false;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		static const struct pci_device_id atom_hdaudio_ids[] = {
  			/* Baytrail */
  			{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f04)},
@@ -196,21 +196,21 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
  	return lpe_present;
  }
-static int lpe_audio_setup(struct drm_i915_private *dev_priv)
+static int lpe_audio_setup(struct drm_i915_private *i915)
  {
  	int ret;
- dev_priv->lpe_audio.irq = irq_alloc_desc(0);
-	if (dev_priv->lpe_audio.irq < 0) {
+	i915->lpe_audio.irq = irq_alloc_desc(0);
+	if (i915->lpe_audio.irq < 0) {
  		DRM_ERROR("Failed to allocate IRQ desc: %d\n",
-			dev_priv->lpe_audio.irq);
-		ret = dev_priv->lpe_audio.irq;
+			i915->lpe_audio.irq);
+		ret = i915->lpe_audio.irq;
  		goto err;
  	}
- DRM_DEBUG("irq = %d\n", dev_priv->lpe_audio.irq);
+	DRM_DEBUG("irq = %d\n", i915->lpe_audio.irq);
- ret = lpe_audio_irq_init(dev_priv);
+	ret = lpe_audio_irq_init(i915);
if (ret) {
  		DRM_ERROR("Failed to initialize irqchip for lpe audio: %d\n",
@@ -218,10 +218,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
  		goto err_free_irq;
  	}
- dev_priv->lpe_audio.platdev = lpe_audio_platdev_create(dev_priv);
+	i915->lpe_audio.platdev = lpe_audio_platdev_create(i915);
- if (IS_ERR(dev_priv->lpe_audio.platdev)) {
-		ret = PTR_ERR(dev_priv->lpe_audio.platdev);
+	if (IS_ERR(i915->lpe_audio.platdev)) {
+		ret = PTR_ERR(i915->lpe_audio.platdev);
  		DRM_ERROR("Failed to create lpe audio platform device: %d\n",
  			ret);
  		goto err_free_irq;
@@ -234,28 +234,28 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
return 0;
  err_free_irq:
-	irq_free_desc(dev_priv->lpe_audio.irq);
+	irq_free_desc(i915->lpe_audio.irq);
  err:
-	dev_priv->lpe_audio.irq = -1;
-	dev_priv->lpe_audio.platdev = NULL;
+	i915->lpe_audio.irq = -1;
+	i915->lpe_audio.platdev = NULL;
  	return ret;
  }
/**
   * intel_lpe_audio_irq_handler() - forwards the LPE audio irq
- * @dev_priv: the i915 drm device private data
+ * @i915: the i915 drm device private data
   *
   * the LPE Audio irq is forwarded to the irq handler registered by LPE audio
   * driver.
   */
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
+void intel_lpe_audio_irq_handler(struct drm_i915_private *i915)
  {
  	int ret;
- if (!HAS_LPE_AUDIO(dev_priv))
+	if (!HAS_LPE_AUDIO(i915))
  		return;
- ret = generic_handle_irq(dev_priv->lpe_audio.irq);
+	ret = generic_handle_irq(i915->lpe_audio.irq);
  	if (ret)
  		DRM_ERROR_RATELIMITED("error handling LPE audio irq: %d\n",
  				ret);
@@ -264,17 +264,17 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
  /**
   * intel_lpe_audio_init() - detect and setup the bridge between HDMI LPE Audio
   * driver and i915
- * @dev_priv: the i915 drm device private data
+ * @i915: the i915 drm device private data
   *
   * Return: 0 if successful. non-zero if detection or
  * allocation/initialization fails
   */
-int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
+int intel_lpe_audio_init(struct drm_i915_private *i915)
  {
  	int ret = -ENODEV;
- if (lpe_audio_detect(dev_priv)) {
-		ret = lpe_audio_setup(dev_priv);
+	if (lpe_audio_detect(i915)) {
+		ret = lpe_audio_setup(i915);
  		if (ret < 0)
  			DRM_ERROR("failed to setup LPE Audio bridge\n");
  	}
@@ -284,31 +284,31 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
  /**
   * intel_lpe_audio_teardown() - destroy the bridge between HDMI LPE
   * audio driver and i915
- * @dev_priv: the i915 drm device private data
+ * @i915: the i915 drm device private data
   *
   * release all the resources for LPE audio <-> i915 bridge.
   */
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
+void intel_lpe_audio_teardown(struct drm_i915_private *i915)
  {
  	struct irq_desc *desc;
- if (!HAS_LPE_AUDIO(dev_priv))
+	if (!HAS_LPE_AUDIO(i915))
  		return;
- desc = irq_to_desc(dev_priv->lpe_audio.irq);
+	desc = irq_to_desc(i915->lpe_audio.irq);
- lpe_audio_platdev_destroy(dev_priv);
+	lpe_audio_platdev_destroy(i915);
- irq_free_desc(dev_priv->lpe_audio.irq);
+	irq_free_desc(i915->lpe_audio.irq);
- dev_priv->lpe_audio.irq = -1;
-	dev_priv->lpe_audio.platdev = NULL;
+	i915->lpe_audio.irq = -1;
+	i915->lpe_audio.platdev = NULL;
  }
/**
   * intel_lpe_audio_notify() - notify lpe audio event
   * audio driver and i915
- * @dev_priv: the i915 drm device private data
+ * @i915: the i915 drm device private data
   * @pipe: pipe
   * @port: port
   * @eld : ELD data
@@ -317,7 +317,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
   *
   * Notify lpe audio driver of eld change.
   */
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+void intel_lpe_audio_notify(struct drm_i915_private *i915,
  			    enum pipe pipe, enum port port,
  			    const void *eld, int ls_clock, bool dp_output)
  {
@@ -326,10 +326,10 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
  	struct intel_hdmi_lpe_audio_port_pdata *ppdata;
  	u32 audio_enable;
- if (!HAS_LPE_AUDIO(dev_priv))
+	if (!HAS_LPE_AUDIO(i915))
  		return;
- pdata = dev_get_platdata(&dev_priv->lpe_audio.platdev->dev);
+	pdata = dev_get_platdata(&i915->lpe_audio.platdev->dev);
  	ppdata = &pdata->port[port - PORT_B];
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
@@ -357,7 +357,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
  	}
if (pdata->notify_audio_lpe)
-		pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev, port - PORT_B);
+		pdata->notify_audio_lpe(i915->lpe_audio.platdev, port - PORT_B);
spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
  }
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.h b/drivers/gpu/drm/i915/intel_lpe_audio.h
index f848c5038714..df43abce3c95 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.h
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.h
@@ -12,10 +12,10 @@ enum pipe;
  enum port;
  struct drm_i915_private;
-int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+int  intel_lpe_audio_init(struct drm_i915_private *i915);
+void intel_lpe_audio_teardown(struct drm_i915_private *i915);
+void intel_lpe_audio_irq_handler(struct drm_i915_private *i915);
+void intel_lpe_audio_notify(struct drm_i915_private *i915,
  			    enum pipe pipe, enum port port,
  			    const void *eld, int ls_clock, bool dp_output);
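
For context on the LPE audio file just converted: the bridge boils down to i915 owning a platform device and poking its driver through a callback stored in the shared platform data. A rough standalone sketch of that notify pattern -- struct and function names here are invented, and the locking plus real platform-device plumbing are omitted:

/* Rough sketch of the notify pattern seen in intel_lpe_audio_notify():
 * update the shared per-port data, then call whatever callback the
 * audio driver registered, if any.
 */
#include <stdio.h>
#include <string.h>

struct lpe_port_pdata {
	char eld[128];
	int ls_clock;
};

struct lpe_bridge_pdata {
	struct lpe_port_pdata port[3];		/* ports B..D */
	void (*notify_audio)(struct lpe_bridge_pdata *pdata, int port);
};

static void audio_driver_notify(struct lpe_bridge_pdata *pdata, int port)
{
	printf("audio driver: port %d, ls_clock %d\n", port, pdata->port[port].ls_clock);
}

static void bridge_notify(struct lpe_bridge_pdata *pdata, int port,
			  const char *eld, int ls_clock)
{
	if (!pdata)			/* no bridge present: nothing to do */
		return;

	strncpy(pdata->port[port].eld, eld, sizeof(pdata->port[port].eld) - 1);
	pdata->port[port].ls_clock = ls_clock;

	if (pdata->notify_audio)	/* forward to the registered handler */
		pdata->notify_audio(pdata, port);
}

int main(void)
{
	struct lpe_bridge_pdata pdata = { .notify_audio = audio_driver_notify };

	bridge_notify(&pdata, 1, "fake-eld", 148500);
	return 0;
}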
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 7028d0cf3bb1..695465f97195 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -555,10 +555,10 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
  	struct intel_dp *dp = &intel_dig_port->dp;
  	struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
  	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_connector *connector = &dp->attached_connector->base;
- if (!HAS_LSPCON(dev_priv)) {
+	if (!HAS_LSPCON(i915)) {
  		DRM_ERROR("LSPCON is not supported on this platform\n");
  		return false;
  	}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index efefed62a7f8..0405f9834827 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -80,7 +80,7 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
  	return container_of(encoder, struct intel_lvds_encoder, base.base);
  }
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct drm_i915_private *i915,
  			     i915_reg_t lvds_reg, enum pipe *pipe)
  {
  	u32 val;
@@ -88,7 +88,7 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
  	val = I915_READ(lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
-	if (HAS_PCH_CPT(dev_priv))
+	if (HAS_PCH_CPT(i915))
  		*pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
  	else
  		*pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
@@ -99,19 +99,19 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
  static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
  				    enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
  	intel_wakeref_t wakeref;
  	bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     encoder->power_domain);
  	if (!wakeref)
  		return false;
- ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
+	ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+	intel_display_power_put(i915, encoder->power_domain, wakeref);
return ret;
  }
@@ -119,7 +119,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
  static void intel_lvds_get_config(struct intel_encoder *encoder,
  				  struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
  	u32 tmp, flags = 0;
@@ -137,12 +137,12 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
  	pipe_config->base.adjusted_mode.flags |= flags;

-	if (INTEL_GEN(dev_priv) < 5)
+	if (INTEL_GEN(i915) < 5)
  		pipe_config->gmch_pfit.lvds_border_bits =
  			tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
-	if (INTEL_GEN(dev_priv) < 4) {
+	if (INTEL_GEN(i915) < 4) {
  		tmp = I915_READ(PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
@@ -151,7 +151,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
  	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
  }
-static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_get_hw_state(struct drm_i915_private *i915,
  					struct intel_lvds_pps *pps)
  {
  	u32 val;
@@ -180,7 +180,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
  	/* Convert from 100ms to 100us units */
  	pps->t4 = val * 1000;
- if (INTEL_GEN(dev_priv) <= 4 &&
+	if (INTEL_GEN(i915) <= 4 &&
  	    pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
  		DRM_DEBUG_KMS("Panel power timings uninitialized, "
  			      "setting defaults\n");
@@ -198,7 +198,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
  			 pps->divider, pps->port, pps->powerdown_on_reset);
  }
-static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_init_hw(struct drm_i915_private *i915,
  				   struct intel_lvds_pps *pps)
  {
  	u32 val;
@@ -229,26 +229,26 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
  				  const struct drm_connector_state *conn_state)
  {
  	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
  	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  	int pipe = crtc->pipe;
  	u32 temp;
- if (HAS_PCH_SPLIT(dev_priv)) {
-		assert_fdi_rx_pll_disabled(dev_priv, pipe);
-		assert_shared_dpll_disabled(dev_priv,
+	if (HAS_PCH_SPLIT(i915)) {
+		assert_fdi_rx_pll_disabled(i915, pipe);
+		assert_shared_dpll_disabled(i915,
  					    pipe_config->shared_dpll);
  	} else {
-		assert_pll_disabled(dev_priv, pipe);
+		assert_pll_disabled(i915, pipe);
  	}
- intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps);
+	intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
temp = lvds_encoder->init_lvds_val;
  	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev_priv)) {
+	if (HAS_PCH_CPT(i915)) {
  		temp &= ~LVDS_PIPE_SEL_MASK_CPT;
  		temp |= LVDS_PIPE_SEL_CPT(pipe);
  	} else {
@@ -283,7 +283,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
  	 * special lvds dither control bit on pch-split platforms, dithering is
  	 * only controlled through the PIPECONF reg.
  	 */
-	if (IS_GEN(dev_priv, 4)) {
+	if (IS_GEN(i915, 4)) {
  		/*
  		 * Bspec wording suggests that LVDS port dithering only exists
  		 * for 18bpp panels.
@@ -311,14 +311,14 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
  {
  	struct drm_device *dev = encoder->base.dev;
  	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);

  	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
  	POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    PP_STATUS(0), PP_ON, PP_ON, 5000))
  		DRM_ERROR("timed out waiting for panel to power on\n");
@@ -330,10 +330,10 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
  			       const struct drm_connector_state *old_conn_state)
  {
  	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    PP_STATUS(0), PP_ON, 0, 1000))
  		DRM_ERROR("timed out waiting for panel to power off\n");
@@ -389,7 +389,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
  				     struct intel_crtc_state *pipe_config,
  				     struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
  	struct intel_lvds_encoder *lvds_encoder =
  		to_lvds_encoder(&intel_encoder->base);
  	struct intel_connector *intel_connector =
@@ -399,7 +399,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
  	unsigned int lvds_bpp;
/* Should never happen!! */
-	if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
+	if (INTEL_GEN(i915) < 4 && intel_crtc->pipe == 0) {
  		DRM_ERROR("Can't support LVDS on pipe A\n");
  		return -EINVAL;
  	}
@@ -429,7 +429,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
  	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
  		return -EINVAL;
- if (HAS_PCH_SPLIT(dev_priv)) {
+	if (HAS_PCH_SPLIT(i915)) {
  		pipe_config->has_pch_encoder = true;
intel_pch_panel_fitting(intel_crtc, pipe_config,
@@ -751,11 +751,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
  	{ }	/* terminating entry */
  };
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
  {
  	struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+	for_each_intel_encoder(&i915->drm, encoder) {
  		if (encoder->type == INTEL_OUTPUT_LVDS)
  			return encoder;
  	}
@@ -763,9 +763,9 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
  	return NULL;
  }
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+bool intel_is_dual_link_lvds(struct drm_i915_private *i915)
  {
-	struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
+	struct intel_encoder *encoder = intel_get_lvds_encoder(i915);
return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
  }
@@ -774,7 +774,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
  {
  	struct drm_device *dev = lvds_encoder->base.base.dev;
  	unsigned int val;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
/* use the module option value if specified */
  	if (i915_modparams.lvds_channel_mode > 0)
@@ -794,26 +794,26 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
  	 * register is uninitialized.
  	 */
  	val = I915_READ(lvds_encoder->reg);
-	if (HAS_PCH_CPT(dev_priv))
+	if (HAS_PCH_CPT(i915))
  		val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
  	else
  		val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
  	if (val == 0)
-		val = dev_priv->vbt.bios_lvds_val;
+		val = i915->vbt.bios_lvds_val;
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
  }
/**
   * intel_lvds_init - setup LVDS connectors on this device
- * @dev_priv: i915 device
+ * @i915: i915 device
   *
   * Create the connector, register the LVDS DDC bus, and try to figure out what
   * modes we can display on the LVDS panel (if present).
   */
-void intel_lvds_init(struct drm_i915_private *dev_priv)
+void intel_lvds_init(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct intel_lvds_encoder *lvds_encoder;
  	struct intel_encoder *intel_encoder;
  	struct intel_connector *intel_connector;
@@ -829,30 +829,30 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
/* Skip init on machines we know falsely report LVDS */
  	if (dmi_check_system(intel_no_lvds)) {
-		WARN(!dev_priv->vbt.int_lvds_support,
+		WARN(!i915->vbt.int_lvds_support,
  		     "Useless DMI match. Internal LVDS support disabled by VBT\n");
  		return;
  	}
- if (!dev_priv->vbt.int_lvds_support) {
+	if (!i915->vbt.int_lvds_support) {
  		DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
  		return;
  	}
- if (HAS_PCH_SPLIT(dev_priv))
+	if (HAS_PCH_SPLIT(i915))
  		lvds_reg = PCH_LVDS;
  	else
  		lvds_reg = LVDS;
  	lvds = I915_READ(lvds_reg);
-	if (HAS_PCH_SPLIT(dev_priv)) {
+	if (HAS_PCH_SPLIT(i915)) {
  		if ((lvds & LVDS_DETECTED) == 0)
  			return;
  	}
pin = GMBUS_PIN_PANEL;
-	if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
+	if (!intel_bios_is_lvds_present(i915, &pin)) {
  		if ((lvds & LVDS_PORT_EN) == 0) {
  			DRM_DEBUG_KMS("LVDS is not present in VBT\n");
  			return;
@@ -884,7 +884,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
  	intel_encoder->enable = intel_enable_lvds;
  	intel_encoder->pre_enable = intel_pre_enable_lvds;
  	intel_encoder->compute_config = intel_lvds_compute_config;
-	if (HAS_PCH_SPLIT(dev_priv)) {
+	if (HAS_PCH_SPLIT(i915)) {
  		intel_encoder->disable = pch_disable_lvds;
  		intel_encoder->post_disable = pch_post_disable_lvds;
  	} else {
@@ -901,9 +901,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
  	intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
  	intel_encoder->port = PORT_NONE;
  	intel_encoder->cloneable = 0;
-	if (HAS_PCH_SPLIT(dev_priv))
+	if (HAS_PCH_SPLIT(i915))
  		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-	else if (IS_GEN(dev_priv, 4))
+	else if (IS_GEN(i915, 4))
  		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
  	else
  		intel_encoder->crtc_mask = (1 << 1);
@@ -922,7 +922,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
  	drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
  	connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
+	intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps);
  	lvds_encoder->init_lvds_val = lvds;
/*
@@ -940,10 +940,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
  	mutex_lock(&dev->mode_config.mutex);
  	if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
  		edid = drm_get_edid_switcheroo(connector,
-				    intel_gmbus_get_adapter(dev_priv, pin));
+				    intel_gmbus_get_adapter(i915, pin));
  	else
  		edid = drm_get_edid(connector,
-				    intel_gmbus_get_adapter(dev_priv, pin));
+				    intel_gmbus_get_adapter(i915, pin));
  	if (edid) {
  		if (drm_add_edid_modes(connector, edid)) {
  			drm_connector_update_edid_property(connector,
diff --git a/drivers/gpu/drm/i915/intel_lvds.h b/drivers/gpu/drm/i915/intel_lvds.h
index bc9c8b84ba2f..4afdcb4d6352 100644
--- a/drivers/gpu/drm/i915/intel_lvds.h
+++ b/drivers/gpu/drm/i915/intel_lvds.h
@@ -13,10 +13,10 @@
  enum pipe;
  struct drm_i915_private;
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct drm_i915_private *i915,
  			     i915_reg_t lvds_reg, enum pipe *pipe);
-void intel_lvds_init(struct drm_i915_private *dev_priv);
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
+void intel_lvds_init(struct drm_i915_private *i915);
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915);
+bool intel_is_dual_link_lvds(struct drm_i915_private *i915);
#endif /* __INTEL_LVDS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 8fa1159d097f..6cbe775d7537 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -266,11 +266,11 @@ struct opregion_asle_ext {
  #define MAX_DSLP 1500
-static int swsci(struct drm_i915_private *dev_priv,
+static int swsci(struct drm_i915_private *i915,
  		 u32 function, u32 parm, u32 *parm_out)
  {
-	struct opregion_swsci *swsci = dev_priv->opregion.swsci;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct opregion_swsci *swsci = i915->opregion.swsci;
+	struct pci_dev *pdev = i915->drm.pdev;
  	u32 main_function, sub_function, scic;
  	u16 swsci_val;
  	u32 dslp;
@@ -285,11 +285,11 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Check if we can call the function. See swsci_setup for details. */
  	if (main_function == SWSCI_SBCB) {
-		if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+		if ((i915->opregion.swsci_sbcb_sub_functions &
  		     (1 << sub_function)) == 0)
  			return -EINVAL;
  	} else if (main_function == SWSCI_GBDA) {
-		if ((dev_priv->opregion.swsci_gbda_sub_functions &
+		if ((i915->opregion.swsci_gbda_sub_functions &
  		     (1 << sub_function)) == 0)
  			return -EINVAL;
  	}
@@ -363,13 +363,13 @@ static int swsci(struct drm_i915_private *dev_priv,
  int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
  				  bool enable)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
  	u32 parm = 0;
  	u32 type = 0;
  	u32 port;
/* don't care about old stuff for now */
-	if (!HAS_DDI(dev_priv))
+	if (!HAS_DDI(i915))
  		return 0;
if (intel_encoder->type == INTEL_OUTPUT_DSI)
@@ -409,7 +409,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
  	parm |= type << (16 + port * 3);
-	return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+	return swsci(i915, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
  }
static const struct {
@@ -423,29 +423,29 @@ static const struct {
  	{ PCI_D3cold,	0x04 },
  };
-int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+int intel_opregion_notify_adapter(struct drm_i915_private *i915,
  				  pci_power_t state)
  {
  	int i;
- if (!HAS_DDI(dev_priv))
+	if (!HAS_DDI(i915))
  		return 0;
for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
  		if (state == power_state_map[i].pci_power_state)
-			return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
+			return swsci(i915, SWSCI_SBCB_ADAPTER_POWER_STATE,
  				     power_state_map[i].parm, NULL);
  	}
return -EINVAL;
  }
-static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
+static u32 asle_set_backlight(struct drm_i915_private *i915, u32 bclp)
  {
  	struct intel_connector *connector;
  	struct drm_connector_list_iter conn_iter;
-	struct opregion_asle *asle = dev_priv->opregion.asle;
-	struct drm_device *dev = &dev_priv->drm;
+	struct opregion_asle *asle = i915->opregion.asle;
+	struct drm_device *dev = &i915->drm;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); @@ -480,7 +480,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
  	return 0;
  }
-static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
+static u32 asle_set_als_illum(struct drm_i915_private *i915, u32 alsi)
  {
  	/* alsi is the current ALS reading in lux. 0 indicates below sensor
  	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
@@ -488,13 +488,13 @@ static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
  	return ASLC_ALS_ILLUM_FAILED;
  }
-static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
+static u32 asle_set_pwm_freq(struct drm_i915_private *i915, u32 pfmb)
  {
  	DRM_DEBUG_DRIVER("PWM freq is not supported\n");
  	return ASLC_PWM_FREQ_FAILED;
  }
-static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
+static u32 asle_set_pfit(struct drm_i915_private *i915, u32 pfit)
  {
  	/* Panel fitting is currently controlled by the X code, so this is a
  	   noop until modesetting support works fully */
@@ -502,13 +502,13 @@ static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
  	return ASLC_PFIT_FAILED;
  }
-static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
+static u32 asle_set_supported_rotation_angles(struct drm_i915_private *i915, u32 srot)
  {
  	DRM_DEBUG_DRIVER("SROT is not supported\n");
  	return ASLC_ROTATION_ANGLES_FAILED;
  }
-static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
+static u32 asle_set_button_array(struct drm_i915_private *i915, u32 iuer)
  {
  	if (!iuer)
  		DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
@@ -526,7 +526,7 @@ static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
  	return ASLC_BUTTON_ARRAY_FAILED;
  }
-static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
+static u32 asle_set_convertible(struct drm_i915_private *i915, u32 iuer)
  {
  	if (iuer & ASLE_IUER_CONVERTIBLE)
  		DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
@@ -536,7 +536,7 @@ static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
  	return ASLC_CONVERTIBLE_FAILED;
  }
-static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
+static u32 asle_set_docking(struct drm_i915_private *i915, u32 iuer)
  {
  	if (iuer & ASLE_IUER_DOCKING)
  		DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
@@ -546,7 +546,7 @@ static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
  	return ASLC_DOCKING_FAILED;
  }
-static u32 asle_isct_state(struct drm_i915_private *dev_priv)
+static u32 asle_isct_state(struct drm_i915_private *i915)
  {
  	DRM_DEBUG_DRIVER("ISCT is not supported\n");
  	return ASLC_ISCT_STATE_FAILED;
@@ -556,9 +556,9 @@ static void asle_work(struct work_struct *work)
  {
  	struct intel_opregion *opregion =
  		container_of(work, struct intel_opregion, asle_work);
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		container_of(opregion, struct drm_i915_private, opregion);
-	struct opregion_asle *asle = dev_priv->opregion.asle;
+	struct opregion_asle *asle = i915->opregion.asle;
  	u32 aslc_stat = 0;
  	u32 aslc_req;
@@ -574,40 +574,40 @@ static void asle_work(struct work_struct *work)
  	}
if (aslc_req & ASLC_SET_ALS_ILLUM)
-		aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
+		aslc_stat |= asle_set_als_illum(i915, asle->alsi);
if (aslc_req & ASLC_SET_BACKLIGHT)
-		aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
+		aslc_stat |= asle_set_backlight(i915, asle->bclp);
if (aslc_req & ASLC_SET_PFIT)
-		aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
+		aslc_stat |= asle_set_pfit(i915, asle->pfit);
if (aslc_req & ASLC_SET_PWM_FREQ)
-		aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
+		aslc_stat |= asle_set_pwm_freq(i915, asle->pfmb);
if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
-		aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
+		aslc_stat |= asle_set_supported_rotation_angles(i915,
  							asle->srot);
if (aslc_req & ASLC_BUTTON_ARRAY)
-		aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
+		aslc_stat |= asle_set_button_array(i915, asle->iuer);
if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
-		aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
+		aslc_stat |= asle_set_convertible(i915, asle->iuer);
if (aslc_req & ASLC_DOCKING_INDICATOR)
-		aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
+		aslc_stat |= asle_set_docking(i915, asle->iuer);
if (aslc_req & ASLC_ISCT_STATE_CHANGE)
-		aslc_stat |= asle_isct_state(dev_priv);
+		aslc_stat |= asle_isct_state(i915);
asle->aslc = aslc_stat;
  }
-void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+void intel_opregion_asle_intr(struct drm_i915_private *i915)
  {
-	if (dev_priv->opregion.asle)
-		schedule_work(&dev_priv->opregion.asle_work);
+	if (i915->opregion.asle)
+		schedule_work(&i915->opregion.asle_work);
  }
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -702,9 +702,9 @@ static u32 acpi_display_type(struct intel_connector *connector)
  	return display_type;
  }
-static void intel_didl_outputs(struct drm_i915_private *dev_priv)
+static void intel_didl_outputs(struct drm_i915_private *i915)
  {
-	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct intel_opregion *opregion = &i915->opregion;
  	struct intel_connector *connector;
  	struct drm_connector_list_iter conn_iter;
  	int i = 0, max_outputs;
@@ -720,7 +720,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
  	max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
  		ARRAY_SIZE(opregion->acpi->did2);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
  	for_each_intel_connector_iter(connector, &conn_iter) {
  		u32 device_id, type;
@@ -749,9 +749,9 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
  		set_did(opregion, i, 0);
  }
-static void intel_setup_cadls(struct drm_i915_private *dev_priv)
+static void intel_setup_cadls(struct drm_i915_private *i915)
  {
-	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct intel_opregion *opregion = &i915->opregion;
  	struct intel_connector *connector;
  	struct drm_connector_list_iter conn_iter;
  	int i = 0;
@@ -766,7 +766,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
  	 * Note that internal panels should be at the front of the connector
  	 * list already, ensuring they're not left out.
  	 */
-	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
  	for_each_intel_connector_iter(connector, &conn_iter) {
  		if (i >= ARRAY_SIZE(opregion->acpi->cadl))
  			break;
@@ -779,9 +779,9 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
  		opregion->acpi->cadl[i] = 0;
  }
-static void swsci_setup(struct drm_i915_private *dev_priv)
+static void swsci_setup(struct drm_i915_private *i915)
  {
-	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct intel_opregion *opregion = &i915->opregion;
  	bool requested_callbacks = false;
  	u32 tmp;
@@ -790,7 +790,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
  	opregion->swsci_sbcb_sub_functions = 1;
/* We use GBDA to ask for supported GBDA calls. */
-	if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+	if (swsci(i915, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
  		/* make the bits match the sub-function codes */
  		tmp <<= 1;
  		opregion->swsci_gbda_sub_functions |= tmp;
@@ -801,7 +801,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
  	 * must not call interfaces that are not specifically requested by the
  	 * bios.
  	 */
-	if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+	if (swsci(i915, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
  		/* here, the bits already match sub-function codes */
  		opregion->swsci_sbcb_sub_functions |= tmp;
  		requested_callbacks = true;
@@ -812,7 +812,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
  	 * the callback is _requested_. But we still can't call interfaces that
  	 * are not requested.
  	 */
-	if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+	if (swsci(i915, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
  		/* make the bits match the sub-function codes */
  		u32 low = tmp & 0x7ff;
  		u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -854,9 +854,9 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
  	{ }
  };
-static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
+static int intel_load_vbt_firmware(struct drm_i915_private *i915)
  {
-	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct intel_opregion *opregion = &i915->opregion;
  	const struct firmware *fw = NULL;
  	const char *name = i915_modparams.vbt_firmware;
  	int ret;
@@ -864,7 +864,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
  	if (!name || !*name)
  		return -ENOENT;
- ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
+	ret = request_firmware(&fw, name, &i915->drm.pdev->dev);
  	if (ret) {
  		DRM_ERROR("Requesting VBT firmware \"%s\" failed (%d)\n",
  			  name, ret);
@@ -891,10 +891,10 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
  	return ret;
  }
-int intel_opregion_setup(struct drm_i915_private *dev_priv)
+int intel_opregion_setup(struct drm_i915_private *i915)
  {
-	struct intel_opregion *opregion = &dev_priv->opregion;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct intel_opregion *opregion = &i915->opregion;
+	struct pci_dev *pdev = i915->drm.pdev;
  	u32 asls, mboxes;
  	char buf[sizeof(OPREGION_SIGNATURE)];
  	int err = 0;
@@ -945,7 +945,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
  	if (mboxes & MBOX_SWSCI) {
  		DRM_DEBUG_DRIVER("SWSCI supported\n");
  		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
-		swsci_setup(dev_priv);
+		swsci_setup(i915);
  	}
if (mboxes & MBOX_ASLE) {
@@ -958,7 +958,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
  	if (mboxes & MBOX_ASLE_EXT)
  		DRM_DEBUG_DRIVER("ASLE extension supported\n");
- if (intel_load_vbt_firmware(dev_priv) == 0)
+	if (intel_load_vbt_firmware(i915) == 0)
  		goto out;
if (dmi_check_system(intel_no_opregion_vbt))
@@ -1043,12 +1043,12 @@ static const struct dmi_system_id intel_use_opregion_panel_type[] = {
  };
int
-intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
+intel_opregion_get_panel_type(struct drm_i915_private *i915)
  {
  	u32 panel_details;
  	int ret;
- ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+	ret = swsci(i915, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
  	if (ret) {
  		DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
  			      ret);
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index 4aa68ffbd30e..8e0b7bdecbe4 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -56,47 +56,47 @@ struct intel_opregion {
  #ifdef CONFIG_ACPI
-int intel_opregion_setup(struct drm_i915_private *dev_priv);
+int intel_opregion_setup(struct drm_i915_private *i915);
-void intel_opregion_register(struct drm_i915_private *dev_priv);
-void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+void intel_opregion_register(struct drm_i915_private *i915);
+void intel_opregion_unregister(struct drm_i915_private *i915);
-void intel_opregion_resume(struct drm_i915_private *dev_priv);
-void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+void intel_opregion_resume(struct drm_i915_private *i915);
+void intel_opregion_suspend(struct drm_i915_private *i915,
  			    pci_power_t state);
-void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
+void intel_opregion_asle_intr(struct drm_i915_private *i915);
  int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
  				  bool enable);
-int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+int intel_opregion_notify_adapter(struct drm_i915_private *i915,
  				  pci_power_t state);
-int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
+int intel_opregion_get_panel_type(struct drm_i915_private *i915);
  #else /* CONFIG_ACPI*/
-static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
+static inline int intel_opregion_setup(struct drm_i915_private *i915)
  {
  	return 0;
  }
-static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_register(struct drm_i915_private *i915)
  {
  }
-static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_unregister(struct drm_i915_private *i915)
  {
  }
-static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_resume(struct drm_i915_private *i915)
  {
  }
-static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+static inline void intel_opregion_suspend(struct drm_i915_private *i915,
  					  pci_power_t state)
  {
  }
-static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_asle_intr(struct drm_i915_private *i915)
  {
  }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a2ac06a08715..802bc4c72ecd 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,10 +193,10 @@ struct intel_overlay {
  	struct i915_active_request last_flip;
  };
-static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
+static void i830_overlay_clock_gating(struct drm_i915_private *i915,
  				      bool enable)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	u8 val;
/* WA_OVERLAY_CLKGATE:alm */
@@ -247,7 +247,7 @@ static struct i915_request *alloc_request(struct intel_overlay *overlay)
  /* overlay needs to be disable in OCMD reg */
  static int intel_overlay_on(struct intel_overlay *overlay)
  {
-	struct drm_i915_private *dev_priv = overlay->i915;
+	struct drm_i915_private *i915 = overlay->i915;
  	struct i915_request *rq;
  	u32 *cs;
@@ -265,8 +265,8 @@ static int intel_overlay_on(struct intel_overlay *overlay)
  	overlay->active = true;
-	if (IS_I830(dev_priv))
-		i830_overlay_clock_gating(dev_priv, false);
+	if (IS_I830(i915))
+		i830_overlay_clock_gating(i915, false);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
  	*cs++ = overlay->flip_addr | OFC_UPDATE;
@@ -303,7 +303,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
  				  struct i915_vma *vma,
  				  bool load_polyphase_filter)
  {
-	struct drm_i915_private *dev_priv = overlay->i915;
+	struct drm_i915_private *i915 = overlay->i915;
  	struct i915_request *rq;
  	u32 flip_addr = overlay->flip_addr;
  	u32 tmp, *cs;
@@ -369,7 +369,7 @@ static void intel_overlay_off_tail(struct i915_active_request *active,
  {
  	struct intel_overlay *overlay =
  		container_of(active, typeof(*overlay), last_flip);
-	struct drm_i915_private *dev_priv = overlay->i915;
+	struct drm_i915_private *i915 = overlay->i915;
  	intel_overlay_release_old_vma(overlay);
@@ -377,8 +377,8 @@ static void intel_overlay_off_tail(struct i915_active_request *active,
  	overlay->crtc = NULL;
  	overlay->active = false;
- if (IS_I830(dev_priv))
-		i830_overlay_clock_gating(dev_priv, true);
+	if (IS_I830(i915))
+		i830_overlay_clock_gating(i915, true);
  }
/* overlay needs to be disabled in OCMD reg */
@@ -437,11 +437,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
   */
  static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
  {
-	struct drm_i915_private *dev_priv = overlay->i915;
+	struct drm_i915_private *i915 = overlay->i915;
  	u32 *cs;
  	int ret;
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
  	/* Only wait if there is actually an old frame to release to
  	 * guarantee forward progress.
@@ -477,9 +477,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
  	return 0;
  }
-void intel_overlay_reset(struct drm_i915_private *dev_priv)
+void intel_overlay_reset(struct drm_i915_private *i915)
  {
-	struct intel_overlay *overlay = dev_priv->overlay;
+	struct intel_overlay *overlay = i915->overlay;
if (!overlay)
  		return;
@@ -540,11 +540,11 @@ static int uv_vsubsampling(u32 format)
  	}
  }
-static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
+static u32 calc_swidthsw(struct drm_i915_private *i915, u32 offset, u32 width)
  {
  	u32 sw;
- if (IS_GEN(dev_priv, 2))
+	if (IS_GEN(i915, 2))
  		sw = ALIGN((offset & 31) + width, 32);
  	else
  		sw = ALIGN((offset & 63) + width, 64);
@@ -749,21 +749,21 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
  				      struct drm_intel_overlay_put_image *params)
  {
  	struct overlay_registers __iomem *regs = overlay->regs;
-	struct drm_i915_private *dev_priv = overlay->i915;
+	struct drm_i915_private *i915 = overlay->i915;
  	u32 swidth, swidthsw, sheight, ostride;
  	enum pipe pipe = overlay->crtc->pipe;
  	bool scale_changed = false;
  	struct i915_vma *vma;
  	int ret, tmp_width;
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	WARN_ON(!drm_modeset_is_locked(&i915->drm.mode_config.connection_mutex));
  	ret = intel_overlay_release_old_vid(overlay);
  	if (ret != 0)
  		return ret;
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+	atomic_inc(&i915->gpu_error.pending_fb_pin);
i915_gem_object_lock(new_bo);
  	vma = i915_gem_object_pin_to_display_plane(new_bo,
@@ -783,7 +783,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
  		u32 oconfig;
oconfig = OCONF_CC_OUT_8BIT;
-		if (IS_GEN(dev_priv, 4))
+		if (IS_GEN(i915, 4))
  			oconfig |= OCONF_CSC_MODE_BT709;
  		oconfig |= pipe == 0 ?
  			OCONF_PIPE_A : OCONF_PIPE_B;
@@ -804,7 +804,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
  		tmp_width = params->src_width;
swidth = params->src_width;
-	swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
+	swidthsw = calc_swidthsw(i915, params->offset_Y, tmp_width);
  	sheight = params->src_height;
  	iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
  	ostride = params->stride_Y;
@@ -817,9 +817,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
  		swidth |= (params->src_width / uv_hscale) << 16;
  		sheight |= (params->src_height / uv_vscale) << 16;
- tmp_U = calc_swidthsw(dev_priv, params->offset_U,
+		tmp_U = calc_swidthsw(i915, params->offset_U,
  				      params->src_width / uv_hscale);
-		tmp_V = calc_swidthsw(dev_priv, params->offset_V,
+		tmp_V = calc_swidthsw(i915, params->offset_V,
  				      params->src_width / uv_hscale);
  		swidthsw |= max(tmp_U, tmp_V) << 16;
@@ -851,18 +851,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
  out_unpin:
  	i915_gem_object_unpin_from_display_plane(vma);
  out_pin_section:
-	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+	atomic_dec(&i915->gpu_error.pending_fb_pin);
return ret;
  }
int intel_overlay_switch_off(struct intel_overlay *overlay)
  {
-	struct drm_i915_private *dev_priv = overlay->i915;
+	struct drm_i915_private *i915 = overlay->i915;
  	int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	WARN_ON(!drm_modeset_is_locked(&i915->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
  	if (ret != 0)
@@ -895,14 +895,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
  {
-	struct drm_i915_private *dev_priv = overlay->i915;
+	struct drm_i915_private *i915 = overlay->i915;
  	u32 pfit_control = I915_READ(PFIT_CONTROL);
  	u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
  	 * line with the intel documentation for the i965
  	 */
-	if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		/* on i965 use the PGM reg to read out the autoscaler values */
  		ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
  	} else {
@@ -947,7 +947,7 @@ static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
  	return 0;
  }
-static int check_overlay_src(struct drm_i915_private *dev_priv,
+static int check_overlay_src(struct drm_i915_private *i915,
  			     struct drm_intel_overlay_put_image *rec,
  			     struct drm_i915_gem_object *new_bo)
  {
@@ -958,7 +958,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
  	u32 tmp;
/* check src dimensions */
-	if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
+	if (IS_I845G(i915) || IS_I830(i915)) {
  		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
  		    rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
  			return -EINVAL;
@@ -1010,14 +1010,14 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
  		return -EINVAL;
/* stride checking */
-	if (IS_I830(dev_priv) || IS_I845G(dev_priv))
+	if (IS_I830(i915) || IS_I845G(i915))
  		stride_mask = 255;
  	else
  		stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
  		return -EINVAL;
-	if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512)
+	if (IS_GEN(i915, 4) && rec->stride_Y < 512)
  		return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1062,14 +1062,14 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
  				  struct drm_file *file_priv)
  {
  	struct drm_intel_overlay_put_image *params = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_overlay *overlay;
  	struct drm_crtc *drmmode_crtc;
  	struct intel_crtc *crtc;
  	struct drm_i915_gem_object *new_bo;
  	int ret;
- overlay = dev_priv->overlay;
+	overlay = i915->overlay;
  	if (!overlay) {
  		DRM_DEBUG("userspace bug: no overlay\n");
  		return -ENODEV;
@@ -1148,7 +1148,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
  		goto out_unlock;
  	}
- ret = check_overlay_src(dev_priv, params, new_bo);
+	ret = check_overlay_src(i915, params, new_bo);
  	if (ret != 0)
  		goto out_unlock;
@@ -1231,11 +1231,11 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
  			      struct drm_file *file_priv)
  {
  	struct drm_intel_overlay_attrs *attrs = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_overlay *overlay;
  	int ret;
- overlay = dev_priv->overlay;
+	overlay = i915->overlay;
  	if (!overlay) {
  		DRM_DEBUG("userspace bug: no overlay\n");
  		return -ENODEV;
@@ -1251,7 +1251,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
  		attrs->contrast   = overlay->contrast;
  		attrs->saturation = overlay->saturation;
- if (!IS_GEN(dev_priv, 2)) {
+		if (!IS_GEN(i915, 2)) {
  			attrs->gamma0 = I915_READ(OGAMC0);
  			attrs->gamma1 = I915_READ(OGAMC1);
  			attrs->gamma2 = I915_READ(OGAMC2);
@@ -1275,7 +1275,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
  		update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
-			if (IS_GEN(dev_priv, 2))
+			if (IS_GEN(i915, 2))
  				goto out_unlock;
if (overlay->active) {
@@ -1351,19 +1351,19 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
  	return err;
  }
-void intel_overlay_setup(struct drm_i915_private *dev_priv)
+void intel_overlay_setup(struct drm_i915_private *i915)
  {
  	struct intel_overlay *overlay;
  	int ret;
- if (!HAS_OVERLAY(dev_priv))
+	if (!HAS_OVERLAY(i915))
  		return;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
  	if (!overlay)
  		return;
- overlay->i915 = dev_priv;
+	overlay->i915 = i915;
overlay->color_key = 0x0101fe;
  	overlay->color_key_enabled = true;
@@ -1373,7 +1373,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
  	INIT_ACTIVE_REQUEST(&overlay->last_flip);
-	ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+	ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(i915));
  	if (ret)
  		goto out_free;
@@ -1387,7 +1387,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
  	update_polyphase_filter(overlay->regs);
  	update_reg_attrs(overlay, overlay->regs);
- dev_priv->overlay = overlay;
+	i915->overlay = overlay;
  	DRM_INFO("Initialized overlay support.\n");
  	return;
@@ -1397,11 +1397,11 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
  	kfree(overlay);
  }
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
+void intel_overlay_cleanup(struct drm_i915_private *i915)
  {
  	struct intel_overlay *overlay;
- overlay = fetch_and_zero(&dev_priv->overlay);
+	overlay = fetch_and_zero(&i915->overlay);
  	if (!overlay)
  		return;
@@ -1427,9 +1427,9 @@ struct intel_overlay_error_state {
  };
struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
+intel_overlay_capture_error_state(struct drm_i915_private *i915)
  {
-	struct intel_overlay *overlay = dev_priv->overlay;
+	struct intel_overlay *overlay = i915->overlay;
  	struct intel_overlay_error_state *error;
if (!overlay || !overlay->active)
diff --git a/drivers/gpu/drm/i915/intel_overlay.h b/drivers/gpu/drm/i915/intel_overlay.h
index a167c28acd27..2e8771cffee9 100644
--- a/drivers/gpu/drm/i915/intel_overlay.h
+++ b/drivers/gpu/drm/i915/intel_overlay.h
@@ -13,16 +13,16 @@ struct drm_i915_private;
  struct intel_overlay;
  struct intel_overlay_error_state;
-void intel_overlay_setup(struct drm_i915_private *dev_priv);
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
+void intel_overlay_setup(struct drm_i915_private *i915);
+void intel_overlay_cleanup(struct drm_i915_private *i915);
  int intel_overlay_switch_off(struct intel_overlay *overlay);
  int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
  				  struct drm_file *file_priv);
  int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
  			      struct drm_file *file_priv);
-void intel_overlay_reset(struct drm_i915_private *dev_priv);
+void intel_overlay_reset(struct drm_i915_private *i915);
  struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
+intel_overlay_capture_error_state(struct drm_i915_private *i915);
  void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
  				     struct intel_overlay_error_state *error);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 9cd4e37e3934..d80115089445 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -65,7 +65,7 @@ struct drm_display_mode *
  intel_panel_edid_downclock_mode(struct intel_connector *connector,
  				const struct drm_display_mode *fixed_mode)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	const struct drm_display_mode *scan, *best_mode = NULL;
  	struct drm_display_mode *downclock_mode;
  	int best_clock = fixed_mode->clock;
@@ -92,7 +92,7 @@ intel_panel_edid_downclock_mode(struct intel_connector *connector,
  	if (!best_mode)
  		return NULL;
- downclock_mode = drm_mode_duplicate(&dev_priv->drm, best_mode);
+	downclock_mode = drm_mode_duplicate(&i915->drm, best_mode);
  	if (!downclock_mode)
  		return NULL;
@@ -106,7 +106,7 @@ intel_panel_edid_downclock_mode(struct intel_connector *connector,
  struct drm_display_mode *
  intel_panel_edid_fixed_mode(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	const struct drm_display_mode *scan;
  	struct drm_display_mode *fixed_mode;
@@ -118,7 +118,7 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
  		if ((scan->type & DRM_MODE_TYPE_PREFERRED) == 0)
  			continue;
- fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
+		fixed_mode = drm_mode_duplicate(&i915->drm, scan);
  		if (!fixed_mode)
  			return NULL;
@@ -132,7 +132,7 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
  	scan = list_first_entry(&connector->base.probed_modes,
  				typeof(*scan), head);
- fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
+	fixed_mode = drm_mode_duplicate(&i915->drm, scan);
  	if (!fixed_mode)
  		return NULL;
@@ -148,15 +148,15 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
  struct drm_display_mode *
  intel_panel_vbt_fixed_mode(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct drm_display_info *info = &connector->base.display_info;
  	struct drm_display_mode *fixed_mode;
- if (!dev_priv->vbt.lfp_lvds_vbt_mode)
+	if (!i915->vbt.lfp_lvds_vbt_mode)
  		return NULL;
- fixed_mode = drm_mode_duplicate(&dev_priv->drm,
-					dev_priv->vbt.lfp_lvds_vbt_mode);
+	fixed_mode = drm_mode_duplicate(&i915->drm,
+					i915->vbt.lfp_lvds_vbt_mode);
  	if (!fixed_mode)
  		return NULL;
@@ -378,7 +378,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
  			      struct intel_crtc_state *pipe_config,
  			      int fitting_mode)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_crtc->base.dev);
  	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
  	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -399,7 +399,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
  		break;
  	case DRM_MODE_SCALE_ASPECT:
  		/* Scale but preserve the aspect ratio */
-		if (INTEL_GEN(dev_priv) >= 4)
+		if (INTEL_GEN(i915) >= 4)
  			i965_scale_aspect(pipe_config, &pfit_control);
  		else
  			i9xx_scale_aspect(pipe_config, &pfit_control,
@@ -413,7 +413,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
  		if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
  		    pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
  			pfit_control |= PFIT_ENABLE;
-			if (INTEL_GEN(dev_priv) >= 4)
+			if (INTEL_GEN(i915) >= 4)
  				pfit_control |= PFIT_SCALING_AUTO;
  			else
  				pfit_control |= (VERT_AUTO_SCALE |
@@ -429,7 +429,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
/* 965+ wants fuzzy fitting */
  	/* FIXME: handle multiple panels by failing gracefully */
-	if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
  				 PFIT_FILTER_FUZZY);
@@ -440,7 +440,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
  	}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
-	if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
+	if (INTEL_GEN(i915) < 4 && pipe_config->pipe_bpp == 18)
  		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
pipe_config->gmch_pfit.control = pfit_control;
@@ -517,7 +517,7 @@ static inline u32 scale_hw_to_user(struct intel_connector *connector,
  static u32 intel_panel_compute_brightness(struct intel_connector *connector,
  					  u32 val)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	WARN_ON(panel->backlight.max == 0);
@@ -526,7 +526,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
  		return val;
if (i915_modparams.invert_brightness > 0 ||
-	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+	    i915->quirks & QUIRK_INVERT_BRIGHTNESS) {
  		return panel->backlight.max - val + panel->backlight.min;
  	}
@@ -535,39 +535,39 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
  static u32 lpt_get_backlight(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
  }
static u32 pch_get_backlight(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
  }
static u32 i9xx_get_backlight(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 val;
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-	if (INTEL_GEN(dev_priv) < 4)
+	if (INTEL_GEN(i915) < 4)
  		val >>= 1;
if (panel->backlight.combination_mode) {
  		u8 lbpc;
- pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
+		pci_read_config_byte(i915->drm.pdev, LBPC, &lbpc);
  		val *= lbpc;
  	}
return val;
  }
-static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
+static u32 _vlv_get_backlight(struct drm_i915_private *i915, enum pipe pipe)
  {
  	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
  		return 0;
@@ -577,15 +577,15 @@ static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
static u32 vlv_get_backlight(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	enum pipe pipe = intel_connector_get_pipe(connector);
- return _vlv_get_backlight(dev_priv, pipe);
+	return _vlv_get_backlight(i915, pipe);
  }
static u32 bxt_get_backlight(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
@@ -603,7 +603,7 @@ static u32 pwm_get_backlight(struct intel_connector *connector)
  static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
  	I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
@@ -612,7 +612,7 @@ static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32
  static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	u32 tmp;
tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
@@ -622,7 +622,7 @@ static void pch_set_backlight(const struct drm_connector_state *conn_state, u32
  static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 tmp, mask;
@@ -633,10 +633,10 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
  		lbpc = level * 0xfe / panel->backlight.max + 1;
  		level /= lbpc;
-		pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
+		pci_write_config_byte(i915->drm.pdev, LBPC, lbpc);
  	}
- if (IS_GEN(dev_priv, 4)) {
+	if (IS_GEN(i915, 4)) {
  		mask = BACKLIGHT_DUTY_CYCLE_MASK;
  	} else {
  		level <<= 1;
@@ -650,7 +650,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
  static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
  	u32 tmp;
@@ -661,7 +661,7 @@ static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32
  static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
@@ -694,7 +694,7 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
  				    u32 user_level, u32 user_max)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 hw_level;
@@ -707,7 +707,7 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
  	if (!panel->backlight.present || !conn_state->crtc)
  		return;
- mutex_lock(&dev_priv->backlight_lock);
+	mutex_lock(&i915->backlight_lock);
  	WARN_ON(panel->backlight.max == 0);
@@ -723,13 +723,13 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
  	if (panel->backlight.enabled)
  		intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->backlight_lock);
+	mutex_unlock(&i915->backlight_lock);
  }
static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	u32 tmp;
intel_panel_actually_set_backlight(old_conn_state, 0);
@@ -755,7 +755,7 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
  static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	u32 tmp;
intel_panel_actually_set_backlight(old_conn_state, 0);
@@ -774,7 +774,7 @@ static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_st
static void i965_disable_backlight(const struct drm_connector_state *old_conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
+	struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev);
  	u32 tmp;
intel_panel_actually_set_backlight(old_conn_state, 0);
@@ -786,7 +786,7 @@ static void i965_disable_backlight(const struct drm_connector_state *old_conn_st
  static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
  	u32 tmp;
@@ -799,7 +799,7 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta
  static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 tmp, val;
@@ -819,7 +819,7 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
  static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 tmp;
@@ -844,7 +844,7 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta
  void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present)
@@ -856,26 +856,26 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
  	 * backlight. This will leave the backlight on unnecessarily when
  	 * another client is not activated.
  	 */
-	if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
  		DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
  		return;
  	}
- mutex_lock(&dev_priv->backlight_lock);
+	mutex_lock(&i915->backlight_lock);
if (panel->backlight.device)
  		panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
  	panel->backlight.enabled = false;
  	panel->backlight.disable(old_conn_state);
- mutex_unlock(&dev_priv->backlight_lock);
+	mutex_unlock(&i915->backlight_lock);
  }
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
  				 const struct drm_connector_state *conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 pch_ctl1, pch_ctl2, schicken;
@@ -886,7 +886,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
  		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
  	}
- if (HAS_PCH_LPT(dev_priv)) {
+	if (HAS_PCH_LPT(i915)) {
  		schicken = I915_READ(SOUTH_CHICKEN2);
  		if (panel->backlight.alternate_pwm_increment)
  			schicken |= LPT_PWM_GRANULARITY;
@@ -910,7 +910,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
  		pch_ctl1 |= BLM_PCH_POLARITY;
/* After LPT, override is the default. */
-	if (HAS_PCH_LPT(dev_priv))
+	if (HAS_PCH_LPT(i915))
  		pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
@@ -925,7 +925,7 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
  				 const struct drm_connector_state *conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	u32 cpu_ctl2, pch_ctl1, pch_ctl2;
@@ -971,7 +971,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
  				  const struct drm_connector_state *conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 ctl, freq;
@@ -988,7 +988,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
  	ctl = freq << 17;
  	if (panel->backlight.combination_mode)
  		ctl |= BLM_LEGACY_MODE;
-	if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
+	if (IS_PINEVIEW(i915) && panel->backlight.active_low_pwm)
  		ctl |= BLM_POLARITY_PNV;
I915_WRITE(BLC_PWM_CTL, ctl);
@@ -1002,7 +1002,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
  	 * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
  	 * that has backlight.
  	 */
-	if (IS_GEN(dev_priv, 2))
+	if (IS_GEN(i915, 2))
  		I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
  }
@@ -1010,7 +1010,7 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
  				  const struct drm_connector_state *conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
  	u32 ctl, ctl2, freq;
@@ -1045,7 +1045,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
  				 const struct drm_connector_state *conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
  	u32 ctl, ctl2;
@@ -1075,7 +1075,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
  				 const struct drm_connector_state *conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
  	u32 pwm_ctl, val;
@@ -1123,7 +1123,7 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
  				 const struct drm_connector_state *conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 pwm_ctl;
@@ -1187,7 +1187,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
  				  const struct drm_connector_state *conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
@@ -1196,28 +1196,28 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
  	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
-	mutex_lock(&dev_priv->backlight_lock);
+	mutex_lock(&i915->backlight_lock);
  	__intel_panel_enable_backlight(crtc_state, conn_state);
-	mutex_unlock(&dev_priv->backlight_lock);
+	mutex_unlock(&i915->backlight_lock);
  }
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
  static u32 intel_panel_get_backlight(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 val = 0;
-	mutex_lock(&dev_priv->backlight_lock);
+	mutex_lock(&i915->backlight_lock);
  	if (panel->backlight.enabled) {
  		val = panel->backlight.get(connector);
  		val = intel_panel_compute_brightness(connector, val);
  	}
-	mutex_unlock(&dev_priv->backlight_lock);
+	mutex_unlock(&i915->backlight_lock);
  	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
  	return val;
@@ -1228,14 +1228,14 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
  				      u32 user_level, u32 user_max)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 hw_level;
  	if (!panel->backlight.present)
  		return;
-	mutex_lock(&dev_priv->backlight_lock);
+	mutex_lock(&i915->backlight_lock);
  	WARN_ON(panel->backlight.max == 0);
@@ -1245,7 +1245,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
  	if (panel->backlight.enabled)
  		intel_panel_actually_set_backlight(conn_state, hw_level);
-	mutex_unlock(&dev_priv->backlight_lock);
+	mutex_unlock(&i915->backlight_lock);
  }
static int intel_backlight_device_update_status(struct backlight_device *bd)
@@ -1284,11 +1284,11 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
  {
  	struct intel_connector *connector = bl_get_data(bd);
  	struct drm_device *dev = connector->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	intel_wakeref_t wakeref;
  	int ret = 0;
-	with_intel_runtime_pm(dev_priv, wakeref) {
+	with_intel_runtime_pm(i915, wakeref) {
  		u32 hw_level;
  		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
@@ -1378,9 +1378,9 @@ void intel_backlight_device_unregister(struct intel_connector *connector)
   */
  static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
-	return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz);
+	return DIV_ROUND_CLOSEST(KHz(i915->rawclk_freq), pwm_freq_hz);
  }
/*
@@ -1416,7 +1416,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
   */
  static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 mul, clock;
@@ -1425,7 +1425,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
  	else
  		mul = 128;
-	if (HAS_PCH_LPT_H(dev_priv))
+	if (HAS_PCH_LPT_H(i915))
  		clock = MHz(135); /* LPT:H */
  	else
  		clock = MHz(24); /* LPT:LP */
@@ -1439,9 +1439,9 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
   */
  static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
-	return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128);
+	return DIV_ROUND_CLOSEST(KHz(i915->rawclk_freq), pwm_freq_hz * 128);
  }
/*
@@ -1454,13 +1454,13 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
   */
  static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	int clock;
-	if (IS_PINEVIEW(dev_priv))
-		clock = KHz(dev_priv->rawclk_freq);
+	if (IS_PINEVIEW(i915))
+		clock = KHz(i915->rawclk_freq);
  	else
-		clock = KHz(dev_priv->cdclk.hw.cdclk);
+		clock = KHz(i915->cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
  }
@@ -1472,13 +1472,13 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
   */
  static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	int clock;
-	if (IS_G4X(dev_priv))
-		clock = KHz(dev_priv->rawclk_freq);
+	if (IS_G4X(i915))
+		clock = KHz(i915->rawclk_freq);
  	else
-		clock = KHz(dev_priv->cdclk.hw.cdclk);
+		clock = KHz(i915->cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
  }
@@ -1490,17 +1490,17 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
   */
  static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	int mul, clock;
if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
-		if (IS_CHERRYVIEW(dev_priv))
+		if (IS_CHERRYVIEW(i915))
  			clock = KHz(19200);
  		else
  			clock = MHz(25);
  		mul = 16;
  	} else {
-		clock = KHz(dev_priv->rawclk_freq);
+		clock = KHz(i915->rawclk_freq);
  		mul = 128;
  	}
@@ -1509,9 +1509,9 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
  static u32 get_backlight_max_vbt(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
-	u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
+	u16 pwm_freq_hz = i915->vbt.backlight.pwm_freq_hz;
  	u32 pwm;
if (!panel->backlight.hz_to_pwm) {
@@ -1542,7 +1542,7 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
   */
  static u32 get_backlight_min_vbt(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	int min;
@@ -1555,10 +1555,10 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
  	 * against this by letting the minimum be at most (arbitrarily chosen)
  	 * 25% of the max.
  	 */
-	min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
-	if (min != dev_priv->vbt.backlight.min_brightness) {
+	min = clamp_t(int, i915->vbt.backlight.min_brightness, 0, 64);
+	if (min != i915->vbt.backlight.min_brightness) {
  		DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
-			      dev_priv->vbt.backlight.min_brightness, min);
+			      i915->vbt.backlight.min_brightness, min);
  	}
/* vbt value is a coefficient in range [0..255] */
@@ -1567,12 +1567,12 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
  	bool alt, cpu_mode;
-	if (HAS_PCH_LPT(dev_priv))
+	if (HAS_PCH_LPT(i915))
  		alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
  	else
  		alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
@@ -1596,7 +1596,7 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
  	panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
-	cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) &&
+	cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(i915) &&
  		   !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
  		   (cpu_ctl2 & BLM_PWM_ENABLE);
  	if (cpu_mode)
@@ -1622,7 +1622,7 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
@@ -1654,16 +1654,16 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
  static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 ctl, val;
  	ctl = I915_READ(BLC_PWM_CTL);
-	if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+	if (IS_GEN(i915, 2) || IS_I915GM(i915) || IS_I945GM(i915))
  		panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
-	if (IS_PINEVIEW(dev_priv))
+	if (IS_PINEVIEW(i915))
  		panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
panel->backlight.max = ctl >> 17;
@@ -1693,7 +1693,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 ctl, ctl2, val;
@@ -1727,7 +1727,7 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
  static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 ctl, ctl2, val;
@@ -1748,7 +1748,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
  	panel->backlight.min = get_backlight_min_vbt(connector);
-	val = _vlv_get_backlight(dev_priv, pipe);
+	val = _vlv_get_backlight(i915, pipe);
  	val = intel_panel_compute_brightness(connector, val);
  	panel->backlight.level = clamp(val, panel->backlight.min,
  				       panel->backlight.max);
@@ -1761,11 +1761,11 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
  static int
  bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 pwm_ctl, val;
-	panel->backlight.controller = dev_priv->vbt.backlight.controller;
+	panel->backlight.controller = i915->vbt.backlight.controller;
  	pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -1801,7 +1801,7 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
  static int
  cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	u32 pwm_ctl, val;
@@ -1881,28 +1881,28 @@ void intel_panel_update_backlight(struct intel_encoder *encoder,
  				  const struct drm_connector_state *conn_state)
  {
  	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_panel *panel = &connector->panel;
  	if (!panel->backlight.present)
  		return;
-	mutex_lock(&dev_priv->backlight_lock);
+	mutex_lock(&i915->backlight_lock);
  	if (!panel->backlight.enabled)
  		__intel_panel_enable_backlight(crtc_state, conn_state);
-	mutex_unlock(&dev_priv->backlight_lock);
+	mutex_unlock(&i915->backlight_lock);
  }
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	struct intel_connector *intel_connector = to_intel_connector(connector);
  	struct intel_panel *panel = &intel_connector->panel;
  	int ret;
-	if (!dev_priv->vbt.backlight.present) {
-		if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+	if (!i915->vbt.backlight.present) {
+		if (i915->quirks & QUIRK_BACKLIGHT_PRESENT) {
  			DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
  		} else {
  			DRM_DEBUG_KMS("no backlight present per VBT\n");
@@ -1915,9 +1915,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
  		return -ENODEV;
/* set level and max in panel struct */
-	mutex_lock(&dev_priv->backlight_lock);
+	mutex_lock(&i915->backlight_lock);
  	ret = panel->backlight.setup(intel_connector, pipe);
-	mutex_unlock(&dev_priv->backlight_lock);
+	mutex_unlock(&i915->backlight_lock);
if (ret) {
  		DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
@@ -1950,7 +1950,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
  {
  	struct intel_connector *connector =
  		container_of(panel, struct intel_connector, panel);
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
  	    intel_dp_aux_init_backlight_funcs(connector) == 0)
@@ -1960,38 +1960,38 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
  	    intel_dsi_dcs_init_backlight_funcs(connector) == 0)
  		return;
-	if (IS_GEN9_LP(dev_priv)) {
+	if (IS_GEN9_LP(i915)) {
  		panel->backlight.setup = bxt_setup_backlight;
  		panel->backlight.enable = bxt_enable_backlight;
  		panel->backlight.disable = bxt_disable_backlight;
  		panel->backlight.set = bxt_set_backlight;
  		panel->backlight.get = bxt_get_backlight;
  		panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
-	} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
+	} else if (INTEL_PCH_TYPE(i915) >= PCH_CNP) {
  		panel->backlight.setup = cnp_setup_backlight;
  		panel->backlight.enable = cnp_enable_backlight;
  		panel->backlight.disable = cnp_disable_backlight;
  		panel->backlight.set = bxt_set_backlight;
  		panel->backlight.get = bxt_get_backlight;
  		panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
-	} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
+	} else if (INTEL_PCH_TYPE(i915) >= PCH_LPT) {
  		panel->backlight.setup = lpt_setup_backlight;
  		panel->backlight.enable = lpt_enable_backlight;
  		panel->backlight.disable = lpt_disable_backlight;
  		panel->backlight.set = lpt_set_backlight;
  		panel->backlight.get = lpt_get_backlight;
-		if (HAS_PCH_LPT(dev_priv))
+		if (HAS_PCH_LPT(i915))
  			panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
  		else
  			panel->backlight.hz_to_pwm = spt_hz_to_pwm;
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
+	} else if (HAS_PCH_SPLIT(i915)) {
  		panel->backlight.setup = pch_setup_backlight;
  		panel->backlight.enable = pch_enable_backlight;
  		panel->backlight.disable = pch_disable_backlight;
  		panel->backlight.set = pch_set_backlight;
  		panel->backlight.get = pch_get_backlight;
  		panel->backlight.hz_to_pwm = pch_hz_to_pwm;
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
  			panel->backlight.setup = pwm_setup_backlight;
  			panel->backlight.enable = pwm_enable_backlight;
@@ -2006,7 +2006,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
  			panel->backlight.get = vlv_get_backlight;
  			panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
  		}
-	} else if (IS_GEN(dev_priv, 4)) {
+	} else if (IS_GEN(i915, 4)) {
  		panel->backlight.setup = i965_setup_backlight;
  		panel->backlight.enable = i965_enable_backlight;
  		panel->backlight.disable = i965_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 1e2c4307d05a..4a4409f4076f 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -70,11 +70,11 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
  	return 0;
  }
-static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *i915,
  				     enum pipe pipe,
  				     enum intel_pipe_crc_source *source)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct intel_encoder *encoder;
  	struct intel_crtc *crtc;
  	struct intel_digital_port *dig_port;
@@ -124,7 +124,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
  	return ret;
  }
-static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *i915,
  				enum pipe pipe,
  				enum intel_pipe_crc_source *source,
  				u32 *val)
@@ -132,7 +132,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
  	bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-		int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+		int ret = i9xx_pipe_crc_auto_source(i915, pipe, source);
  		if (ret)
  			return ret;
  	}
@@ -150,7 +150,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
  		need_stable_symbols = true;
  		break;
  	case INTEL_PIPE_CRC_SOURCE_DP_D:
-		if (!IS_CHERRYVIEW(dev_priv))
+		if (!IS_CHERRYVIEW(i915))
  			return -EINVAL;
  		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
  		need_stable_symbols = true;
@@ -194,13 +194,13 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
  	return 0;
  }
-static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *i915,
  				 enum pipe pipe,
  				 enum intel_pipe_crc_source *source,
  				 u32 *val)
  {
  	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-		int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+		int ret = i9xx_pipe_crc_auto_source(i915, pipe, source);
  		if (ret)
  			return ret;
  	}
@@ -210,7 +210,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
  		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
  		break;
  	case INTEL_PIPE_CRC_SOURCE_TV:
-		if (!SUPPORTS_TV(dev_priv))
+		if (!SUPPORTS_TV(i915))
  			return -EINVAL;
  		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
  		break;
@@ -234,7 +234,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
  	return 0;
  }
-static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *i915,
  					 enum pipe pipe)
  {
  	u32 tmp = I915_READ(PORT_DFT2_G4X);
@@ -286,7 +286,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
  static void
  intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_crtc_state *pipe_config;
  	struct drm_atomic_state *state;
  	struct drm_modeset_acquire_ctx ctx;
@@ -294,7 +294,7 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
  	drm_modeset_acquire_init(&ctx, 0);
-	state = drm_atomic_state_alloc(&dev_priv->drm);
+	state = drm_atomic_state_alloc(&i915->drm);
  	if (!state) {
  		ret = -ENOMEM;
  		goto unlock;
@@ -312,7 +312,7 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
  	pipe_config->base.mode_changed = pipe_config->has_psr;
  	pipe_config->crc_enabled = enable;
-	if (IS_HASWELL(dev_priv) &&
+	if (IS_HASWELL(i915) &&
  	    pipe_config->base.active && crtc->pipe == PIPE_A &&
  	    pipe_config->cpu_transcoder == TRANSCODER_EDP)
  		pipe_config->base.mode_changed = true;
@@ -333,7 +333,7 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
  	drm_modeset_acquire_fini(&ctx);
  }
-static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *i915,
  				enum pipe pipe,
  				enum intel_pipe_crc_source *source,
  				u32 *val)
@@ -361,7 +361,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
  	return 0;
  }
-static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int skl_pipe_crc_ctl_reg(struct drm_i915_private *i915,
  				enum pipe pipe,
  				enum intel_pipe_crc_source *source,
  				u32 *val)
@@ -404,22 +404,22 @@ static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
  	return 0;
  }
-static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int get_new_crc_ctl_reg(struct drm_i915_private *i915,
  			       enum pipe pipe,
  			       enum intel_pipe_crc_source *source, u32 *val)
  {
-	if (IS_GEN(dev_priv, 2))
+	if (IS_GEN(i915, 2))
  		return i8xx_pipe_crc_ctl_reg(source, val);
-	else if (INTEL_GEN(dev_priv) < 5)
-		return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
-	else if (IS_GEN_RANGE(dev_priv, 5, 6))
+	else if (INTEL_GEN(i915) < 5)
+		return i9xx_pipe_crc_ctl_reg(i915, pipe, source, val);
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		return vlv_pipe_crc_ctl_reg(i915, pipe, source, val);
+	else if (IS_GEN_RANGE(i915, 5, 6))
  		return ilk_pipe_crc_ctl_reg(source, val);
-	else if (INTEL_GEN(dev_priv) < 9)
-		return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+	else if (INTEL_GEN(i915) < 9)
+		return ivb_pipe_crc_ctl_reg(i915, pipe, source, val);
  	else
-		return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+		return skl_pipe_crc_ctl_reg(i915, pipe, source, val);
  }
static int
@@ -440,18 +440,18 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
  	return 0;
  }
-void intel_display_crc_init(struct drm_i915_private *dev_priv)
+void intel_display_crc_init(struct drm_i915_private *i915)
  {
  	enum pipe pipe;
-	for_each_pipe(dev_priv, pipe) {
-		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	for_each_pipe(i915, pipe) {
+		struct intel_pipe_crc *pipe_crc = &i915->pipe_crc[pipe];
  		spin_lock_init(&pipe_crc->lock);
  	}
  }
-static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i8xx_crc_source_valid(struct drm_i915_private *i915,
  				 const enum intel_pipe_crc_source source)
  {
  	switch (source) {
@@ -463,7 +463,7 @@ static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
  	}
  }
-static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i9xx_crc_source_valid(struct drm_i915_private *i915,
  				 const enum intel_pipe_crc_source source)
  {
  	switch (source) {
@@ -476,7 +476,7 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
  	}
  }
-static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
+static int vlv_crc_source_valid(struct drm_i915_private *i915,
  				const enum intel_pipe_crc_source source)
  {
  	switch (source) {
@@ -491,7 +491,7 @@ static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
  	}
  }
-static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ilk_crc_source_valid(struct drm_i915_private *i915,
  				const enum intel_pipe_crc_source source)
  {
  	switch (source) {
@@ -505,7 +505,7 @@ static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
  	}
  }
-static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ivb_crc_source_valid(struct drm_i915_private *i915,
  				const enum intel_pipe_crc_source source)
  {
  	switch (source) {
@@ -519,7 +519,7 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
  	}
  }
-static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+static int skl_crc_source_valid(struct drm_i915_private *i915,
  				const enum intel_pipe_crc_source source)
  {
  	switch (source) {
@@ -539,21 +539,21 @@ static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
  }
static int
-intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
+intel_is_valid_crc_source(struct drm_i915_private *i915,
  			  const enum intel_pipe_crc_source source)
  {
-	if (IS_GEN(dev_priv, 2))
-		return i8xx_crc_source_valid(dev_priv, source);
-	else if (INTEL_GEN(dev_priv) < 5)
-		return i9xx_crc_source_valid(dev_priv, source);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		return vlv_crc_source_valid(dev_priv, source);
-	else if (IS_GEN_RANGE(dev_priv, 5, 6))
-		return ilk_crc_source_valid(dev_priv, source);
-	else if (INTEL_GEN(dev_priv) < 9)
-		return ivb_crc_source_valid(dev_priv, source);
+	if (IS_GEN(i915, 2))
+		return i8xx_crc_source_valid(i915, source);
+	else if (INTEL_GEN(i915) < 5)
+		return i9xx_crc_source_valid(i915, source);
+	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		return vlv_crc_source_valid(i915, source);
+	else if (IS_GEN_RANGE(i915, 5, 6))
+		return ilk_crc_source_valid(i915, source);
+	else if (INTEL_GEN(i915) < 9)
+		return ivb_crc_source_valid(i915, source);
  	else
-		return skl_crc_source_valid(dev_priv, source);
+		return skl_crc_source_valid(i915, source);
  }
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@@ -566,7 +566,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
  int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
  				 size_t *values_cnt)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
  	enum intel_pipe_crc_source source;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
@@ -575,7 +575,7 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
  	}
if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
-	    intel_is_valid_crc_source(dev_priv, source) == 0) {
+	    intel_is_valid_crc_source(i915, source) == 0) {
  		*values_cnt = 5;
  		return 0;
  	}
@@ -585,8 +585,8 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
+	struct intel_pipe_crc *pipe_crc = &i915->pipe_crc[crtc->index];
  	enum intel_display_power_domain power_domain;
  	enum intel_pipe_crc_source source;
  	intel_wakeref_t wakeref;
@@ -600,7 +600,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
  	}
power_domain = POWER_DOMAIN_PIPE(crtc->index);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref) {
  		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
  		return -EIO;
@@ -610,7 +610,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
  	if (enable)
  		intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true);
-	ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
+	ret = get_new_crc_ctl_reg(i915, crtc->index, &source, &val);
  	if (ret != 0)
  		goto out;
@@ -619,8 +619,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
  	POSTING_READ(PIPE_CRC_CTL(crtc->index));
if (!source) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-			vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+			vlv_undo_pipe_scramble_reset(i915, crtc->index);
  	}
pipe_crc->skipped = 0;
@@ -629,7 +629,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
  	if (!enable)
  		intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false);
-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return ret;
  }
@@ -637,14 +637,14 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
  void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
  {
  	struct drm_crtc *crtc = &intel_crtc->base;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
+	struct intel_pipe_crc *pipe_crc = &i915->pipe_crc[crtc->index];
  	u32 val = 0;
if (!crtc->crc.opened)
  		return;
-	if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0)
+	if (get_new_crc_ctl_reg(i915, crtc->index, &pipe_crc->source, &val) < 0)
  		return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */
@@ -657,8 +657,8 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
  void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
  {
  	struct drm_crtc *crtc = &intel_crtc->base;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
+	struct intel_pipe_crc *pipe_crc = &i915->pipe_crc[crtc->index];
/* Swallow crc's until we stop generating them. */
  	spin_lock_irq(&pipe_crc->lock);
@@ -667,5 +667,5 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
  	POSTING_READ(PIPE_CRC_CTL(crtc->index));
-	synchronize_irq(dev_priv->drm.irq);
+	synchronize_irq(i915->drm.irq);
  }
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.h b/drivers/gpu/drm/i915/intel_pipe_crc.h
index db258a756fc6..42048ab1391a 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.h
@@ -13,7 +13,7 @@ struct drm_i915_private;
  struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
+void intel_display_crc_init(struct drm_i915_private *i915);
  int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
  int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
  				 const char *source_name, size_t *values_cnt);
@@ -22,7 +22,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
  void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
  void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
  #else
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
+static inline void intel_display_crc_init(struct drm_i915_private *i915) {}
  #define intel_crtc_set_crc_source NULL
  #define intel_crtc_verify_crc_source NULL
  #define intel_crtc_get_crc_sources NULL
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2c7f3ebc0117..4b939160cd95 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -63,9 +63,9 @@
   * require higher latency to switch to and wake up.
   */
-static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
+static void gen9_init_clock_gating(struct drm_i915_private *i915)
  {
-	if (HAS_LLC(dev_priv)) {
+	if (HAS_LLC(i915)) {
  		/*
  		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
  		 * Display WA #0390: skl,kbl
@@ -96,16 +96,16 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
  	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
  		   ILK_DPFC_DISABLE_DUMMY0);
-	if (IS_SKYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(i915)) {
  		/* WaDisableDopClockGating */
  		I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
  			   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
  	}
  }
-static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
+static void bxt_init_clock_gating(struct drm_i915_private *i915)
  {
-	gen9_init_clock_gating(dev_priv);
+	gen9_init_clock_gating(i915);
/* WaDisableSDEUnitClockGating:bxt */
  	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
@@ -126,9 +126,9 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
  		   PWM1_GATING_DIS | PWM2_GATING_DIS);
  }
-static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
+static void glk_init_clock_gating(struct drm_i915_private *i915)
  {
-	gen9_init_clock_gating(dev_priv);
+	gen9_init_clock_gating(i915);
/*
  	 * WaDisablePWMClockGating:glk
@@ -139,7 +139,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
  		   PWM1_GATING_DIS | PWM2_GATING_DIS);
/* WaDDIIOTimeout:glk */
-	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
+	if (IS_GLK_REVID(i915, 0, GLK_REVID_A1)) {
  		u32 val = I915_READ(CHICKEN_MISC_2);
  		val &= ~(GLK_CL0_PWR_DOWN |
  			 GLK_CL1_PWR_DOWN |
@@ -149,7 +149,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
  }
-static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
+static void i915_pineview_get_mem_freq(struct drm_i915_private *i915)
  {
  	u32 tmp;
@@ -157,100 +157,100 @@ static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
  	switch (tmp & CLKCFG_FSB_MASK) {
  	case CLKCFG_FSB_533:
-		dev_priv->fsb_freq = 533; /* 133*4 */
+		i915->fsb_freq = 533; /* 133*4 */
  		break;
  	case CLKCFG_FSB_800:
-		dev_priv->fsb_freq = 800; /* 200*4 */
+		i915->fsb_freq = 800; /* 200*4 */
  		break;
  	case CLKCFG_FSB_667:
-		dev_priv->fsb_freq =  667; /* 167*4 */
+		i915->fsb_freq =  667; /* 167*4 */
  		break;
  	case CLKCFG_FSB_400:
-		dev_priv->fsb_freq = 400; /* 100*4 */
+		i915->fsb_freq = 400; /* 100*4 */
  		break;
  	}
switch (tmp & CLKCFG_MEM_MASK) {
  	case CLKCFG_MEM_533:
-		dev_priv->mem_freq = 533;
+		i915->mem_freq = 533;
  		break;
  	case CLKCFG_MEM_667:
-		dev_priv->mem_freq = 667;
+		i915->mem_freq = 667;
  		break;
  	case CLKCFG_MEM_800:
-		dev_priv->mem_freq = 800;
+		i915->mem_freq = 800;
  		break;
  	}
/* detect pineview DDR3 setting */
  	tmp = I915_READ(CSHRDDR3CTL);
-	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+	i915->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
  }
-static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
+static void i915_ironlake_get_mem_freq(struct drm_i915_private *i915)
  {
  	u16 ddrpll, csipll;
-	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
-	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
+	ddrpll = intel_uncore_read16(&i915->uncore, DDRMPLL1);
+	csipll = intel_uncore_read16(&i915->uncore, CSIPLL0);
switch (ddrpll & 0xff) {
  	case 0xc:
-		dev_priv->mem_freq = 800;
+		i915->mem_freq = 800;
  		break;
  	case 0x10:
-		dev_priv->mem_freq = 1066;
+		i915->mem_freq = 1066;
  		break;
  	case 0x14:
-		dev_priv->mem_freq = 1333;
+		i915->mem_freq = 1333;
  		break;
  	case 0x18:
-		dev_priv->mem_freq = 1600;
+		i915->mem_freq = 1600;
  		break;
  	default:
  		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
  				 ddrpll & 0xff);
-		dev_priv->mem_freq = 0;
+		i915->mem_freq = 0;
  		break;
  	}
-	dev_priv->ips.r_t = dev_priv->mem_freq;
+	i915->ips.r_t = i915->mem_freq;
switch (csipll & 0x3ff) {
  	case 0x00c:
-		dev_priv->fsb_freq = 3200;
+		i915->fsb_freq = 3200;
  		break;
  	case 0x00e:
-		dev_priv->fsb_freq = 3733;
+		i915->fsb_freq = 3733;
  		break;
  	case 0x010:
-		dev_priv->fsb_freq = 4266;
+		i915->fsb_freq = 4266;
  		break;
  	case 0x012:
-		dev_priv->fsb_freq = 4800;
+		i915->fsb_freq = 4800;
  		break;
  	case 0x014:
-		dev_priv->fsb_freq = 5333;
+		i915->fsb_freq = 5333;
  		break;
  	case 0x016:
-		dev_priv->fsb_freq = 5866;
+		i915->fsb_freq = 5866;
  		break;
  	case 0x018:
-		dev_priv->fsb_freq = 6400;
+		i915->fsb_freq = 6400;
  		break;
  	default:
  		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
  				 csipll & 0x3ff);
-		dev_priv->fsb_freq = 0;
+		i915->fsb_freq = 0;
  		break;
  	}
-	if (dev_priv->fsb_freq == 3200) {
-		dev_priv->ips.c_m = 0;
-	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
-		dev_priv->ips.c_m = 1;
+	if (i915->fsb_freq == 3200) {
+		i915->ips.c_m = 0;
+	} else if (i915->fsb_freq > 3200 && i915->fsb_freq <= 4800) {
+		i915->ips.c_m = 1;
  	} else {
-		dev_priv->ips.c_m = 2;
+		i915->ips.c_m = 2;
  	}
  }
@@ -316,61 +316,61 @@ static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
  	return NULL;
  }
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_dvfs(struct drm_i915_private *i915, bool enable)
  {
  	u32 val;
-	vlv_punit_get(dev_priv);
+	vlv_punit_get(i915);
-	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+	val = vlv_punit_read(i915, PUNIT_REG_DDR_SETUP2);
  	if (enable)
  		val &= ~FORCE_DDR_HIGH_FREQ;
  	else
  		val |= FORCE_DDR_HIGH_FREQ;
  	val &= ~FORCE_DDR_LOW_FREQ;
  	val |= FORCE_DDR_FREQ_REQ_ACK;
-	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+	vlv_punit_write(i915, PUNIT_REG_DDR_SETUP2, val);
-	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+	if (wait_for((vlv_punit_read(i915, PUNIT_REG_DDR_SETUP2) &
  		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
  		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
-	vlv_punit_put(dev_priv);
+	vlv_punit_put(i915);
  }
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_pm5(struct drm_i915_private *i915, bool enable)
  {
  	u32 val;
-	vlv_punit_get(dev_priv);
+	vlv_punit_get(i915);
-	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+	val = vlv_punit_read(i915, PUNIT_REG_DSPSSPM);
  	if (enable)
  		val |= DSP_MAXFIFO_PM5_ENABLE;
  	else
  		val &= ~DSP_MAXFIFO_PM5_ENABLE;
-	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+	vlv_punit_write(i915, PUNIT_REG_DSPSSPM, val);
-	vlv_punit_put(dev_priv);
+	vlv_punit_put(i915);
  }
#define FW_WM(value, plane) \
  	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+static bool _intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable)
  {
  	bool was_enabled;
  	u32 val;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
  		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
  		POSTING_READ(FW_BLC_SELF_VLV);
-	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
+	} else if (IS_G4X(i915) || IS_I965GM(i915)) {
  		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
  		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
  		POSTING_READ(FW_BLC_SELF);
-	} else if (IS_PINEVIEW(dev_priv)) {
+	} else if (IS_PINEVIEW(i915)) {
  		val = I915_READ(DSPFW3);
  		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
  		if (enable)
@@ -379,13 +379,13 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
  			val &= ~PINEVIEW_SELF_REFRESH_EN;
  		I915_WRITE(DSPFW3, val);
  		POSTING_READ(DSPFW3);
-	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
+	} else if (IS_I945G(i915) || IS_I945GM(i915)) {
  		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
  		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
  			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
  		I915_WRITE(FW_BLC_SELF, val);
  		POSTING_READ(FW_BLC_SELF);
-	} else if (IS_I915GM(dev_priv)) {
+	} else if (IS_I915GM(i915)) {
  		/*
  		 * FIXME can't find a bit like this for 915G, and
  		 * and yet it does have the related watermark in
@@ -400,7 +400,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
  		return false;
  	}
-	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
+	trace_intel_memory_cxsr(i915, was_enabled, enable);
DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
  		      enableddisabled(enable),
@@ -411,7 +411,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
/**
   * intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @enable: Allow vs. disallow CxSR
   *
   * Allow or disallow the system to enter a special CxSR
@@ -446,17 +446,17 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
   * the hardware w.r.t. HPLL SR when writing to plane registers.
   * Disallowing just CxSR is sufficient.
   */
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable)
  {
  	bool ret;
-	mutex_lock(&dev_priv->wm.wm_mutex);
-	ret = _intel_set_memory_cxsr(dev_priv, enable);
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		dev_priv->wm.vlv.cxsr = enable;
-	else if (IS_G4X(dev_priv))
-		dev_priv->wm.g4x.cxsr = enable;
-	mutex_unlock(&dev_priv->wm.wm_mutex);
+	mutex_lock(&i915->wm.wm_mutex);
+	ret = _intel_set_memory_cxsr(i915, enable);
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		i915->wm.vlv.cxsr = enable;
+	else if (IS_G4X(i915))
+		i915->wm.g4x.cxsr = enable;
+	mutex_unlock(&i915->wm.wm_mutex);
return ret;
  }
@@ -483,7 +483,7 @@ static const int pessimal_latency_ns = 5000;
  static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
  	enum pipe pipe = crtc->pipe;
  	int sprite0_start, sprite1_start;
@@ -519,7 +519,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
  	fifo_state->plane[PLANE_CURSOR] = 63;
  }
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i9xx_get_fifo_size(struct drm_i915_private *i915,
  			      enum i9xx_plane_id i9xx_plane)
  {
  	u32 dsparb = I915_READ(DSPARB);
@@ -535,7 +535,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
  	return size;
  }
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i830_get_fifo_size(struct drm_i915_private *i915,
  			      enum i9xx_plane_id i9xx_plane)
  {
  	u32 dsparb = I915_READ(DSPARB);
@@ -552,7 +552,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
  	return size;
  }
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i845_get_fifo_size(struct drm_i915_private *i915,
  			      enum i9xx_plane_id i9xx_plane)
  {
  	u32 dsparb = I915_READ(DSPARB);
@@ -805,9 +805,9 @@ static bool is_enabling(int old, int new, int threshold)
  	return old < threshold && new >= threshold;
  }
-static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
+static int intel_wm_num_levels(struct drm_i915_private *i915)
  {
-	return dev_priv->wm.max_level + 1;
+	return i915->wm.max_level + 1;
  }
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
@@ -833,11 +833,11 @@ static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
  		return plane_state->base.visible;
  }
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *i915)
  {
  	struct intel_crtc *crtc, *enabled = NULL;
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		if (intel_crtc_active(crtc)) {
  			if (enabled)
  				return NULL;
@@ -850,23 +850,23 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
static void pineview_update_wm(struct intel_crtc *unused_crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(unused_crtc->base.dev);
  	struct intel_crtc *crtc;
  	const struct cxsr_latency *latency;
  	u32 reg;
  	unsigned int wm;
-	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
-					 dev_priv->is_ddr3,
-					 dev_priv->fsb_freq,
-					 dev_priv->mem_freq);
+	latency = intel_get_cxsr_latency(!IS_MOBILE(i915),
+					 i915->is_ddr3,
+					 i915->fsb_freq,
+					 i915->mem_freq);
  	if (!latency) {
  		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-		intel_set_memory_cxsr(dev_priv, false);
+		intel_set_memory_cxsr(i915, false);
  		return;
  	}
-	crtc = single_enabled_crtc(dev_priv);
+	crtc = single_enabled_crtc(i915);
  	if (crtc) {
  		const struct drm_display_mode *adjusted_mode =
  			&crtc->config->base.adjusted_mode;
@@ -913,9 +913,9 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
  		I915_WRITE(DSPFW3, reg);
  		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-		intel_set_memory_cxsr(dev_priv, true);
+		intel_set_memory_cxsr(i915, true);
  	} else {
-		intel_set_memory_cxsr(dev_priv, false);
+		intel_set_memory_cxsr(i915, false);
  	}
  }
@@ -936,13 +936,13 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
  	return max(0, tlb_miss);
  }
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_write_wm_values(struct drm_i915_private *i915,
  				const struct g4x_wm_values *wm)
  {
  	enum pipe pipe;
-	for_each_pipe(dev_priv, pipe)
-		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
+	for_each_pipe(i915, pipe)
+		trace_g4x_wm(intel_get_crtc_for_pipe(i915, pipe), wm);
I915_WRITE(DSPFW1,
  		   FW_WM(wm->sr.plane, SR) |
@@ -968,13 +968,13 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
  #define FW_WM_VLV(value, plane) \
  	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_write_wm_values(struct drm_i915_private *i915,
  				const struct vlv_wm_values *wm)
  {
  	enum pipe pipe;
-	for_each_pipe(dev_priv, pipe) {
-		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
+	for_each_pipe(i915, pipe) {
+		trace_vlv_wm(intel_get_crtc_for_pipe(i915, pipe), wm);
I915_WRITE(VLV_DDL(pipe),
  			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
@@ -1006,7 +1006,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
  	I915_WRITE(DSPFW3,
  		   FW_WM(wm->sr.cursor, CURSOR_SR));
-	if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		I915_WRITE(DSPFW7_CHV,
  			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
  			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
@@ -1046,14 +1046,14 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
  #undef FW_WM_VLV
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void g4x_setup_wm_latency(struct drm_i915_private *i915)
  {
  	/* all latencies in usec */
-	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
-	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
-	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+	i915->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+	i915->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+	i915->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
-	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
+	i915->wm.max_level = G4X_WM_LEVEL_HPLL;
  }
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -1103,10 +1103,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
  			  int level)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	const struct drm_display_mode *adjusted_mode =
  		&crtc_state->base.adjusted_mode;
-	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
+	unsigned int latency = i915->wm.pri_latency[level] * 10;
  	unsigned int clock, htotal, cpp, width, wm;
if (latency == 0)
@@ -1126,7 +1126,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
  	 * the HPLL watermark, which seems a little strange.
  	 * Let's use 32bpp for the HPLL watermark as well.
  	 */
-	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
+	if (IS_GM45(i915) && plane->id == PLANE_PRIMARY &&
  	    level != G4X_WM_LEVEL_NORMAL)
  		cpp = 4;
  	else
@@ -1165,10 +1165,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
  static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
  				 int level, enum plane_id plane_id, u16 value)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	bool dirty = false;
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
+	for (; level < intel_wm_num_levels(i915); level++) {
  		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1181,13 +1181,13 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
  static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
  			       int level, u16 value)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	bool dirty = false;
/* NORMAL level doesn't have an FBC watermark */
  	level = max(level, G4X_WM_LEVEL_SR);
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
+	for (; level < intel_wm_num_levels(i915); level++) {
  		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->fbc != value;
@@ -1283,9 +1283,9 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
  static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
  				     int level)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
-	if (level > dev_priv->wm.max_level)
+	if (level > i915->wm.max_level)
  		return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1484,7 +1484,7 @@ static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
  	return 0;
  }
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+static void g4x_merge_wm(struct drm_i915_private *i915,
  			 struct g4x_wm_values *wm)
  {
  	struct intel_crtc *crtc;
@@ -1494,7 +1494,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
  	wm->hpll_en = true;
  	wm->fbc_en = true;
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
if (!crtc->active)
@@ -1516,7 +1516,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
  		wm->fbc_en = false;
  	}
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
  		enum pipe pipe = crtc->pipe;
@@ -1528,23 +1528,23 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
  	}
  }
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+static void g4x_program_watermarks(struct drm_i915_private *i915)
  {
-	struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
+	struct g4x_wm_values *old_wm = &i915->wm.g4x;
  	struct g4x_wm_values new_wm = {};
-	g4x_merge_wm(dev_priv, &new_wm);
+	g4x_merge_wm(i915, &new_wm);
  	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
  		return;
  	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
-		_intel_set_memory_cxsr(dev_priv, false);
+		_intel_set_memory_cxsr(i915, false);
-	g4x_write_wm_values(dev_priv, &new_wm);
+	g4x_write_wm_values(i915, &new_wm);
  	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
-		_intel_set_memory_cxsr(dev_priv, true);
+		_intel_set_memory_cxsr(i915, true);
  	*old_wm = new_wm;
  }
@@ -1552,28 +1552,28 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
  static void g4x_initial_watermarks(struct intel_atomic_state *state,
  				   struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	mutex_lock(&dev_priv->wm.wm_mutex);
+	mutex_lock(&i915->wm.wm_mutex);
  	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
-	g4x_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->wm.wm_mutex);
+	g4x_program_watermarks(i915);
+	mutex_unlock(&i915->wm.wm_mutex);
  }
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
  				    struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
if (!crtc_state->wm.need_postvbl_update)
  		return;
-	mutex_lock(&dev_priv->wm.wm_mutex);
+	mutex_lock(&i915->wm.wm_mutex);
  	intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
-	g4x_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->wm.wm_mutex);
+	g4x_program_watermarks(i915);
+	mutex_unlock(&i915->wm.wm_mutex);
  }
/* latency must be in 0.1us units. */
@@ -1592,18 +1592,18 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
  	return ret;
  }
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void vlv_setup_wm_latency(struct drm_i915_private *i915)
  {
  	/* all latencies in usec */
-	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+	i915->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
-	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+	i915->wm.max_level = VLV_WM_LEVEL_PM2;
-	if (IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
-		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+	if (IS_CHERRYVIEW(i915)) {
+		i915->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+		i915->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
-		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
+		i915->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
  	}
  }
@@ -1612,12 +1612,12 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
  				int level)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	const struct drm_display_mode *adjusted_mode =
  		&crtc_state->base.adjusted_mode;
  	unsigned int clock, htotal, cpp, width, wm;
-	if (dev_priv->wm.pri_latency[level] == 0)
+	if (i915->wm.pri_latency[level] == 0)
  		return USHRT_MAX;
if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1638,7 +1638,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
  		wm = 63;
  	} else {
  		wm = vlv_wm_method2(clock, htotal, width, cpp,
-				    dev_priv->wm.pri_latency[level] * 10);
+				    i915->wm.pri_latency[level] * 10);
  	}
return min_t(unsigned int, wm, USHRT_MAX);
@@ -1736,9 +1736,9 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
  static void vlv_invalidate_wms(struct intel_crtc *crtc,
  			       struct vlv_wm_state *wm_state, int level)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
+	for (; level < intel_wm_num_levels(i915); level++) {
  		enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1764,8 +1764,8 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
  static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
  				 int level, enum plane_id plane_id, u16 value)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	int num_levels = intel_wm_num_levels(dev_priv);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
+	int num_levels = intel_wm_num_levels(i915);
  	bool dirty = false;
for (; level < num_levels; level++) {
@@ -1840,7 +1840,7 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
  static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_atomic_state *state =
  		to_intel_atomic_state(crtc_state->base.state);
  	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
@@ -1897,7 +1897,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
  	}
/* initially allow all levels */
-	wm_state->num_levels = intel_wm_num_levels(dev_priv);
+	wm_state->num_levels = intel_wm_num_levels(i915);
  	/*
  	 * Note that enabling cxsr with no primary/sprite planes
  	 * enabled can wedge the pipe. Hence we only allow cxsr
@@ -1907,7 +1907,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
  		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
-		const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+		const int sr_fifo_size = INTEL_INFO(i915)->num_pipes * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
  			break;
@@ -1948,8 +1948,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
  				   struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_uncore *uncore = &i915->uncore;
  	const struct vlv_fifo_state *fifo_state =
  		&crtc_state->wm.vlv.fifo_state;
  	int sprite0_start, sprite1_start, fifo_size;
@@ -2092,16 +2092,16 @@ static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
  	return 0;
  }
-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
+static void vlv_merge_wm(struct drm_i915_private *i915,
  			 struct vlv_wm_values *wm)
  {
  	struct intel_crtc *crtc;
  	int num_active_crtcs = 0;
-	wm->level = dev_priv->wm.max_level;
+	wm->level = i915->wm.max_level;
  	wm->cxsr = true;
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
if (!crtc->active)
@@ -2120,7 +2120,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
  	if (num_active_crtcs > 1)
  		wm->level = VLV_WM_LEVEL_PM2;
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
  		enum pipe pipe = crtc->pipe;
@@ -2135,35 +2135,35 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
  	}
  }
-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
+static void vlv_program_watermarks(struct drm_i915_private *i915)
  {
-	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
+	struct vlv_wm_values *old_wm = &i915->wm.vlv;
  	struct vlv_wm_values new_wm = {};
-	vlv_merge_wm(dev_priv, &new_wm);
+	vlv_merge_wm(i915, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
  		return;
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
-		chv_set_memory_dvfs(dev_priv, false);
+		chv_set_memory_dvfs(i915, false);
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
-		chv_set_memory_pm5(dev_priv, false);
+		chv_set_memory_pm5(i915, false);
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
-		_intel_set_memory_cxsr(dev_priv, false);
+		_intel_set_memory_cxsr(i915, false);
-	vlv_write_wm_values(dev_priv, &new_wm);
+	vlv_write_wm_values(i915, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
-		_intel_set_memory_cxsr(dev_priv, true);
+		_intel_set_memory_cxsr(i915, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
-		chv_set_memory_pm5(dev_priv, true);
+		chv_set_memory_pm5(i915, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
-		chv_set_memory_dvfs(dev_priv, true);
+		chv_set_memory_dvfs(i915, true);
  	*old_wm = new_wm;
  }
@@ -2171,40 +2171,40 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
  static void vlv_initial_watermarks(struct intel_atomic_state *state,
  				   struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	mutex_lock(&dev_priv->wm.wm_mutex);
+	mutex_lock(&i915->wm.wm_mutex);
  	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
-	vlv_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->wm.wm_mutex);
+	vlv_program_watermarks(i915);
+	mutex_unlock(&i915->wm.wm_mutex);
  }
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
  				    struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
if (!crtc_state->wm.need_postvbl_update)
  		return;
-	mutex_lock(&dev_priv->wm.wm_mutex);
+	mutex_lock(&i915->wm.wm_mutex);
  	intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
-	vlv_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->wm.wm_mutex);
+	vlv_program_watermarks(i915);
+	mutex_unlock(&i915->wm.wm_mutex);
  }
static void i965_update_wm(struct intel_crtc *unused_crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(unused_crtc->base.dev);
  	struct intel_crtc *crtc;
  	int srwm = 1;
  	int cursor_sr = 16;
  	bool cxsr_enabled;
/* Calc sr entries for one plane configs */
-	crtc = single_enabled_crtc(dev_priv);
+	crtc = single_enabled_crtc(i915);
  	if (crtc) {
  		/* self-refresh has much higher latency */
  		static const int sr_latency_ns = 12000;
@@ -2246,7 +2246,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
  	} else {
  		cxsr_enabled = false;
  		/* Turn off self refresh if both pipes are enabled */
-		intel_set_memory_cxsr(dev_priv, false);
+		intel_set_memory_cxsr(i915, false);
  	}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2263,14 +2263,14 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
  	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
-		intel_set_memory_cxsr(dev_priv, true);
+		intel_set_memory_cxsr(i915, true);
  }
  #undef FW_WM
  static void i9xx_update_wm(struct intel_crtc *unused_crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(unused_crtc->base.dev);
  	const struct intel_watermark_params *wm_info;
  	u32 fwater_lo;
  	u32 fwater_hi;
@@ -2279,15 +2279,15 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
  	int planea_wm, planeb_wm;
  	struct intel_crtc *crtc, *enabled = NULL;
-	if (IS_I945GM(dev_priv))
+	if (IS_I945GM(i915))
  		wm_info = &i945_wm_info;
-	else if (!IS_GEN(dev_priv, 2))
+	else if (!IS_GEN(i915, 2))
  		wm_info = &i915_wm_info;
  	else
  		wm_info = &i830_a_wm_info;
-	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
-	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
+	fifo_size = i915->display.get_fifo_size(i915, PLANE_A);
+	crtc = intel_get_crtc_for_plane(i915, PLANE_A);
  	if (intel_crtc_active(crtc)) {
  		const struct drm_display_mode *adjusted_mode =
  			&crtc->config->base.adjusted_mode;
@@ -2295,7 +2295,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
  			crtc->base.primary->state->fb;
  		int cpp;
-		if (IS_GEN(dev_priv, 2))
+		if (IS_GEN(i915, 2))
  			cpp = 4;
  		else
  			cpp = fb->format->cpp[0];
@@ -2310,11 +2310,11 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
  			planea_wm = wm_info->max_wm;
  	}
-	if (IS_GEN(dev_priv, 2))
+	if (IS_GEN(i915, 2))
  		wm_info = &i830_bc_wm_info;
-	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
-	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
+	fifo_size = i915->display.get_fifo_size(i915, PLANE_B);
+	crtc = intel_get_crtc_for_plane(i915, PLANE_B);
  	if (intel_crtc_active(crtc)) {
  		const struct drm_display_mode *adjusted_mode =
  			&crtc->config->base.adjusted_mode;
@@ -2322,7 +2322,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
  			crtc->base.primary->state->fb;
  		int cpp;
-		if (IS_GEN(dev_priv, 2))
+		if (IS_GEN(i915, 2))
  			cpp = 4;
  		else
  			cpp = fb->format->cpp[0];
@@ -2342,7 +2342,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
  	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-	if (IS_I915GM(dev_priv) && enabled) {
+	if (IS_I915GM(i915) && enabled) {
  		struct drm_i915_gem_object *obj;
obj = intel_fb_obj(enabled->base.primary->state->fb);
@@ -2358,10 +2358,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
  	cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
-	intel_set_memory_cxsr(dev_priv, false);
+	intel_set_memory_cxsr(i915, false);
/* Calc sr entries for one plane configs */
-	if (HAS_FW_BLC(dev_priv) && enabled) {
+	if (HAS_FW_BLC(i915) && enabled) {
  		/* self-refresh has much higher latency */
  		static const int sr_latency_ns = 6000;
  		const struct drm_display_mode *adjusted_mode =
@@ -2374,7 +2374,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
  		int cpp;
  		int entries;
-		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+		if (IS_I915GM(i915) || IS_I945GM(i915))
  			cpp = 4;
  		else
  			cpp = fb->format->cpp[0];
@@ -2387,7 +2387,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
  		if (srwm < 0)
  			srwm = 1;
-		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+		if (IS_I945G(i915) || IS_I945GM(i915))
  			I915_WRITE(FW_BLC_SELF,
  				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
  		else
@@ -2408,25 +2408,25 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
  	I915_WRITE(FW_BLC2, fwater_hi);
if (enabled)
-		intel_set_memory_cxsr(dev_priv, true);
+		intel_set_memory_cxsr(i915, true);
  }
static void i845_update_wm(struct intel_crtc *unused_crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(unused_crtc->base.dev);
  	struct intel_crtc *crtc;
  	const struct drm_display_mode *adjusted_mode;
  	u32 fwater_lo;
  	int planea_wm;
-	crtc = single_enabled_crtc(dev_priv);
+	crtc = single_enabled_crtc(i915);
  	if (crtc == NULL)
  		return;
adjusted_mode = &crtc->config->base.adjusted_mode;
  	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
  				       &i845_wm_info,
-				       dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
+				       i915->display.get_fifo_size(i915, PLANE_A),
  				       4, pessimal_latency_ns);
  	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
  	fwater_lo |= (3<<8) | planea_wm;
@@ -2586,24 +2586,24 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
  }
static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
+ilk_display_fifo_size(const struct drm_i915_private *i915)
  {
-	if (INTEL_GEN(dev_priv) >= 8)
+	if (INTEL_GEN(i915) >= 8)
  		return 3072;
-	else if (INTEL_GEN(dev_priv) >= 7)
+	else if (INTEL_GEN(i915) >= 7)
  		return 768;
  	else
  		return 512;
  }
static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ilk_plane_wm_reg_max(const struct drm_i915_private *i915,
  		     int level, bool is_sprite)
  {
-	if (INTEL_GEN(dev_priv) >= 8)
+	if (INTEL_GEN(i915) >= 8)
  		/* BDW primary/sprite plane watermarks */
  		return level == 0 ? 255 : 2047;
-	else if (INTEL_GEN(dev_priv) >= 7)
+	else if (INTEL_GEN(i915) >= 7)
  		/* IVB/HSW primary/sprite plane watermarks */
  		return level == 0 ? 127 : 1023;
  	else if (!is_sprite)
@@ -2615,30 +2615,30 @@ ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
  }
static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
+ilk_cursor_wm_reg_max(const struct drm_i915_private *i915, int level)
  {
-	if (INTEL_GEN(dev_priv) >= 7)
+	if (INTEL_GEN(i915) >= 7)
  		return level == 0 ? 63 : 255;
  	else
  		return level == 0 ? 31 : 63;
  }
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *i915)
  {
-	if (INTEL_GEN(dev_priv) >= 8)
+	if (INTEL_GEN(i915) >= 8)
  		return 31;
  	else
  		return 15;
  }
/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_plane_wm_max(const struct drm_i915_private *i915,
  				     int level,
  				     const struct intel_wm_config *config,
  				     enum intel_ddb_partitioning ddb_partitioning,
  				     bool is_sprite)
  {
-	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
+	unsigned int fifo_size = ilk_display_fifo_size(i915);
/* if sprites aren't enabled, sprites get nothing */
  	if (is_sprite && !config->sprites_enabled)
@@ -2646,14 +2646,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
  	if (level == 0 || config->num_pipes_active > 1) {
-		fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
+		fifo_size /= INTEL_INFO(i915)->num_pipes;
/*
  		 * For some reason the non self refresh
  		 * FIFO size is only half of the self
  		 * refresh FIFO size on ILK/SNB.
  		 */
-		if (INTEL_GEN(dev_priv) <= 6)
+		if (INTEL_GEN(i915) <= 6)
  			fifo_size /= 2;
  	}
@@ -2669,11 +2669,11 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
  	}
/* clamp to max that the registers can hold */
-	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
+	return min(fifo_size, ilk_plane_wm_reg_max(i915, level, is_sprite));
  }
/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *i915,
  				      int level,
  				      const struct intel_wm_config *config)
  {
@@ -2682,29 +2682,29 @@ static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
  		return 64;
/* otherwise just report max that registers can hold */
-	return ilk_cursor_wm_reg_max(dev_priv, level);
+	return ilk_cursor_wm_reg_max(i915, level);
  }
-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_maximums(const struct drm_i915_private *i915,
  				    int level,
  				    const struct intel_wm_config *config,
  				    enum intel_ddb_partitioning ddb_partitioning,
  				    struct ilk_wm_maximums *max)
  {
-	max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
-	max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
-	max->cur = ilk_cursor_wm_max(dev_priv, level, config);
-	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+	max->pri = ilk_plane_wm_max(i915, level, config, ddb_partitioning, false);
+	max->spr = ilk_plane_wm_max(i915, level, config, ddb_partitioning, true);
+	max->cur = ilk_cursor_wm_max(i915, level, config);
+	max->fbc = ilk_fbc_wm_reg_max(i915);
  }
-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *i915,
  					int level,
  					struct ilk_wm_maximums *max)
  {
-	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
-	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
-	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
-	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+	max->pri = ilk_plane_wm_reg_max(i915, level, false);
+	max->spr = ilk_plane_wm_reg_max(i915, level, true);
+	max->cur = ilk_cursor_wm_reg_max(i915, level);
+	max->fbc = ilk_fbc_wm_reg_max(i915);
  }
static bool ilk_validate_wm_level(int level,
@@ -2748,7 +2748,7 @@ static bool ilk_validate_wm_level(int level,
  	return ret;
  }
-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_level(const struct drm_i915_private *i915,
  				 const struct intel_crtc *intel_crtc,
  				 int level,
  				 struct intel_crtc_state *cstate,
@@ -2757,9 +2757,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
  				 const struct intel_plane_state *curstate,
  				 struct intel_wm_level *result)
  {
-	u16 pri_latency = dev_priv->wm.pri_latency[level];
-	u16 spr_latency = dev_priv->wm.spr_latency[level];
-	u16 cur_latency = dev_priv->wm.cur_latency[level];
+	u16 pri_latency = i915->wm.pri_latency[level];
+	u16 spr_latency = i915->wm.spr_latency[level];
+	u16 cur_latency = i915->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
  	if (level > 0) {
@@ -2811,19 +2811,19 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
  	       PIPE_WM_LINETIME_TIME(linetime);
  }
-static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+static void intel_read_wm_latency(struct drm_i915_private *i915,
  				  u16 wm[8])
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		u32 val;
  		int ret, i;
-		int level, max_level = ilk_wm_max_level(dev_priv);
+		int level, max_level = ilk_wm_max_level(i915);
/* read the first set of memory latencies[0:3] */
  		val = 0; /* data0 to be programmed to 0 for first set */
-		ret = sandybridge_pcode_read(dev_priv,
+		ret = sandybridge_pcode_read(i915,
  					     GEN9_PCODE_READ_MEM_LATENCY,
  					     &val, NULL);
@@ -2842,7 +2842,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
  		/* read the second set of memory latencies[4:7] */
  		val = 1; /* data0 to be programmed to 1 for second set */
-		ret = sandybridge_pcode_read(dev_priv,
+		ret = sandybridge_pcode_read(i915,
  					     GEN9_PCODE_READ_MEM_LATENCY,
  					     &val, NULL);
  		if (ret) {
@@ -2893,10 +2893,10 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
  		 * any underrun. If not able to get Dimm info assume 16GB dimm
  		 * to avoid any underrun.
  		 */
-		if (dev_priv->dram_info.is_16gb_dimm)
+		if (i915->dram_info.is_16gb_dimm)
  			wm[0] += 1;
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
  		u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
@@ -2906,14 +2906,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
  		wm[2] = (sskpd >> 12) & 0xFF;
  		wm[3] = (sskpd >> 20) & 0x1FF;
  		wm[4] = (sskpd >> 32) & 0x1FF;
-	} else if (INTEL_GEN(dev_priv) >= 6) {
+	} else if (INTEL_GEN(i915) >= 6) {
  		u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
  		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
  		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
  		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
-	} else if (INTEL_GEN(dev_priv) >= 5) {
+	} else if (INTEL_GEN(i915) >= 5) {
  		u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
@@ -2921,44 +2921,44 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
  		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
  		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
  	} else {
-		MISSING_CASE(INTEL_DEVID(dev_priv));
+		MISSING_CASE(INTEL_DEVID(i915));
  	}
  }
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
+static void intel_fixup_spr_wm_latency(struct drm_i915_private *i915,
  				       u16 wm[5])
  {
  	/* ILK sprite LP0 latency is 1300 ns */
-	if (IS_GEN(dev_priv, 5))
+	if (IS_GEN(i915, 5))
  		wm[0] = 13;
  }
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
+static void intel_fixup_cur_wm_latency(struct drm_i915_private *i915,
  				       u16 wm[5])
  {
  	/* ILK cursor LP0 latency is 1300 ns */
-	if (IS_GEN(dev_priv, 5))
+	if (IS_GEN(i915, 5))
  		wm[0] = 13;
  }
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
+int ilk_wm_max_level(const struct drm_i915_private *i915)
  {
  	/* how many WM levels are we expecting */
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		return 7;
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		return 4;
-	else if (INTEL_GEN(dev_priv) >= 6)
+	else if (INTEL_GEN(i915) >= 6)
  		return 3;
  	else
  		return 2;
  }
-static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+static void intel_print_wm_latency(struct drm_i915_private *i915,
  				   const char *name,
  				   const u16 wm[8])
  {
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, max_level = ilk_wm_max_level(i915);
for (level = 0; level <= max_level; level++) {
  		unsigned int latency = wm[level];
@@ -2973,7 +2973,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
  		 * - latencies are in us on gen9.
  		 * - before then, WM1+ latency values are in 0.5us units
  		 */
-		if (INTEL_GEN(dev_priv) >= 9)
+		if (INTEL_GEN(i915) >= 9)
  			latency *= 10;
  		else if (level > 0)
  			latency *= 5;
@@ -2984,10 +2984,10 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
  	}
  }
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+static bool ilk_increase_wm_latency(struct drm_i915_private *i915,
  				    u16 wm[5], u16 min)
  {
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, max_level = ilk_wm_max_level(i915);
if (wm[0] >= min)
  		return false;
@@ -2999,7 +2999,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
  	return true;
  }
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_latency_quirk(struct drm_i915_private *i915)
  {
  	bool changed;
@@ -3007,20 +3007,20 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
  	 * The BIOS provided WM memory latency values are often
  	 * inadequate for high resolution displays. Adjust them.
  	 */
-	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
-		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
-		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+	changed = ilk_increase_wm_latency(i915, i915->wm.pri_latency, 12) |
+		ilk_increase_wm_latency(i915, i915->wm.spr_latency, 12) |
+		ilk_increase_wm_latency(i915, i915->wm.cur_latency, 12);
if (!changed)
  		return;
DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
-	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
-	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
-	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+	intel_print_wm_latency(i915, "Primary", i915->wm.pri_latency);
+	intel_print_wm_latency(i915, "Sprite", i915->wm.spr_latency);
+	intel_print_wm_latency(i915, "Cursor", i915->wm.cur_latency);
  }
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *i915)
  {
  	/*
  	 * On some SNB machines (Thinkpad X220 Tablet at least)
@@ -3033,50 +3033,50 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
  	 * interrupts only. To play it safe we disable LP3
  	 * watermarks entirely.
  	 */
-	if (dev_priv->wm.pri_latency[3] == 0 &&
-	    dev_priv->wm.spr_latency[3] == 0 &&
-	    dev_priv->wm.cur_latency[3] == 0)
+	if (i915->wm.pri_latency[3] == 0 &&
+	    i915->wm.spr_latency[3] == 0 &&
+	    i915->wm.cur_latency[3] == 0)
  		return;
-	dev_priv->wm.pri_latency[3] = 0;
-	dev_priv->wm.spr_latency[3] = 0;
-	dev_priv->wm.cur_latency[3] = 0;
+	i915->wm.pri_latency[3] = 0;
+	i915->wm.spr_latency[3] = 0;
+	i915->wm.cur_latency[3] = 0;
DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
-	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
-	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
-	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+	intel_print_wm_latency(i915, "Primary", i915->wm.pri_latency);
+	intel_print_wm_latency(i915, "Sprite", i915->wm.spr_latency);
+	intel_print_wm_latency(i915, "Cursor", i915->wm.cur_latency);
  }
-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void ilk_setup_wm_latency(struct drm_i915_private *i915)
  {
-	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+	intel_read_wm_latency(i915, i915->wm.pri_latency);
-	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
-	       sizeof(dev_priv->wm.pri_latency));
-	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
-	       sizeof(dev_priv->wm.pri_latency));
+	memcpy(i915->wm.spr_latency, i915->wm.pri_latency,
+	       sizeof(i915->wm.pri_latency));
+	memcpy(i915->wm.cur_latency, i915->wm.pri_latency,
+	       sizeof(i915->wm.pri_latency));
-	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
-	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
+	intel_fixup_spr_wm_latency(i915, i915->wm.spr_latency);
+	intel_fixup_cur_wm_latency(i915, i915->wm.cur_latency);
-	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
-	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
-	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+	intel_print_wm_latency(i915, "Primary", i915->wm.pri_latency);
+	intel_print_wm_latency(i915, "Sprite", i915->wm.spr_latency);
+	intel_print_wm_latency(i915, "Cursor", i915->wm.cur_latency);
-	if (IS_GEN(dev_priv, 6)) {
-		snb_wm_latency_quirk(dev_priv);
-		snb_wm_lp3_irq_quirk(dev_priv);
+	if (IS_GEN(i915, 6)) {
+		snb_wm_latency_quirk(i915);
+		snb_wm_lp3_irq_quirk(i915);
  	}
  }
-static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void skl_setup_wm_latency(struct drm_i915_private *i915)
  {
-	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
-	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
+	intel_read_wm_latency(i915, i915->wm.skl_latency);
+	intel_print_wm_latency(i915, "Gen9 Plane", i915->wm.skl_latency);
  }
-static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
+static bool ilk_validate_pipe_wm(const struct drm_i915_private *i915,
  				 struct intel_pipe_wm *pipe_wm)
  {
  	/* LP0 watermark maximums depend on this pipe alone */
@@ -3088,7 +3088,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
  	struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
-	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+	ilk_compute_wm_maximums(i915, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
  	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
@@ -3106,13 +3106,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
  	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
  	struct intel_pipe_wm *pipe_wm;
  	struct drm_device *dev = state->dev;
-	const struct drm_i915_private *dev_priv = to_i915(dev);
+	const struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_plane *plane;
  	const struct drm_plane_state *plane_state;
  	const struct intel_plane_state *pristate = NULL;
  	const struct intel_plane_state *sprstate = NULL;
  	const struct intel_plane_state *curstate = NULL;
-	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
+	int level, max_level = ilk_wm_max_level(i915), usable_level;
  	struct ilk_wm_maximums max;
pipe_wm = &cstate->wm.ilk.optimal;
@@ -3139,7 +3139,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
  	usable_level = max_level;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
-	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
+	if (INTEL_GEN(i915) <= 6 && pipe_wm->sprites_enabled)
  		usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -3147,21 +3147,21 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
  		usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
-	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+	ilk_compute_wm_level(i915, intel_crtc, 0, cstate,
  			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
-	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+	if (!ilk_validate_pipe_wm(i915, pipe_wm))
  		return -EINVAL;
-	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+	ilk_compute_wm_reg_maximums(i915, 1, &max);
for (level = 1; level <= usable_level; level++) {
  		struct intel_wm_level *wm = &pipe_wm->wm[level];
-		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
+		ilk_compute_wm_level(i915, intel_crtc, level, cstate,
  				     pristate, sprstate, curstate, wm);
/*
@@ -3186,14 +3186,14 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
  static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
  {
  	struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_crtc->base.dev);
  	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
  	struct intel_atomic_state *intel_state =
  		to_intel_atomic_state(newstate->base.state);
  	const struct intel_crtc_state *oldstate =
  		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
  	const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, max_level = ilk_wm_max_level(i915);
/*
  	 * Start with the final, target watermarks, then combine with the
@@ -3226,7 +3226,7 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
  	 * there's no safe way to transition from the old state to
  	 * the new state, so we need to fail the atomic transaction.
  	 */
-	if (!ilk_validate_pipe_wm(dev_priv, a))
+	if (!ilk_validate_pipe_wm(i915, a))
  		return -EINVAL;
/*
@@ -3242,7 +3242,7 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
  /*
   * Merge the watermarks from all active pipes for a specific level.
   */
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_merge_wm_level(struct drm_i915_private *i915,
  			       int level,
  			       struct intel_wm_level *ret_wm)
  {
@@ -3250,7 +3250,7 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
  	ret_wm->enable = true;
-	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
+	for_each_intel_crtc(&i915->drm, intel_crtc) {
  		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
  		const struct intel_wm_level *wm = &active->wm[level];
@@ -3275,27 +3275,27 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
  /*
   * Merge all low power watermarks for all active pipes.
   */
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+static void ilk_wm_merge(struct drm_i915_private *i915,
  			 const struct intel_wm_config *config,
  			 const struct ilk_wm_maximums *max,
  			 struct intel_pipe_wm *merged)
  {
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, max_level = ilk_wm_max_level(i915);
  	int last_enabled_level = max_level;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
-	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
+	if ((INTEL_GEN(i915) <= 6 || IS_IVYBRIDGE(i915)) &&
  	    config->num_pipes_active > 1)
  		last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
-	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
+	merged->fbc_wm_enabled = INTEL_GEN(i915) >= 6;
/* merge each WM1+ level */
  	for (level = 1; level <= max_level; level++) {
  		struct intel_wm_level *wm = &merged->wm[level];
-		ilk_merge_wm_level(dev_priv, level, wm);
+		ilk_merge_wm_level(i915, level, wm);
if (level > last_enabled_level)
  			wm->enable = false;
@@ -3320,8 +3320,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
  	 * What we should check here is whether FBC can be
  	 * enabled sometime later.
  	 */
-	if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
-	    intel_fbc_is_active(dev_priv)) {
+	if (IS_GEN(i915, 5) && !merged->fbc_wm_enabled &&
+	    intel_fbc_is_active(i915)) {
  		for (level = 2; level <= max_level; level++) {
  			struct intel_wm_level *wm = &merged->wm[level];
@@ -3337,16 +3337,16 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
  }
/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+static unsigned int ilk_wm_lp_latency(struct drm_i915_private *i915,
  				      int level)
  {
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		return 2 * level;
  	else
-		return dev_priv->wm.pri_latency[level];
+		return i915->wm.pri_latency[level];
  }
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_results(struct drm_i915_private *i915,
  				   const struct intel_pipe_wm *merged,
  				   enum intel_ddb_partitioning partitioning,
  				   struct ilk_wm_values *results)
@@ -3370,14 +3370,14 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
  		 * disabled. Doing otherwise could cause underruns.
  		 */
  		results->wm_lp[wm_lp - 1] =
-			(ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
+			(ilk_wm_lp_latency(i915, level) << WM1_LP_LATENCY_SHIFT) |
  			(r->pri_val << WM1_LP_SR_SHIFT) |
  			r->cur_val;
if (r->enable)
  			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
-		if (INTEL_GEN(dev_priv) >= 8)
+		if (INTEL_GEN(i915) >= 8)
  			results->wm_lp[wm_lp - 1] |=
  				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
  		else
@@ -3388,7 +3388,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
  		 * Always set WM1S_LP_EN when spr_val != 0, even if the
  		 * level is disabled. Doing otherwise could cause underruns.
  		 */
-		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
+		if (INTEL_GEN(i915) <= 6 && r->spr_val) {
  			WARN_ON(wm_lp != 1);
  			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
  		} else
@@ -3396,7 +3396,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
  	}
/* LP0 register values */
-	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
+	for_each_intel_crtc(&i915->drm, intel_crtc) {
  		enum pipe pipe = intel_crtc->pipe;
  		const struct intel_wm_level *r =
  			&intel_crtc->wm.active.ilk.wm[0];
@@ -3416,11 +3416,11 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
  /* Find the result with the highest level enabled. Check for enable_fbc_wm in
   * case both are at the same level. Prefer r1 in case they're the same. */
  static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
+ilk_find_best_result(struct drm_i915_private *i915,
  		     struct intel_pipe_wm *r1,
  		     struct intel_pipe_wm *r2)
  {
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, max_level = ilk_wm_max_level(i915);
  	int level1 = 0, level2 = 0;
for (level = 1; level <= max_level; level++) {
@@ -3450,7 +3450,7 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
  #define WM_DIRTY_FBC (1 << 24)
  #define WM_DIRTY_DDB (1 << 25)
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *i915,
  					 const struct ilk_wm_values *old,
  					 const struct ilk_wm_values *new)
  {
@@ -3458,7 +3458,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
  	enum pipe pipe;
  	int wm_lp;
-	for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
  			dirty |= WM_DIRTY_LINETIME(pipe);
  			/* Must disable LP1+ watermarks too */
@@ -3502,10 +3502,10 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
  	return dirty;
  }
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+static bool _ilk_disable_lp_wm(struct drm_i915_private *i915,
  			       unsigned int dirty)
  {
-	struct ilk_wm_values *previous = &dev_priv->wm.hw;
+	struct ilk_wm_values *previous = &i915->wm.hw;
  	bool changed = false;
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
@@ -3536,18 +3536,18 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
   * The spec says we shouldn't write when we don't need, because every write
   * causes WMs to be re-evaluated, expending some power.
   */
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+static void ilk_write_wm_values(struct drm_i915_private *i915,
  				struct ilk_wm_values *results)
  {
-	struct ilk_wm_values *previous = &dev_priv->wm.hw;
+	struct ilk_wm_values *previous = &i915->wm.hw;
  	unsigned int dirty;
  	u32 val;
-	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+	dirty = ilk_compute_wm_dirty(i915, previous, results);
  	if (!dirty)
  		return;
-	_ilk_disable_lp_wm(dev_priv, dirty);
+	_ilk_disable_lp_wm(i915, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
  		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
@@ -3564,7 +3564,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
  		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
if (dirty & WM_DIRTY_DDB) {
-		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
  			val = I915_READ(WM_MISC);
  			if (results->partitioning == INTEL_DDB_PART_1_2)
  				val &= ~WM_MISC_DATA_PARTITION_5_6;
@@ -3594,7 +3594,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
  	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
  		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
-	if (INTEL_GEN(dev_priv) >= 7) {
+	if (INTEL_GEN(i915) >= 7) {
  		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
  			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
  		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
@@ -3608,17 +3608,17 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
  	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
  		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
-	dev_priv->wm.hw = *results;
+	i915->wm.hw = *results;
  }
bool ilk_disable_lp_wm(struct drm_device *dev)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
-	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+	return _ilk_disable_lp_wm(i915, WM_DIRTY_LP_ALL);
  }
-static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *i915)
  {
  	u8 enabled_slices;
@@ -3626,7 +3626,7 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
  	enabled_slices = 1;
/* Gen prior to GEN11 have only one DBuf slice */
-	if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(i915) < 11)
  		return enabled_slices;
/*
@@ -3644,16 +3644,16 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
   * FIXME: We still don't have the proper code detect if we need to apply the WA,
   * so assume we'll always need it in order to avoid underruns.
   */
-static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
+static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
  {
-	return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
+	return IS_GEN9_BC(i915) || IS_BROXTON(i915);
  }
static bool
-intel_has_sagv(struct drm_i915_private *dev_priv)
+intel_has_sagv(struct drm_i915_private *i915)
  {
-	return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
-		dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
+	return (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) &&
+		i915->sagv_status != I915_SAGV_NOT_CONTROLLED;
  }
/*
@@ -3668,18 +3668,18 @@ intel_has_sagv(struct drm_i915_private *dev_priv)
   *  - We're not using an interlaced display configuration
   */
  int
-intel_enable_sagv(struct drm_i915_private *dev_priv)
+intel_enable_sagv(struct drm_i915_private *i915)
  {
  	int ret;
-	if (!intel_has_sagv(dev_priv))
+	if (!intel_has_sagv(i915))
  		return 0;
-	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
+	if (i915->sagv_status == I915_SAGV_ENABLED)
  		return 0;
DRM_DEBUG_KMS("Enabling SAGV\n");
-	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+	ret = sandybridge_pcode_write(i915, GEN9_PCODE_SAGV_CONTROL,
  				      GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
@@ -3688,33 +3688,33 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
  	 * Some skl systems, pre-release machines in particular,
  	 * don't actually have SAGV.
  	 */
-	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
+	if (IS_SKYLAKE(i915) && ret == -ENXIO) {
  		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
-		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+		i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
  		return 0;
  	} else if (ret < 0) {
  		DRM_ERROR("Failed to enable SAGV\n");
  		return ret;
  	}
-	dev_priv->sagv_status = I915_SAGV_ENABLED;
+	i915->sagv_status = I915_SAGV_ENABLED;
  	return 0;
  }
int
-intel_disable_sagv(struct drm_i915_private *dev_priv)
+intel_disable_sagv(struct drm_i915_private *i915)
  {
  	int ret;
-	if (!intel_has_sagv(dev_priv))
+	if (!intel_has_sagv(i915))
  		return 0;
-	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
+	if (i915->sagv_status == I915_SAGV_DISABLED)
  		return 0;
DRM_DEBUG_KMS("Disabling SAGV\n");
  	/* bspec says to keep retrying for at least 1 ms */
-	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+	ret = skl_pcode_request(i915, GEN9_PCODE_SAGV_CONTROL,
  				GEN9_SAGV_DISABLE,
  				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
  				1);
@@ -3722,23 +3722,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
  	 * Some skl systems, pre-release machines in particular,
  	 * don't actually have SAGV.
  	 */
-	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
+	if (IS_SKYLAKE(i915) && ret == -ENXIO) {
  		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
-		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+		i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
  		return 0;
  	} else if (ret < 0) {
  		DRM_ERROR("Failed to disable SAGV (%d)\n", ret);
  		return ret;
  	}
-	dev_priv->sagv_status = I915_SAGV_DISABLED;
+	i915->sagv_status = I915_SAGV_DISABLED;
  	return 0;
  }
bool intel_can_enable_sagv(struct drm_atomic_state *state)
  {
  	struct drm_device *dev = state->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  	struct intel_crtc *crtc;
  	struct intel_plane *plane;
@@ -3747,12 +3747,12 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
  	int level, latency;
  	int sagv_block_time_us;
-	if (!intel_has_sagv(dev_priv))
+	if (!intel_has_sagv(i915))
  		return false;
-	if (IS_GEN(dev_priv, 9))
+	if (IS_GEN(i915, 9))
  		sagv_block_time_us = 30;
-	else if (IS_GEN(dev_priv, 10))
+	else if (IS_GEN(i915, 10))
  		sagv_block_time_us = 20;
  	else
  		sagv_block_time_us = 10;
@@ -3772,7 +3772,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
/* Since we're now guaranteed to only have one active CRTC... */
  	pipe = ffs(intel_state->active_crtcs) - 1;
-	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	crtc = intel_get_crtc_for_pipe(i915, pipe);
  	cstate = to_intel_crtc_state(crtc->base.state);
if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -3787,13 +3787,13 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
  			continue;
/* Find the highest enabled wm level for this plane */
-		for (level = ilk_wm_max_level(dev_priv);
+		for (level = ilk_wm_max_level(i915);
  		     !wm->wm[level].plane_en; --level)
  		     { }
-		latency = dev_priv->wm.skl_latency[level];
+		latency = i915->wm.skl_latency[level];
-		if (skl_needs_memory_bw_wa(dev_priv) &&
+		if (skl_needs_memory_bw_wa(i915) &&
  		    plane->base.state->fb->modifier ==
  		    I915_FORMAT_MOD_X_TILED)
  			latency += 15;
@@ -3810,7 +3810,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
  	return true;
  }
-static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
+static u16 intel_get_ddb_size(struct drm_i915_private *i915,
  			      const struct intel_crtc_state *cstate,
  			      const u64 total_data_rate,
  			      const int num_active,
@@ -3818,11 +3818,11 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
  {
  	const struct drm_display_mode *adjusted_mode;
  	u64 total_data_bw;
-	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+	u16 ddb_size = INTEL_INFO(i915)->ddb_size;
  	WARN_ON(ddb_size == 0);
-	if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(i915) < 11)
  		return ddb_size - 4; /* 4 blocks for bypass path allocation */
adjusted_mode = &cstate->base.adjusted_mode;
@@ -3847,7 +3847,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
  }
static void
-skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
+skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *i915,
  				   const struct intel_crtc_state *cstate,
  				   const u64 total_data_rate,
  				   struct skl_ddb_allocation *ddb,
@@ -3867,16 +3867,16 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
  	if (WARN_ON(!state) || !cstate->base.active) {
  		alloc->start = 0;
  		alloc->end = 0;
-		*num_active = hweight32(dev_priv->active_crtcs);
+		*num_active = hweight32(i915->active_crtcs);
  		return;
  	}
if (intel_state->active_pipe_changes)
  		*num_active = hweight32(intel_state->active_crtcs);
  	else
-		*num_active = hweight32(dev_priv->active_crtcs);
+		*num_active = hweight32(i915->active_crtcs);
-	ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
+	ddb_size = intel_get_ddb_size(i915, cstate, total_data_rate,
  				      *num_active, ddb);
/*
@@ -3939,8 +3939,8 @@ static unsigned int
  skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
  		      int num_active)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
+	int level, max_level = ilk_wm_max_level(i915);
  	struct skl_wm_level wm = {};
  	int ret, min_ddb_alloc = 0;
  	struct skl_wm_params wp;
@@ -3963,7 +3963,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
  	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
  }
-static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
+static void skl_ddb_entry_init_from_hw(struct drm_i915_private *i915,
  				       struct skl_ddb_entry *entry, u32 reg)
  {
@@ -3975,7 +3975,7 @@ static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
  }
static void
-skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
+skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
  			   const enum pipe pipe,
  			   const enum plane_id plane_id,
  			   struct skl_ddb_entry *ddb_y,
@@ -3987,7 +3987,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
  	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
  	if (plane_id == PLANE_CURSOR) {
  		val = I915_READ(CUR_BUF_CFG(pipe));
-		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+		skl_ddb_entry_init_from_hw(i915, ddb_y, val);
  		return;
  	}
@@ -3999,9 +3999,9 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
  					      val & PLANE_CTL_ORDER_RGBX,
  					      val & PLANE_CTL_ALPHA_MASK);
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
  		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+		skl_ddb_entry_init_from_hw(i915, ddb_y, val);
  	} else {
  		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
  		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
@@ -4009,8 +4009,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
  		if (is_planar_yuv_format(fourcc))
  			swap(val, val2);
-		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
-		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
+		skl_ddb_entry_init_from_hw(i915, ddb_y, val);
+		skl_ddb_entry_init_from_hw(i915, ddb_uv, val2);
  	}
  }
@@ -4018,30 +4018,30 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
  			       struct skl_ddb_entry *ddb_y,
  			       struct skl_ddb_entry *ddb_uv)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum intel_display_power_domain power_domain;
  	enum pipe pipe = crtc->pipe;
  	intel_wakeref_t wakeref;
  	enum plane_id plane_id;
power_domain = POWER_DOMAIN_PIPE(pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return;
for_each_plane_id_on_crtc(crtc, plane_id)
-		skl_ddb_get_hw_plane_state(dev_priv, pipe,
+		skl_ddb_get_hw_plane_state(i915, pipe,
  					   plane_id,
  					   &ddb_y[plane_id],
  					   &ddb_uv[plane_id]);
-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
  }
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+void skl_ddb_get_hw_state(struct drm_i915_private *i915,
  			  struct skl_ddb_allocation *ddb /* out */)
  {
-	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+	ddb->enabled_slices = intel_enabled_dbuf_slices_num(i915);
  }
/*
@@ -4138,7 +4138,7 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
  int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
  				  struct intel_crtc_state *cstate)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_crtc->base.dev);
  	struct drm_crtc_state *crtc_state = &cstate->base;
  	struct drm_atomic_state *state = crtc_state->state;
  	struct drm_plane *plane;
@@ -4181,7 +4181,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
  	crtc_clock = crtc_state->adjusted_mode.crtc_clock;
  	dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
-	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
+	if (IS_GEMINILAKE(i915) || INTEL_GEN(i915) >= 10)
  		dotclk *= 2;
pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
@@ -4341,7 +4341,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
  {
  	struct drm_atomic_state *state = cstate->base.state;
  	struct drm_crtc *crtc = cstate->base.crtc;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
  	u16 alloc_size, start = 0;
@@ -4367,7 +4367,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
  		return 0;
  	}
-	if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		total_data_rate =
  			icl_get_total_relative_data_rate(cstate,
  							 plane_data_rate);
@@ -4378,7 +4378,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
  							 uv_plane_data_rate);
-	skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+	skl_ddb_get_pipe_allocation_limits(i915, cstate, total_data_rate,
  					   ddb, alloc, &num_active);
  	alloc_size = skl_ddb_entry_size(alloc);
  	if (alloc_size == 0)
@@ -4398,7 +4398,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
  	 * Find the highest watermark level for which we can satisfy the block
  	 * requirement of active planes.
  	 */
-	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
+	for (level = ilk_wm_max_level(i915); level >= 0; level--) {
  		blocks = 0;
  		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
  			const struct skl_plane_wm *wm =
@@ -4484,7 +4484,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
  			continue;
/* Gen11+ uses a separate plane for UV watermarks */
-		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
+		WARN_ON(INTEL_GEN(i915) >= 11 && uv_total[plane_id]);
/* Leave disabled planes at (0,0) */
  		if (total[plane_id]) {
@@ -4506,7 +4506,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
  	 * all levels as "enabled."  Go back now and disable the ones
  	 * that aren't actually possible.
  	 */
-	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
+	for (level++; level <= ilk_wm_max_level(i915); level++) {
  		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
  			struct skl_plane_wm *wm =
  				&cstate->wm.skl.optimal.planes[plane_id];
@@ -4531,7 +4531,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
  			 * Wa_1408961008:icl, ehl
  			 * Underruns with WM1+ disabled
  			 */
-			if (IS_GEN(dev_priv, 11) &&
+			if (IS_GEN(i915, 11) &&
  			    level == 1 && wm->wm[0].plane_en) {
  				wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
  				wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
@@ -4562,7 +4562,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
   * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
  */
  static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
+skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
  	       u8 cpp, u32 latency, u32 dbuf_block_size)
  {
  	u32 wm_intermediate_val;
@@ -4574,7 +4574,7 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
  	wm_intermediate_val = latency * pixel_rate * cpp;
  	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
-	if (INTEL_GEN(dev_priv) >= 10)
+	if (INTEL_GEN(i915) >= 10)
  		ret = add_fixed16_u32(ret, 1);
return ret;
@@ -4648,7 +4648,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
  		      int color_plane)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	u32 interm_pbpl;
/* only planar format has two planes */
@@ -4673,7 +4673,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
  	wp->cpp = format->cpp[color_plane];
  	wp->plane_pixel_rate = plane_pixel_rate;
- if (INTEL_GEN(dev_priv) >= 11 &&
+	if (INTEL_GEN(i915) >= 11 &&
  	    modifier == I915_FORMAT_MOD_Yf_TILED  && wp->cpp == 1)
  		wp->dbuf_block_size = 256;
  	else
@@ -4698,7 +4698,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
  		wp->y_min_scanlines = 4;
  	}
-	if (skl_needs_memory_bw_wa(dev_priv))
+	if (skl_needs_memory_bw_wa(i915))
  		wp->y_min_scanlines *= 2;
wp->plane_bytes_per_line = wp->width * wp->cpp;
@@ -4707,12 +4707,12 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
  					   wp->y_min_scanlines,
  					   wp->dbuf_block_size);
-		if (INTEL_GEN(dev_priv) >= 10)
+		if (INTEL_GEN(i915) >= 10)
  			interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
  							wp->y_min_scanlines);
-	} else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
+	} else if (wp->x_tiled && IS_GEN(i915, 9)) {
  		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
  					   wp->dbuf_block_size);
  		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -4758,9 +4758,9 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
  				     wp, color_plane);
  }
-static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
+static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
  {
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		return true;
/* The number of lines are ignored for the level 0 watermark. */
@@ -4773,8 +4773,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
  				 const struct skl_wm_level *result_prev,
  				 struct skl_wm_level *result /* out */)
  {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
-	u32 latency = dev_priv->wm.skl_latency[level];
+	struct drm_i915_private *i915 = to_i915(cstate->base.crtc->dev);
+	u32 latency = i915->wm.skl_latency[level];
  	uint_fixed_16_16_t method1, method2;
  	uint_fixed_16_16_t selected_result;
  	u32 res_blocks, res_lines, min_ddb_alloc = 0;
@@ -4789,14 +4789,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
  	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
  	 * Display WA #1141: kbl,cfl
  	 */
-	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
-	    dev_priv->ipc_enabled)
+	if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) ||
+	    i915->ipc_enabled)
  		latency += 4;
-	if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
+	if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
  		latency += 15;
-	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
+	method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
  				 wp->cpp, latency, wp->dbuf_block_size);
  	method2 = skl_wm_method2(wp->plane_pixel_rate,
  				 cstate->base.adjusted_mode.crtc_htotal,
@@ -4811,8 +4811,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
  		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
  			selected_result = method2;
  		} else if (latency >= wp->linetime_us) {
-			if (IS_GEN(dev_priv, 9) &&
-			    !IS_GEMINILAKE(dev_priv))
+			if (IS_GEN(i915, 9) &&
+			    !IS_GEMINILAKE(i915))
  				selected_result = min_fixed16(method1, method2);
  			else
  				selected_result = method2;
@@ -4825,7 +4825,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
  	res_lines = div_round_up_fixed16(selected_result,
  					 wp->plane_blocks_per_line);
- if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
+	if (IS_GEN9_BC(i915) || IS_BROXTON(i915)) {
  		/* Display WA #1125: skl,bxt,kbl */
  		if (level == 0 && wp->rc_surface)
  			res_blocks +=
@@ -4852,7 +4852,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
  		}
  	}
- if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
  		if (wp->y_tiled) {
  			int extra_lines;
@@ -4870,7 +4870,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
  		}
  	}
- if (!skl_wm_has_lines(dev_priv, level))
+	if (!skl_wm_has_lines(i915, level))
  		res_lines = 0;
if (res_lines > 31) {
@@ -4897,8 +4897,8 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
  		      const struct skl_wm_params *wm_params,
  		      struct skl_wm_level *levels)
  {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	struct drm_i915_private *i915 = to_i915(cstate->base.crtc->dev);
+	int level, max_level = ilk_wm_max_level(i915);
  	struct skl_wm_level *result_prev = &levels[0];
for (level = 0; level <= max_level; level++) {
@@ -4915,7 +4915,7 @@ static u32
  skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
  {
  	struct drm_atomic_state *state = cstate->base.state;
-	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	struct drm_i915_private *i915 = to_i915(state->dev);
  	uint_fixed_16_16_t linetime_us;
  	u32 linetime_wm;
@@ -4923,7 +4923,7 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
  	linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
/* Display WA #1135: BXT:ALL GLK:ALL */
-	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
+	if (IS_GEN9_LP(i915) && i915->ipc_enabled)
  		linetime_wm /= 2;
return linetime_wm;
@@ -4934,21 +4934,21 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
  				      struct skl_plane_wm *wm)
  {
  	struct drm_device *dev = cstate->base.crtc->dev;
-	const struct drm_i915_private *dev_priv = to_i915(dev);
+	const struct drm_i915_private *i915 = to_i915(dev);
  	u16 trans_min, trans_y_tile_min;
  	const u16 trans_amount = 10; /* This is configurable amount */
  	u16 wm0_sel_res_b, trans_offset_b, res_blocks;
/* Transition WM are not recommended by HW team for GEN9 */
-	if (INTEL_GEN(dev_priv) <= 9)
+	if (INTEL_GEN(i915) <= 9)
  		return;
/* Transition WM don't make any sense if ipc is disabled */
-	if (!dev_priv->ipc_enabled)
+	if (!i915->ipc_enabled)
  		return;
trans_min = 14;
-	if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		trans_min = 4;
trans_offset_b = trans_min + trans_amount;
@@ -4974,7 +4974,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
  		res_blocks = wm0_sel_res_b + trans_offset_b;
/* WA BUG:1938466 add one block for non y-tile planes */
-		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
+		if (IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0))
  			res_blocks += 1;
}
@@ -5093,7 +5093,7 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
  {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(cstate->base.crtc->dev);
  	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
  	struct drm_crtc_state *crtc_state = &cstate->base;
  	struct drm_plane *plane;
@@ -5110,7 +5110,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
  		const struct intel_plane_state *intel_pstate =
  						to_intel_plane_state(pstate);
- if (INTEL_GEN(dev_priv) >= 11)
+		if (INTEL_GEN(i915) >= 11)
  			ret = icl_build_plane_wm(cstate, intel_pstate);
  		else
  			ret = skl_build_plane_wm(cstate, intel_pstate);
@@ -5123,7 +5123,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
  	return 0;
  }
-static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
+static void skl_ddb_entry_write(struct drm_i915_private *i915,
  				i915_reg_t reg,
  				const struct skl_ddb_entry *entry)
  {
@@ -5133,7 +5133,7 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
  		I915_WRITE_FW(reg, 0);
  }
-static void skl_write_wm_level(struct drm_i915_private *dev_priv,
+static void skl_write_wm_level(struct drm_i915_private *i915,
  			       i915_reg_t reg,
  			       const struct skl_wm_level *level)
  {
@@ -5152,8 +5152,8 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
  void skl_write_plane_wm(struct intel_plane *plane,
  			const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+	int level, max_level = ilk_wm_max_level(i915);
  	enum plane_id plane_id = plane->id;
  	enum pipe pipe = plane->pipe;
  	const struct skl_plane_wm *wm =
@@ -5164,14 +5164,14 @@ void skl_write_plane_wm(struct intel_plane *plane,
  		&crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++) {
-		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
+		skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
  				   &wm->wm[level]);
  	}
-	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
+	skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
  			   &wm->trans_wm);
- if (INTEL_GEN(dev_priv) >= 11) {
-		skl_ddb_entry_write(dev_priv,
+	if (INTEL_GEN(i915) >= 11) {
+		skl_ddb_entry_write(i915,
  				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
  		return;
  	}
@@ -5179,17 +5179,17 @@ void skl_write_plane_wm(struct intel_plane *plane,
  	if (wm->is_planar)
  		swap(ddb_y, ddb_uv);
- skl_ddb_entry_write(dev_priv,
+	skl_ddb_entry_write(i915,
  			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
-	skl_ddb_entry_write(dev_priv,
+	skl_ddb_entry_write(i915,
  			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
  }
void skl_write_cursor_wm(struct intel_plane *plane,
  			 const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+	int level, max_level = ilk_wm_max_level(i915);
  	enum plane_id plane_id = plane->id;
  	enum pipe pipe = plane->pipe;
  	const struct skl_plane_wm *wm =
@@ -5198,12 +5198,12 @@ void skl_write_cursor_wm(struct intel_plane *plane,
  		&crtc_state->wm.skl.plane_ddb_y[plane_id];
for (level = 0; level <= max_level; level++) {
-		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
+		skl_write_wm_level(i915, CUR_WM(pipe, level),
  				   &wm->wm[level]);
  	}
-	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
+	skl_write_wm_level(i915, CUR_WM_TRANS(pipe), &wm->trans_wm);
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
+	skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
  }
bool skl_wm_level_equals(const struct skl_wm_level *l1,
@@ -5215,11 +5215,11 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
  		l1->plane_res_b == l2->plane_res_b;
  }
-static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
+static bool skl_plane_wm_equals(struct drm_i915_private *i915,
  				const struct skl_plane_wm *wm1,
  				const struct skl_plane_wm *wm2)
  {
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, max_level = ilk_wm_max_level(i915);
for (level = 0; level <= max_level; level++) {
  		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
@@ -5234,11 +5234,11 @@ static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
  			       const struct skl_pipe_wm *wm1,
  			       const struct skl_pipe_wm *wm2)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
-		if (!skl_plane_wm_equals(dev_priv,
+		if (!skl_plane_wm_equals(i915,
  					 &wm1->planes[plane_id],
  					 &wm2->planes[plane_id]))
  			return false;
@@ -5287,10 +5287,10 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
  {
  	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
  	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
  		struct intel_plane_state *plane_state;
  		enum plane_id plane_id = plane->id;
@@ -5313,14 +5313,14 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
  static int
  skl_compute_ddb(struct intel_atomic_state *state)
  {
-	const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct drm_i915_private *i915 = to_i915(state->base.dev);
  	struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
  	struct intel_crtc_state *old_crtc_state;
  	struct intel_crtc_state *new_crtc_state;
  	struct intel_crtc *crtc;
  	int ret, i;
- memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+	memcpy(ddb, &i915->wm.skl_hw.ddb, sizeof(*ddb));
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
  					    new_crtc_state, i) {
@@ -5345,7 +5345,7 @@ static char enast(bool enable)
  static void
  skl_print_wm_changes(struct intel_atomic_state *state)
  {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	const struct intel_crtc_state *old_crtc_state;
  	const struct intel_crtc_state *new_crtc_state;
  	struct intel_plane *plane;
@@ -5362,7 +5362,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
  		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
  		new_pipe_wm = &new_crtc_state->wm.skl.optimal;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
  			enum plane_id plane_id = plane->id;
  			const struct skl_ddb_entry *old, *new;
@@ -5378,14 +5378,14 @@ skl_print_wm_changes(struct intel_atomic_state *state)
  				      skl_ddb_entry_size(old), skl_ddb_entry_size(new));
  		}
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
  			enum plane_id plane_id = plane->id;
  			const struct skl_plane_wm *old_wm, *new_wm;
old_wm = &old_pipe_wm->planes[plane_id];
  			new_wm = &new_pipe_wm->planes[plane_id];
- if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
+			if (skl_plane_wm_equals(i915, old_wm, new_wm))
  				continue;
DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
@@ -5460,7 +5460,7 @@ static int
  skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
  {
  	struct drm_device *dev = state->base.dev;
-	const struct drm_i915_private *dev_priv = to_i915(dev);
+	const struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *crtc;
  	struct intel_crtc_state *crtc_state;
  	u32 realloc_pipes = pipes_modified(state);
@@ -5470,14 +5470,14 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
  	 * When we distrust bios wm we always need to recompute to set the
  	 * expected DDB allocations for each CRTC.
  	 */
-	if (dev_priv->wm.distrust_bios_wm)
+	if (i915->wm.distrust_bios_wm)
  		(*changed) = true;
/*
  	 * If this transaction isn't actually touching any CRTC's, don't
  	 * bother with watermark calculation.  Note that if we pass this
  	 * test, we're guaranteed to hold at least one CRTC state mutex,
-	 * which means we can safely use values like dev_priv->active_crtcs
+	 * which means we can safely use values like i915->active_crtcs
  	 * since any racing commits that want to update them would need to
  	 * hold _all_ CRTC state mutexes.
  	 */
@@ -5493,7 +5493,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
  	 * pretend that all pipes switched active status so that we'll
  	 * ensure a full DDB recompute.
  	 */
-	if (dev_priv->wm.distrust_bios_wm) {
+	if (i915->wm.distrust_bios_wm) {
  		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
  				       state->base.acquire_ctx);
  		if (ret)
@@ -5508,7 +5508,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
  		 * on the first commit too.
  		 */
  		if (!state->modeset)
-			state->active_crtcs = dev_priv->active_crtcs;
+			state->active_crtcs = i915->active_crtcs;
  	}
/*
@@ -5567,14 +5567,14 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
  static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
  				      struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct intel_crtc_state *old_crtc_state =
  		intel_atomic_get_old_crtc_state(state, crtc);
  	struct intel_crtc_state *new_crtc_state =
  		intel_atomic_get_new_crtc_state(state, crtc);
  	struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
  		struct intel_plane_state *plane_state;
  		enum plane_id plane_id = plane->id;
@@ -5587,7 +5587,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
  		 * with the software state.
  		 */
  		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
-		    skl_plane_wm_equals(dev_priv,
+		    skl_plane_wm_equals(i915,
  					&old_crtc_state->wm.skl.optimal.planes[plane_id],
  					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
  			continue;
@@ -5654,7 +5654,7 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
  				      struct intel_crtc_state *cstate)
  {
  	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
  	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
  	enum pipe pipe = crtc->pipe;
@@ -5669,27 +5669,27 @@ static void skl_initial_wm(struct intel_atomic_state *state,
  {
  	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
  	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct skl_ddb_values *results = &state->wm_results;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
  		return;
- mutex_lock(&dev_priv->wm.wm_mutex);
+	mutex_lock(&i915->wm.wm_mutex);
if (cstate->base.active_changed)
  		skl_atomic_update_crtc_wm(state, cstate);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+	mutex_unlock(&i915->wm.wm_mutex);
  }
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_config(struct drm_i915_private *i915,
  				  struct intel_wm_config *config)
  {
  	struct intel_crtc *crtc;
/* Compute the currently _active_ config */
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
@@ -5701,7 +5701,7 @@ static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
  	}
  }
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_program_watermarks(struct drm_i915_private *i915)
  {
  	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
  	struct ilk_wm_maximums max;
@@ -5709,18 +5709,18 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
  	struct ilk_wm_values results = {};
  	enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_config(dev_priv, &config);
+	ilk_compute_wm_config(i915, &config);
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
-	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+	ilk_compute_wm_maximums(i915, 1, &config, INTEL_DDB_PART_1_2, &max);
+	ilk_wm_merge(i915, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
-	if (INTEL_GEN(dev_priv) >= 7 &&
+	if (INTEL_GEN(i915) >= 7 &&
  	    config.num_pipes_active == 1 && config.sprites_enabled) {
-		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
-		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+		ilk_compute_wm_maximums(i915, 1, &config, INTEL_DDB_PART_5_6, &max);
+		ilk_wm_merge(i915, &config, &max, &lp_wm_5_6);
- best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+		best_lp_wm = ilk_find_best_result(i915, &lp_wm_1_2, &lp_wm_5_6);
  	} else {
  		best_lp_wm = &lp_wm_1_2;
  	}
@@ -5728,35 +5728,35 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
  	partitioning = (best_lp_wm == &lp_wm_1_2) ?
  		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+	ilk_compute_wm_results(i915, best_lp_wm, partitioning, &results);
- ilk_write_wm_values(dev_priv, &results);
+	ilk_write_wm_values(i915, &results);
  }
static void ilk_initial_watermarks(struct intel_atomic_state *state,
  				   struct intel_crtc_state *cstate)
  {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(cstate->base.crtc->dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+	mutex_lock(&i915->wm.wm_mutex);
  	intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
-	ilk_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->wm.wm_mutex);
+	ilk_program_watermarks(i915);
+	mutex_unlock(&i915->wm.wm_mutex);
  }
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
  				    struct intel_crtc_state *cstate)
  {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+	struct drm_i915_private *i915 = to_i915(cstate->base.crtc->dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+	mutex_lock(&i915->wm.wm_mutex);
  	if (cstate->wm.need_postvbl_update) {
  		intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
-		ilk_program_watermarks(dev_priv);
+		ilk_program_watermarks(i915);
  	}
-	mutex_unlock(&dev_priv->wm.wm_mutex);
+	mutex_unlock(&i915->wm.wm_mutex);
  }
static inline void skl_wm_level_from_reg_val(u32 val,
@@ -5772,13 +5772,13 @@ static inline void skl_wm_level_from_reg_val(u32 val,
  void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
  			      struct skl_pipe_wm *out)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	int level, max_level;
  	enum plane_id plane_id;
  	u32 val;
- max_level = ilk_wm_max_level(dev_priv);
+	max_level = ilk_wm_max_level(i915);
for_each_plane_id_on_crtc(crtc, plane_id) {
  		struct skl_plane_wm *wm = &out->planes[plane_id];
@@ -5806,15 +5806,15 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
  	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
  }
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
+void skl_wm_get_hw_state(struct drm_i915_private *i915)
  {
-	struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
-	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
+	struct skl_ddb_values *hw = &i915->wm.skl_hw;
+	struct skl_ddb_allocation *ddb = &i915->wm.skl_hw.ddb;
  	struct intel_crtc *crtc;
  	struct intel_crtc_state *cstate;
- skl_ddb_get_hw_state(dev_priv, ddb);
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	skl_ddb_get_hw_state(i915, ddb);
+	for_each_intel_crtc(&i915->drm, crtc) {
  		cstate = to_intel_crtc_state(crtc->base.state);
skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
@@ -5823,17 +5823,17 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
  			hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
  	}
- if (dev_priv->active_crtcs) {
+	if (i915->active_crtcs) {
  		/* Fully recompute DDB on first atomic commit */
-		dev_priv->wm.distrust_bios_wm = true;
+		i915->wm.distrust_bios_wm = true;
  	}
  }
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
  {
  	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct ilk_wm_values *hw = &dev_priv->wm.hw;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct ilk_wm_values *hw = &i915->wm.hw;
  	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
  	struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
  	enum pipe pipe = crtc->pipe;
@@ -5844,7 +5844,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
  	};
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
memset(active, 0, sizeof(*active));
@@ -5866,7 +5866,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
  		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
  		active->linetime = hw->wm_linetime[pipe];
  	} else {
-		int level, max_level = ilk_wm_max_level(dev_priv);
+		int level, max_level = ilk_wm_max_level(i915);
/*
  		 * For inactive pipes, all watermark levels
@@ -5885,7 +5885,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
  #define _FW_WM_VLV(value, plane) \
  	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_read_wm_values(struct drm_i915_private *i915,
  			       struct g4x_wm_values *wm)
  {
  	u32 tmp;
@@ -5911,13 +5911,13 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
  	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
  }
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_read_wm_values(struct drm_i915_private *i915,
  			       struct vlv_wm_values *wm)
  {
  	enum pipe pipe;
  	u32 tmp;
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		tmp = I915_READ(VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
@@ -5944,7 +5944,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
  	tmp = I915_READ(DSPFW3);
  	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
- if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		tmp = I915_READ(DSPFW7_CHV);
  		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
  		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
@@ -5987,16 +5987,16 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
  #undef _FW_WM
  #undef _FW_WM_VLV
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+void g4x_wm_get_hw_state(struct drm_i915_private *i915)
  {
-	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
+	struct g4x_wm_values *wm = &i915->wm.g4x;
  	struct intel_crtc *crtc;
- g4x_read_wm_values(dev_priv, wm);
+	g4x_read_wm_values(i915, wm);
  	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		struct intel_crtc_state *crtc_state =
  			to_intel_crtc_state(crtc->base.state);
  		struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -6071,16 +6071,16 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
  		      yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
  }
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+void g4x_wm_sanitize(struct drm_i915_private *i915)
  {
  	struct intel_plane *plane;
  	struct intel_crtc *crtc;
- mutex_lock(&dev_priv->wm.wm_mutex);
+	mutex_lock(&i915->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+	for_each_intel_plane(&i915->drm, plane) {
  		struct intel_crtc *crtc =
-			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
+			intel_get_crtc_for_pipe(i915, plane->pipe);
  		struct intel_crtc_state *crtc_state =
  			to_intel_crtc_state(crtc->base.state);
  		struct intel_plane_state *plane_state =
@@ -6113,7 +6113,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
  		}
  	}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		struct intel_crtc_state *crtc_state =
  			to_intel_crtc_state(crtc->base.state);
@@ -6122,26 +6122,26 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
  		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
  	}
- g4x_program_watermarks(dev_priv);
+	g4x_program_watermarks(i915);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+	mutex_unlock(&i915->wm.wm_mutex);
  }
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+void vlv_wm_get_hw_state(struct drm_i915_private *i915)
  {
-	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+	struct vlv_wm_values *wm = &i915->wm.vlv;
  	struct intel_crtc *crtc;
  	u32 val;
- vlv_read_wm_values(dev_priv, wm);
+	vlv_read_wm_values(i915, wm);
wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
  	wm->level = VLV_WM_LEVEL_PM2;
- if (IS_CHERRYVIEW(dev_priv)) {
-		vlv_punit_get(dev_priv);
+	if (IS_CHERRYVIEW(i915)) {
+		vlv_punit_get(i915);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+		val = vlv_punit_read(i915, PUNIT_REG_DSPSSPM);
  		if (val & DSP_MAXFIFO_PM5_ENABLE)
  			wm->level = VLV_WM_LEVEL_PM5;
@@ -6154,25 +6154,25 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
  		 * HIGH/LOW bits so that we don't actually change
  		 * the current state.
  		 */
-		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+		val = vlv_punit_read(i915, PUNIT_REG_DDR_SETUP2);
  		val |= FORCE_DDR_FREQ_REQ_ACK;
-		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+		vlv_punit_write(i915, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+		if (wait_for((vlv_punit_read(i915, PUNIT_REG_DDR_SETUP2) &
  			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
  			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
  				      "assuming DDR DVFS is disabled\n");
-			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+			i915->wm.max_level = VLV_WM_LEVEL_PM5;
  		} else {
-			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+			val = vlv_punit_read(i915, PUNIT_REG_DDR_SETUP2);
  			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
  				wm->level = VLV_WM_LEVEL_DDR_DVFS;
  		}
- vlv_punit_put(dev_priv);
+		vlv_punit_put(i915);
  	}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		struct intel_crtc_state *crtc_state =
  			to_intel_crtc_state(crtc->base.state);
  		struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -6224,16 +6224,16 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
  		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
  }
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+void vlv_wm_sanitize(struct drm_i915_private *i915)
  {
  	struct intel_plane *plane;
  	struct intel_crtc *crtc;
- mutex_lock(&dev_priv->wm.wm_mutex);
+	mutex_lock(&i915->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+	for_each_intel_plane(&i915->drm, plane) {
  		struct intel_crtc *crtc =
-			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
+			intel_get_crtc_for_pipe(i915, plane->pipe);
  		struct intel_crtc_state *crtc_state =
  			to_intel_crtc_state(crtc->base.state);
  		struct intel_plane_state *plane_state =
@@ -6259,7 +6259,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
  		}
  	}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(&i915->drm, crtc) {
  		struct intel_crtc_state *crtc_state =
  			to_intel_crtc_state(crtc->base.state);
@@ -6268,16 +6268,16 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
  		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
  	}
- vlv_program_watermarks(dev_priv);
+	vlv_program_watermarks(i915);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+	mutex_unlock(&i915->wm.wm_mutex);
  }
/*
   * FIXME should probably kill this and improve
   * the real watermark readout/sanitation instead
   */
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_init_lp_watermarks(struct drm_i915_private *i915)
  {
  	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
  	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6289,14 +6289,14 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
  	 */
  }
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+void ilk_wm_get_hw_state(struct drm_i915_private *i915)
  {
-	struct ilk_wm_values *hw = &dev_priv->wm.hw;
+	struct ilk_wm_values *hw = &i915->wm.hw;
  	struct intel_crtc *crtc;
- ilk_init_lp_watermarks(dev_priv);
+	ilk_init_lp_watermarks(i915);
- for_each_intel_crtc(&dev_priv->drm, crtc)
+	for_each_intel_crtc(&i915->drm, crtc)
  		ilk_pipe_wm_get_hw_state(crtc);
hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
@@ -6304,15 +6304,15 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
  	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
-	if (INTEL_GEN(dev_priv) >= 7) {
+	if (INTEL_GEN(i915) >= 7) {
  		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
  		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
  	}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
  			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-	else if (IS_IVYBRIDGE(dev_priv))
+	else if (IS_IVYBRIDGE(i915))
  		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
  			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
@@ -6355,22 +6355,22 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
   */
  void intel_update_watermarks(struct intel_crtc *crtc)
  {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (dev_priv->display.update_wm)
-		dev_priv->display.update_wm(crtc);
+	if (i915->display.update_wm)
+		i915->display.update_wm(crtc);
  }
-void intel_enable_ipc(struct drm_i915_private *dev_priv)
+void intel_enable_ipc(struct drm_i915_private *i915)
  {
  	u32 val;
- if (!HAS_IPC(dev_priv))
+	if (!HAS_IPC(i915))
  		return;
  	val = I915_READ(DISP_ARB_CTL2);
-	if (dev_priv->ipc_enabled)
+	if (i915->ipc_enabled)
  		val |= DISP_IPC_ENABLE;
  	else
  		val &= ~DISP_IPC_ENABLE;
@@ -6378,27 +6378,27 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
  	I915_WRITE(DISP_ARB_CTL2, val);
  }
-static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
+static bool intel_can_enable_ipc(struct drm_i915_private *i915)
  {
  	/* Display WA #0477 WaDisableIPC: skl */
-	if (IS_SKYLAKE(dev_priv))
+	if (IS_SKYLAKE(i915))
  		return false;
/* Display WA #1141: SKL:all KBL:all CFL */
-	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
-		return dev_priv->dram_info.symmetric_memory;
+	if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
+		return i915->dram_info.symmetric_memory;
return true;
  }
-void intel_init_ipc(struct drm_i915_private *dev_priv)
+void intel_init_ipc(struct drm_i915_private *i915)
  {
-	if (!HAS_IPC(dev_priv))
+	if (!HAS_IPC(i915))
  		return;
- dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
+	i915->ipc_enabled = intel_can_enable_ipc(i915);
- intel_enable_ipc(dev_priv);
+	intel_enable_ipc(i915);
  }
/*
@@ -6430,9 +6430,9 @@ bool ironlake_set_drps(struct drm_i915_private *i915, u8 val)
  	return true;
  }
-static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
+static void ironlake_enable_drps(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
  	u32 rgvmodectl;
  	u8 fmax, fmin, fstart, vstart;
@@ -6463,12 +6463,12 @@ static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
  	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
  		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
- dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
-	dev_priv->ips.fstart = fstart;
+	i915->ips.fmax = fmax; /* IPS callback will increase this */
+	i915->ips.fstart = fstart;
- dev_priv->ips.max_delay = fstart;
-	dev_priv->ips.min_delay = fmin;
-	dev_priv->ips.cur_delay = fstart;
+	i915->ips.max_delay = fstart;
+	i915->ips.min_delay = fmin;
+	i915->ips.cur_delay = fstart;
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
  			 fmax, fmin, fstart);
@@ -6492,15 +6492,15 @@ static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
  		DRM_ERROR("stuck trying to change perf mode\n");
  	mdelay(1);
- ironlake_set_drps(dev_priv, fstart);
+	ironlake_set_drps(i915, fstart);
- dev_priv->ips.last_count1 =
+	i915->ips.last_count1 =
  		intel_uncore_read(uncore, DMIEC) +
  		intel_uncore_read(uncore, DDREC) +
  		intel_uncore_read(uncore, CSIEC);
-	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
-	dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
-	dev_priv->ips.last_time2 = ktime_get_raw_ns();
+	i915->ips.last_time1 = jiffies_to_msecs(jiffies);
+	i915->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
+	i915->ips.last_time2 = ktime_get_raw_ns();
spin_unlock_irq(&mchdev_lock);
  }
@@ -6543,9 +6543,9 @@ static void ironlake_disable_drps(struct drm_i915_private *i915)
   * ourselves, instead of doing a rmw cycle (which might result in us clearing
   * all limits and the gpu stuck at whatever frequency it is at atm).
   */
-static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
+static u32 intel_rps_limits(struct drm_i915_private *i915, u8 val)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	u32 limits;
/* Only set the down limit when we've reached the lowest level to avoid
@@ -6554,7 +6554,7 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
  	 * the hw runs at the minimal clock before selecting the desired
  	 * frequency, if the down threshold expires in that window we will not
  	 * receive a down interrupt. */
-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(i915) >= 9) {
  		limits = (rps->max_freq_softlimit) << 23;
  		if (val <= rps->min_freq_softlimit)
  			limits |= (rps->min_freq_softlimit) << 14;
@@ -6567,9 +6567,9 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
  	return limits;
  }
-static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
+static void rps_set_power(struct drm_i915_private *i915, int new_power)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	u32 threshold_up = 0, threshold_down = 0; /* in % */
  	u32 ei_up = 0, ei_down = 0;
@@ -6614,23 +6614,23 @@ static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
  	/* When byt can survive without system hang with dynamic
  	 * sw freq adjustments, this restriction can be lifted.
  	 */
-	if (IS_VALLEYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(i915))
  		goto skip_hw_write;
I915_WRITE(GEN6_RP_UP_EI,
-		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
+		   GT_INTERVAL_FROM_US(i915, ei_up));
  	I915_WRITE(GEN6_RP_UP_THRESHOLD,
-		   GT_INTERVAL_FROM_US(dev_priv,
+		   GT_INTERVAL_FROM_US(i915,
  				       ei_up * threshold_up / 100));
I915_WRITE(GEN6_RP_DOWN_EI,
-		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
+		   GT_INTERVAL_FROM_US(i915, ei_down));
  	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
-		   GT_INTERVAL_FROM_US(dev_priv,
+		   GT_INTERVAL_FROM_US(i915,
  				       ei_down * threshold_down / 100));
I915_WRITE(GEN6_RP_CONTROL,
-		   (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+		   (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
  		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
  		   GEN6_RP_MEDIA_IS_GFX |
  		   GEN6_RP_ENABLE |
@@ -6643,9 +6643,9 @@ static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
  	rps->power.down_threshold = threshold_down;
  }
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+static void gen6_set_rps_thresholds(struct drm_i915_private *i915, u8 val)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	int new_power;
new_power = rps->power.mode;
@@ -6680,7 +6680,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
  	mutex_lock(&rps->power.mutex);
  	if (rps->power.interactive)
  		new_power = HIGH_POWER;
-	rps_set_power(dev_priv, new_power);
+	rps_set_power(i915, new_power);
  	mutex_unlock(&rps->power.mutex);
  }
@@ -6702,9 +6702,9 @@ void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
  	mutex_unlock(&rps->power.mutex);
  }
-static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
+static u32 gen6_rps_pm_mask(struct drm_i915_private *i915, u8 val)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	u32 mask = 0;
/* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
@@ -6713,28 +6713,28 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
  	if (val < rps->max_freq_softlimit)
  		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask &= dev_priv->pm_rps_events;
+	mask &= i915->pm_rps_events;
- return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
+	return gen6_sanitize_rps_pm_mask(i915, ~mask);
  }
/* gen6_set_rps is called to update the frequency request, but should also be
   * called when the range (min_delay and max_delay) is modified so that we can
   * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
+static int gen6_set_rps(struct drm_i915_private *i915, u8 val)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
/* min/max delay may still have been modified so be sure to
  	 * write the limits value.
  	 */
  	if (val != rps->cur_freq) {
-		gen6_set_rps_thresholds(dev_priv, val);
+		gen6_set_rps_thresholds(i915, val);
- if (INTEL_GEN(dev_priv) >= 9)
+		if (INTEL_GEN(i915) >= 9)
  			I915_WRITE(GEN6_RPNSWREQ,
  				   GEN9_FREQUENCY(val));
-		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  			I915_WRITE(GEN6_RPNSWREQ,
  				   HSW_FREQUENCY(val));
  		else
@@ -6747,37 +6747,37 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
  	/* Make sure we continue to get interrupts
  	 * until we hit the minimum or maximum frequencies.
  	 */
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
-	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(i915, val));
+	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(i915, val));
rps->cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+	trace_intel_gpu_freq_change(intel_gpu_freq(i915, val));
return 0;
  }
-static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
+static int valleyview_set_rps(struct drm_i915_private *i915, u8 val)
  {
  	int err;
- if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
+	if (WARN_ONCE(IS_CHERRYVIEW(i915) && (val & 1),
  		      "Odd GPU freq value\n"))
  		val &= ~1;
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(i915, val));
- if (val != dev_priv->gt_pm.rps.cur_freq) {
-		vlv_punit_get(dev_priv);
-		err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-		vlv_punit_put(dev_priv);
+	if (val != i915->gt_pm.rps.cur_freq) {
+		vlv_punit_get(i915);
+		err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
+		vlv_punit_put(i915);
  		if (err)
  			return err;
- gen6_set_rps_thresholds(dev_priv, val);
+		gen6_set_rps_thresholds(i915, val);
  	}
- dev_priv->gt_pm.rps.cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+	i915->gt_pm.rps.cur_freq = val;
+	trace_intel_gpu_freq_change(intel_gpu_freq(i915, val));
return 0;
  }
@@ -6789,9 +6789,9 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
   * 2. Request idle freq.
   * 3. Release Forcewake of Media well.
  */
-static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
+static void vlv_set_rps_idle(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	u32 val = rps->idle_freq;
  	int err;
@@ -6810,28 +6810,28 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
  	 * punit into committing the voltage change) as that takes a lot less
  	 * power than the render powerwell.
  	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
-	err = valleyview_set_rps(dev_priv, val);
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_MEDIA);
+	err = valleyview_set_rps(i915, val);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_MEDIA);
if (err)
  		DRM_ERROR("Failed to set RPS for idle\n");
  }
-void gen6_rps_busy(struct drm_i915_private *dev_priv)
+void gen6_rps_busy(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
mutex_lock(&rps->lock);
  	if (rps->enabled) {
  		u8 freq;
- if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
-			gen6_rps_reset_ei(dev_priv);
+		if (i915->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
+			gen6_rps_reset_ei(i915);
  		I915_WRITE(GEN6_PMINTRMSK,
-			   gen6_rps_pm_mask(dev_priv, rps->cur_freq));
+			   gen6_rps_pm_mask(i915, rps->cur_freq));
- gen6_enable_rps_interrupts(dev_priv);
+		gen6_enable_rps_interrupts(i915);
/* Use the user's desired frequency as a guide, but for better
  		 * performance, jump directly to RPe as our starting frequency.
@@ -6839,7 +6839,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
  		freq = max(rps->cur_freq,
  			   rps->efficient_freq);
- if (intel_set_rps(dev_priv,
+		if (intel_set_rps(i915,
  				  clamp(freq,
  					rps->min_freq_softlimit,
  					rps->max_freq_softlimit)))
@@ -6848,26 +6848,26 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
  	mutex_unlock(&rps->lock);
  }
-void gen6_rps_idle(struct drm_i915_private *dev_priv)
+void gen6_rps_idle(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
/* Flush our bottom-half so that it does not race with us
  	 * setting the idle frequency and so that it is bounded by
  	 * our rpm wakeref. And then disable the interrupts to stop any
  	 * futher RPS reclocking whilst we are asleep.
  	 */
-	gen6_disable_rps_interrupts(dev_priv);
+	gen6_disable_rps_interrupts(i915);
mutex_lock(&rps->lock);
  	if (rps->enabled) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-			vlv_set_rps_idle(dev_priv);
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+			vlv_set_rps_idle(i915);
  		else
-			gen6_set_rps(dev_priv, rps->idle_freq);
+			gen6_set_rps(i915, rps->idle_freq);
  		rps->last_adj = 0;
  		I915_WRITE(GEN6_PMINTRMSK,
-			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
+			   gen6_sanitize_rps_pm_mask(i915, ~0));
  	}
  	mutex_unlock(&rps->lock);
  }
@@ -6904,9 +6904,9 @@ void gen6_rps_boost(struct i915_request *rq)
  	atomic_inc(&rps->boosts);
  }
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
+int intel_set_rps(struct drm_i915_private *i915, u8 val)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	int err;
lockdep_assert_held(&rps->lock);
@@ -6918,63 +6918,63 @@ int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
  		return 0;
  	}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		err = valleyview_set_rps(dev_priv, val);
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		err = valleyview_set_rps(i915, val);
  	else
-		err = gen6_set_rps(dev_priv, val);
+		err = gen6_set_rps(i915, val);
return err;
  }
-static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
+static void gen9_disable_rc6(struct drm_i915_private *i915)
  {
  	I915_WRITE(GEN6_RC_CONTROL, 0);
  	I915_WRITE(GEN9_PG_ENABLE, 0);
  }
-static void gen9_disable_rps(struct drm_i915_private *dev_priv)
+static void gen9_disable_rps(struct drm_i915_private *i915)
  {
  	I915_WRITE(GEN6_RP_CONTROL, 0);
  }
-static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
+static void gen6_disable_rc6(struct drm_i915_private *i915)
  {
  	I915_WRITE(GEN6_RC_CONTROL, 0);
  }
-static void gen6_disable_rps(struct drm_i915_private *dev_priv)
+static void gen6_disable_rps(struct drm_i915_private *i915)
  {
  	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
  	I915_WRITE(GEN6_RP_CONTROL, 0);
  }
-static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
+static void cherryview_disable_rc6(struct drm_i915_private *i915)
  {
  	I915_WRITE(GEN6_RC_CONTROL, 0);
  }
-static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_disable_rps(struct drm_i915_private *i915)
  {
  	I915_WRITE(GEN6_RP_CONTROL, 0);
  }
-static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
+static void valleyview_disable_rc6(struct drm_i915_private *i915)
  {
  	/* We're doing forcewake before Disabling RC6,
  	 * This what the BIOS expects when going into suspend */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
  	I915_WRITE(GEN6_RC_CONTROL, 0);
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
+static void valleyview_disable_rps(struct drm_i915_private *i915)
  {
  	I915_WRITE(GEN6_RP_CONTROL, 0);
  }
-static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
+static bool bxt_check_bios_rc6_setup(struct drm_i915_private *i915)
  {
  	bool enable_rc6 = true;
  	unsigned long rc6_ctx_base;
@@ -7000,8 +7000,8 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
  	 * for this check.
  	 */
  	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
-	if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
-	      (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
+	if (!((rc6_ctx_base >= i915->dsm_reserved.start) &&
+	      (rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end))) {
  		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
  		enable_rc6 = false;
  	}
@@ -7062,14 +7062,14 @@ static bool sanitize_rc6(struct drm_i915_private *i915)
  	return info->has_rc6;
  }
-static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
+static void gen6_init_rps_frequencies(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	/* All of these values are in units of 50MHz */
  	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
-	if (IS_GEN9_LP(dev_priv)) {
+	if (IS_GEN9_LP(i915)) {
  		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
  		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
  		rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
@@ -7084,11 +7084,11 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
  	rps->max_freq = rps->rp0_freq;
rps->efficient_freq = rps->rp1_freq;
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
-	    IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
+	    IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
  		u32 ddcc_status = 0;
- if (sandybridge_pcode_read(dev_priv,
+		if (sandybridge_pcode_read(i915,
  					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
  					   &ddcc_status, NULL) == 0)
  			rps->efficient_freq =
@@ -7098,7 +7098,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
  					rps->max_freq);
  	}
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
+	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
  		/* Store the frequency values in 16.66 MHZ units, which is
  		 * the natural hardware unit for SKL
  		 */
@@ -7110,45 +7110,45 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
  	}
  }
-static void reset_rps(struct drm_i915_private *dev_priv,
+static void reset_rps(struct drm_i915_private *i915,
  		      int (*set)(struct drm_i915_private *, u8))
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	u8 freq = rps->cur_freq;
/* force a reset */
  	rps->power.mode = -1;
  	rps->cur_freq = -1;
- if (set(dev_priv, freq))
+	if (set(i915, freq))
  		DRM_ERROR("Failed to reset RPS to initial values\n");
  }
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_i915_private *dev_priv)
+static void gen9_enable_rps(struct drm_i915_private *i915)
  {
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* Program defaults and thresholds for RPS */
-	if (IS_GEN(dev_priv, 9))
+	if (IS_GEN(i915, 9))
  		I915_WRITE(GEN6_RC_VIDEO_FREQ,
-			GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
+			GEN9_FREQUENCY(i915->gt_pm.rps.rp1_freq));
/* 1 second timeout*/
  	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
-		GT_INTERVAL_FROM_US(dev_priv, 1000000));
+		GT_INTERVAL_FROM_US(i915, 1000000));
  	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
  	/* Leaning on the below call to gen6_set_rps to program/setup the
  	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
  	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
-	reset_rps(dev_priv, gen6_set_rps);
+	reset_rps(i915, gen6_set_rps);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
+static void gen11_enable_rc6(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
@@ -7160,7 +7160,7 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
  	 * 1b: Get forcewake during program sequence. Although the driver
  	 * hasn't enabled a state yet where we need forcewake, BIOS may have.
  	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
  	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7171,10 +7171,10 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
  	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- if (HAS_GUC(dev_priv))
+	if (HAS_GUC(i915))
  		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -7217,10 +7217,10 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
  		   GEN9_MEDIA_PG_ENABLE |
  		   GEN11_MEDIA_SAMPLER_PG_ENABLE);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
+static void gen9_enable_rc6(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
@@ -7231,16 +7231,16 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
/* 1b: Get forcewake during program sequence. Although the driver
  	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
  	I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2b: Program RC6 thresholds.*/
-	if (INTEL_GEN(dev_priv) >= 10) {
+	if (INTEL_GEN(i915) >= 10) {
  		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
  		I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
-	} else if (IS_SKYLAKE(dev_priv)) {
+	} else if (IS_SKYLAKE(i915)) {
  		/*
  		 * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
  		 * when CPG is enabled
@@ -7252,10 +7252,10 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
  	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- if (HAS_GUC(dev_priv))
+	if (HAS_GUC(i915))
  		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -7288,7 +7288,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
  	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
/* WaRsUseTimeoutMode:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
+	if (IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_C0))
  		rc6_mode = GEN7_RC_CTL_TO_MODE;
  	else
  		rc6_mode = GEN6_RC_CTL_EI_MODE(1);
@@ -7302,16 +7302,16 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
  	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
  	 * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
  	 */
-	if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+	if (NEEDS_WaRsDisableCoarsePowerGating(i915))
  		I915_WRITE(GEN9_PG_ENABLE, 0);
  	else
  		I915_WRITE(GEN9_PG_ENABLE,
  			   GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
+static void gen8_enable_rc6(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
@@ -7321,7 +7321,7 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
/* 1b: Get forcewake during program sequence. Although the driver
  	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
  	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7330,7 +7330,7 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
  	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
  	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
  	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
  	I915_WRITE(GEN6_RC_SLEEP, 0);
  	I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
@@ -7342,14 +7342,14 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
  		   GEN7_RC_CTL_TO_MODE |
  		   GEN6_RC_CTL_RC6_ENABLE);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void gen8_enable_rps(struct drm_i915_private *dev_priv)
+static void gen8_enable_rps(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* 1 Program defaults and thresholds for RPS*/
  	I915_WRITE(GEN6_RPNSWREQ,
@@ -7380,12 +7380,12 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
  		   GEN6_RP_UP_BUSY_AVG |
  		   GEN6_RP_DOWN_IDLE_AVG);
- reset_rps(dev_priv, gen6_set_rps);
+	reset_rps(i915, gen6_set_rps);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
+static void gen6_enable_rc6(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
@@ -7402,7 +7402,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
  		I915_WRITE(GTFIFODBG, gtfifodbg);
  	}
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* disable the counters and set deterministic thresholds */
  	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7413,12 +7413,12 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
  	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
  	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
  	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-	if (IS_IVYBRIDGE(dev_priv))
+	if (IS_IVYBRIDGE(i915))
  		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
  	else
  		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -7427,9 +7427,9 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
/* We don't use those on Haswell */
  	rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
-	if (HAS_RC6p(dev_priv))
+	if (HAS_RC6p(i915))
  		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
-	if (HAS_RC6pp(dev_priv))
+	if (HAS_RC6pp(i915))
  		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
  	I915_WRITE(GEN6_RC_CONTROL,
  		   rc6_mask |
@@ -7437,24 +7437,24 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
  		   GEN6_RC_CTL_HW_ENABLE);
rc6vids = 0;
-	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
+	ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
  				     &rc6vids, NULL);
-	if (IS_GEN(dev_priv, 6) && ret) {
+	if (IS_GEN(i915, 6) && ret) {
  		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
-	} else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+	} else if (IS_GEN(i915, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
  		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
  			  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
  		rc6vids &= 0xffff00;
  		rc6vids |= GEN6_ENCODE_RC6_VID(450);
-		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+		ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
  		if (ret)
  			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
  	}
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void gen6_enable_rps(struct drm_i915_private *dev_priv)
+static void gen6_enable_rps(struct drm_i915_private *i915)
  {
  	/* Here begins a magic sequence of register writes to enable
  	 * auto-downclocking.
@@ -7462,20 +7462,20 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
  	 * Perhaps there might be some value in exposing these to
  	 * userspace...
  	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* Power down if completely idle for over 50ms */
  	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
  	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- reset_rps(dev_priv, gen6_set_rps);
+	reset_rps(i915, gen6_set_rps);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+static void gen6_update_ring_freq(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	const int min_freq = 15;
  	const int scaling_factor = 180;
  	unsigned int gpu_freq;
@@ -7509,7 +7509,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
min_gpu_freq = rps->min_freq;
  	max_gpu_freq = rps->max_freq;
-	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
+	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
  		/* Convert GT frequency to 50 HZ units */
  		min_gpu_freq /= GEN9_FREQ_SCALER;
  		max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -7524,16 +7524,16 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
  		const int diff = max_gpu_freq - gpu_freq;
  		unsigned int ia_freq = 0, ring_freq = 0;
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
+		if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
  			/*
  			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
  			 * No floor required for ring frequency on SKL.
  			 */
  			ring_freq = gpu_freq;
-		} else if (INTEL_GEN(dev_priv) >= 8) {
+		} else if (INTEL_GEN(i915) >= 8) {
  			/* max(2 * GT, DDR). NB: GT is 50MHz units */
  			ring_freq = max(min_ring_freq, gpu_freq);
-		} else if (IS_HASWELL(dev_priv)) {
+		} else if (IS_HASWELL(i915)) {
  			ring_freq = mult_frac(gpu_freq, 5, 4);
  			ring_freq = max(min_ring_freq, ring_freq);
  			/* leave ia_freq as the default, chosen by cpufreq */
@@ -7552,7 +7552,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
  			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
  		}
- sandybridge_pcode_write(dev_priv,
+		sandybridge_pcode_write(i915,
  					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
  					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
  					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
@@ -7560,13 +7560,13 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
  	}
  }
-static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_max_freq(struct drm_i915_private *i915)
  {
  	u32 val, rp0;
- val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
- switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
+	switch (RUNTIME_INFO(i915)->sseu.eu_total) {
  	case 8:
  		/* (2 * 4) config */
  		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -7588,53 +7588,53 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
  	return rp0;
  }
-static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_rpe_freq(struct drm_i915_private *i915)
  {
  	u32 val, rpe;
- val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
  	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
return rpe;
  }
-static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_guar_freq(struct drm_i915_private *i915)
  {
  	u32 val, rp1;
- val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
  	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
return rp1;
  }
-static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+static u32 cherryview_rps_min_freq(struct drm_i915_private *i915)
  {
  	u32 val, rpn;
- val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
+	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
  	rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
  		       FB_GFX_FREQ_FUSE_MASK);
return rpn;
  }
-static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_guar_freq(struct drm_i915_private *i915)
  {
  	u32 val, rp1;
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
  	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

  	return rp1;
  }
-static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_max_freq(struct drm_i915_private *i915)
  {
  	u32 val, rp0;
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
  	/* Clamp to max */
@@ -7643,23 +7643,23 @@ static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
  	return rp0;
  }
-static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_rpe_freq(struct drm_i915_private *i915)
  {
  	u32 val, rpe;
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
  	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
-	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
  	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
return rpe;
  }
-static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_min_freq(struct drm_i915_private *i915)
  {
  	u32 val;
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
+	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
  	/*
  	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
  	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
@@ -7671,24 +7671,24 @@ static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
  }
/* Check that the pctx buffer wasn't move under us. */
-static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
+static void valleyview_check_pctx(struct drm_i915_private *i915)
  {
  	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
- WARN_ON(pctx_addr != dev_priv->dsm.start +
-			     dev_priv->vlv_pctx->stolen->start);
+	WARN_ON(pctx_addr != i915->dsm.start +
+			     i915->vlv_pctx->stolen->start);
  }
/* Check that the pcbr address is not empty. */
-static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+static void cherryview_check_pctx(struct drm_i915_private *i915)
  {
  	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
  }
-static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
+static void cherryview_setup_pctx(struct drm_i915_private *i915)
  {
  	resource_size_t pctx_paddr, paddr;
  	resource_size_t pctx_size = 32*1024;
@@ -7697,7 +7697,7 @@ static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
  	pcbr = I915_READ(VLV_PCBR);
  	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
  		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
-		paddr = dev_priv->dsm.end + 1 - pctx_size;
+		paddr = i915->dsm.end + 1 - pctx_size;
  		GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & (~4095));
@@ -7707,7 +7707,7 @@ static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
  	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
  }
-static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
+static void valleyview_setup_pctx(struct drm_i915_private *i915)
  {
  	struct drm_i915_gem_object *pctx;
  	resource_size_t pctx_paddr;
@@ -7719,8 +7719,8 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
  		/* BIOS set it up already, grab the pre-alloc'd space */
  		resource_size_t pcbr_offset;
- pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
-		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
+		pcbr_offset = (pcbr & (~4095)) - i915->dsm.start;
+		pctx = i915_gem_object_create_stolen_for_preallocated(i915,
  								      pcbr_offset,
  								      I915_GTT_OFFSET_NONE,
  								      pctx_size);
@@ -7737,148 +7737,148 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
  	 * overlap with other ranges, such as the frame buffer, protected
  	 * memory, or any other relevant ranges.
  	 */
-	pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
+	pctx = i915_gem_object_create_stolen(i915, pctx_size);
  	if (!pctx) {
  		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
  		goto out;
  	}
GEM_BUG_ON(range_overflows_t(u64,
-				     dev_priv->dsm.start,
+				     i915->dsm.start,
  				     pctx->stolen->start,
  				     U32_MAX));
-	pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
+	pctx_paddr = i915->dsm.start + pctx->stolen->start;
  	I915_WRITE(VLV_PCBR, pctx_paddr);
out:
  	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
-	dev_priv->vlv_pctx = pctx;
+	i915->vlv_pctx = pctx;
  }
-static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
+static void valleyview_cleanup_pctx(struct drm_i915_private *i915)
  {
  	struct drm_i915_gem_object *pctx;
- pctx = fetch_and_zero(&dev_priv->vlv_pctx);
+	pctx = fetch_and_zero(&i915->vlv_pctx);
  	if (pctx)
  		i915_gem_object_put(pctx);
  }
-static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
+static void vlv_init_gpll_ref_freq(struct drm_i915_private *i915)
  {
-	dev_priv->gt_pm.rps.gpll_ref_freq =
-		vlv_get_cck_clock(dev_priv, "GPLL ref",
+	i915->gt_pm.rps.gpll_ref_freq =
+		vlv_get_cck_clock(i915, "GPLL ref",
  				  CCK_GPLL_CLOCK_CONTROL,
-				  dev_priv->czclk_freq);
+				  i915->czclk_freq);
DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
-			 dev_priv->gt_pm.rps.gpll_ref_freq);
+			 i915->gt_pm.rps.gpll_ref_freq);
  }
-static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
+static void valleyview_init_gt_powersave(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	u32 val;
- valleyview_setup_pctx(dev_priv);
+	valleyview_setup_pctx(i915);
- vlv_iosf_sb_get(dev_priv,
+	vlv_iosf_sb_get(i915,
  			BIT(VLV_IOSF_SB_PUNIT) |
  			BIT(VLV_IOSF_SB_NC) |
  			BIT(VLV_IOSF_SB_CCK));
- vlv_init_gpll_ref_freq(dev_priv);
+	vlv_init_gpll_ref_freq(i915);
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
  	switch ((val >> 6) & 3) {
  	case 0:
  	case 1:
-		dev_priv->mem_freq = 800;
+		i915->mem_freq = 800;
  		break;
  	case 2:
-		dev_priv->mem_freq = 1066;
+		i915->mem_freq = 1066;
  		break;
  	case 3:
-		dev_priv->mem_freq = 1333;
+		i915->mem_freq = 1333;
  		break;
  	}
-	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
- rps->max_freq = valleyview_rps_max_freq(dev_priv);
+	rps->max_freq = valleyview_rps_max_freq(i915);
  	rps->rp0_freq = rps->max_freq;
  	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->max_freq),
+			 intel_gpu_freq(i915, rps->max_freq),
  			 rps->max_freq);
- rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
+	rps->efficient_freq = valleyview_rps_rpe_freq(i915);
  	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->efficient_freq),
+			 intel_gpu_freq(i915, rps->efficient_freq),
  			 rps->efficient_freq);
- rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
+	rps->rp1_freq = valleyview_rps_guar_freq(i915);
  	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->rp1_freq),
+			 intel_gpu_freq(i915, rps->rp1_freq),
  			 rps->rp1_freq);
- rps->min_freq = valleyview_rps_min_freq(dev_priv);
+	rps->min_freq = valleyview_rps_min_freq(i915);
  	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->min_freq),
+			 intel_gpu_freq(i915, rps->min_freq),
  			 rps->min_freq);
- vlv_iosf_sb_put(dev_priv,
+	vlv_iosf_sb_put(i915,
  			BIT(VLV_IOSF_SB_PUNIT) |
  			BIT(VLV_IOSF_SB_NC) |
  			BIT(VLV_IOSF_SB_CCK));
  }
-static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
+static void cherryview_init_gt_powersave(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	u32 val;
- cherryview_setup_pctx(dev_priv);
+	cherryview_setup_pctx(i915);
- vlv_iosf_sb_get(dev_priv,
+	vlv_iosf_sb_get(i915,
  			BIT(VLV_IOSF_SB_PUNIT) |
  			BIT(VLV_IOSF_SB_NC) |
  			BIT(VLV_IOSF_SB_CCK));
- vlv_init_gpll_ref_freq(dev_priv);
+	vlv_init_gpll_ref_freq(i915);
- val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
+	val = vlv_cck_read(i915, CCK_FUSE_REG);
switch ((val >> 2) & 0x7) {
  	case 3:
-		dev_priv->mem_freq = 2000;
+		i915->mem_freq = 2000;
  		break;
  	default:
-		dev_priv->mem_freq = 1600;
+		i915->mem_freq = 1600;
  		break;
  	}
-	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
- rps->max_freq = cherryview_rps_max_freq(dev_priv);
+	rps->max_freq = cherryview_rps_max_freq(i915);
  	rps->rp0_freq = rps->max_freq;
  	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->max_freq),
+			 intel_gpu_freq(i915, rps->max_freq),
  			 rps->max_freq);
- rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+	rps->efficient_freq = cherryview_rps_rpe_freq(i915);
  	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->efficient_freq),
+			 intel_gpu_freq(i915, rps->efficient_freq),
  			 rps->efficient_freq);
- rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
+	rps->rp1_freq = cherryview_rps_guar_freq(i915);
  	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->rp1_freq),
+			 intel_gpu_freq(i915, rps->rp1_freq),
  			 rps->rp1_freq);
- rps->min_freq = cherryview_rps_min_freq(dev_priv);
+	rps->min_freq = cherryview_rps_min_freq(i915);
  	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->min_freq),
+			 intel_gpu_freq(i915, rps->min_freq),
  			 rps->min_freq);
- vlv_iosf_sb_put(dev_priv,
+	vlv_iosf_sb_put(i915,
  			BIT(VLV_IOSF_SB_PUNIT) |
  			BIT(VLV_IOSF_SB_NC) |
  			BIT(VLV_IOSF_SB_CCK));
@@ -7888,12 +7888,12 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
  		  "Odd GPU freq values\n");
  }
-static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
+static void valleyview_cleanup_gt_powersave(struct drm_i915_private *i915)
  {
-	valleyview_cleanup_pctx(dev_priv);
+	valleyview_cleanup_pctx(i915);
  }
-static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
+static void cherryview_enable_rc6(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
@@ -7907,11 +7907,11 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
  		I915_WRITE(GTFIFODBG, gtfifodbg);
  	}
- cherryview_check_pctx(dev_priv);
+	cherryview_check_pctx(i915);
/* 1a & 1b: Get forcewake during program sequence. Although the driver
  	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* Disable RC states. */
  	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7921,7 +7921,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
  	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
  	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
  	I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -7943,14 +7943,14 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
  		rc6_mode = GEN7_RC_CTL_TO_MODE;
  	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_enable_rps(struct drm_i915_private *i915)
  {
  	u32 val;
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* 1: Program defaults and thresholds for RPS*/
  	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
@@ -7970,14 +7970,14 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
  		   GEN6_RP_DOWN_IDLE_AVG);
/* Setting Fixed Bias */
-	vlv_punit_get(dev_priv);
+	vlv_punit_get(i915);
val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
-	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
+	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(dev_priv);
+	vlv_punit_put(i915);
/* RPS code assumes GPLL is used */
  	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
@@ -7985,18 +7985,18 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
  	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
  	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
- reset_rps(dev_priv, valleyview_set_rps);
+	reset_rps(i915, valleyview_set_rps);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
+static void valleyview_enable_rc6(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
  	u32 gtfifodbg;
- valleyview_check_pctx(dev_priv);
+	valleyview_check_pctx(i915);
gtfifodbg = I915_READ(GTFIFODBG);
  	if (gtfifodbg) {
@@ -8005,7 +8005,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
  		I915_WRITE(GTFIFODBG, gtfifodbg);
  	}
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* Disable RC states. */
  	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -8014,7 +8014,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
  	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
  	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
  		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -8030,14 +8030,14 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
  	I915_WRITE(GEN6_RC_CONTROL,
  		   GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
-static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
+static void valleyview_enable_rps(struct drm_i915_private *i915)
  {
  	u32 val;
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
  	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
@@ -8055,15 +8055,15 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
  		   GEN6_RP_UP_BUSY_AVG |
  		   GEN6_RP_DOWN_IDLE_CONT);
- vlv_punit_get(dev_priv);
+	vlv_punit_get(i915);
/* Setting Fixed Bias */
  	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
-	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
+	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(dev_priv);
+	vlv_punit_put(i915);
/* RPS code assumes GPLL is used */
  	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
@@ -8071,9 +8071,9 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
  	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
  	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
- reset_rps(dev_priv, valleyview_set_rps);
+	reset_rps(i915, valleyview_set_rps);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  }
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -8105,7 +8105,7 @@ static const struct cparams {
  	{ 0, 800, 231, 23784 },
  };
-static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_chipset_val(struct drm_i915_private *i915)
  {
  	u64 total_count, diff, ret;
  	u32 count1, count2, count3, m = 0, c = 0;
@@ -8114,7 +8114,7 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
  	lockdep_assert_held(&mchdev_lock);

-	diff1 = now - dev_priv->ips.last_time1;
+	diff1 = now - i915->ips.last_time1;
/* Prevent division-by-zero if we are asking too fast.
  	 * Also, we don't get interesting results if we are polling
@@ -8122,7 +8122,7 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
  	 * in such cases.
  	 */
  	if (diff1 <= 10)
-		return dev_priv->ips.chipset_power;
+		return i915->ips.chipset_power;
count1 = I915_READ(DMIEC);
  	count2 = I915_READ(DDREC);
@@ -8131,16 +8131,16 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
  	total_count = count1 + count2 + count3;
/* FIXME: handle per-counter overflow */
-	if (total_count < dev_priv->ips.last_count1) {
-		diff = ~0UL - dev_priv->ips.last_count1;
+	if (total_count < i915->ips.last_count1) {
+		diff = ~0UL - i915->ips.last_count1;
  		diff += total_count;
  	} else {
-		diff = total_count - dev_priv->ips.last_count1;
+		diff = total_count - i915->ips.last_count1;
  	}
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
-		if (cparams[i].i == dev_priv->ips.c_m &&
-		    cparams[i].t == dev_priv->ips.r_t) {
+		if (cparams[i].i == i915->ips.c_m &&
+		    cparams[i].t == i915->ips.r_t) {
  			m = cparams[i].m;
  			c = cparams[i].c;
  			break;
@@ -8151,25 +8151,25 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
  	ret = ((m * diff) + c);
  	ret = div_u64(ret, 10);
- dev_priv->ips.last_count1 = total_count;
-	dev_priv->ips.last_time1 = now;
+	i915->ips.last_count1 = total_count;
+	i915->ips.last_time1 = now;
- dev_priv->ips.chipset_power = ret;
+	i915->ips.chipset_power = ret;
return ret;
  }
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+unsigned long i915_chipset_val(struct drm_i915_private *i915)
  {
  	intel_wakeref_t wakeref;
  	unsigned long val = 0;
- if (!IS_GEN(dev_priv, 5))
+	if (!IS_GEN(i915, 5))
  		return 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+	with_intel_runtime_pm(i915, wakeref) {
  		spin_lock_irq(&mchdev_lock);
-		val = __i915_chipset_val(dev_priv);
+		val = __i915_chipset_val(i915);
  		spin_unlock_irq(&mchdev_lock);
  	}
@@ -8202,18 +8202,18 @@ static int _pxvid_to_vd(u8 pxvid)
  	return (pxvid + 2) * 125;
  }
-static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
  {
  	const int vd = _pxvid_to_vd(pxvid);
  	const int vm = vd - 1125;
- if (INTEL_INFO(dev_priv)->is_mobile)
+	if (INTEL_INFO(i915)->is_mobile)
  		return vm > 0 ? vm : 0;
return vd;
  }
-static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void __i915_update_gfx_val(struct drm_i915_private *i915)
  {
  	u64 now, diff, diffms;
  	u32 count;
@@ -8221,7 +8221,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
  	lockdep_assert_held(&mchdev_lock);
now = ktime_get_raw_ns();
-	diffms = now - dev_priv->ips.last_time2;
+	diffms = now - i915->ips.last_time2;
  	do_div(diffms, NSEC_PER_MSEC);
/* Don't divide by 0 */
@@ -8230,50 +8230,50 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
  	count = I915_READ(GFXEC);

-	if (count < dev_priv->ips.last_count2) {
-		diff = ~0UL - dev_priv->ips.last_count2;
+	if (count < i915->ips.last_count2) {
+		diff = ~0UL - i915->ips.last_count2;
  		diff += count;
  	} else {
-		diff = count - dev_priv->ips.last_count2;
+		diff = count - i915->ips.last_count2;
  	}
- dev_priv->ips.last_count2 = count;
-	dev_priv->ips.last_time2 = now;
+	i915->ips.last_count2 = count;
+	i915->ips.last_time2 = now;
/* More magic constants... */
  	diff = diff * 1181;
  	diff = div_u64(diff, diffms * 10);
-	dev_priv->ips.gfx_power = diff;
+	i915->ips.gfx_power = diff;
  }
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+void i915_update_gfx_val(struct drm_i915_private *i915)
  {
  	intel_wakeref_t wakeref;
- if (!IS_GEN(dev_priv, 5))
+	if (!IS_GEN(i915, 5))
  		return;
- with_intel_runtime_pm(dev_priv, wakeref) {
+	with_intel_runtime_pm(i915, wakeref) {
  		spin_lock_irq(&mchdev_lock);
-		__i915_update_gfx_val(dev_priv);
+		__i915_update_gfx_val(i915);
  		spin_unlock_irq(&mchdev_lock);
  	}
  }
-static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_gfx_val(struct drm_i915_private *i915)
  {
  	unsigned long t, corr, state1, corr2, state2;
  	u32 pxvid, ext_v;
  	lockdep_assert_held(&mchdev_lock);

-	pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
+	pxvid = I915_READ(PXVFREQ(i915->gt_pm.rps.cur_freq));
  	pxvid = (pxvid >> 24) & 0x7f;
-	ext_v = pvid_to_extvid(dev_priv, pxvid);
+	ext_v = pvid_to_extvid(i915, pxvid);
  	state1 = ext_v;

-	t = i915_mch_val(dev_priv);
+	t = i915_mch_val(i915);
  	/* Revel in the empirically derived constants */

@@ -8287,27 +8287,27 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
  	corr = corr * ((150142 * state1) / 10000 - 78642);
  	corr /= 100000;
-	corr2 = (corr * dev_priv->ips.corr);
+	corr2 = (corr * i915->ips.corr);
state2 = (corr2 * state1) / 10000;
  	state2 /= 100; /* convert to mW */
- __i915_update_gfx_val(dev_priv);
+	__i915_update_gfx_val(i915);
- return dev_priv->ips.gfx_power + state2;
+	return i915->ips.gfx_power + state2;
  }
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+unsigned long i915_gfx_val(struct drm_i915_private *i915)
  {
  	intel_wakeref_t wakeref;
  	unsigned long val = 0;
- if (!IS_GEN(dev_priv, 5))
+	if (!IS_GEN(i915, 5))
  		return 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+	with_intel_runtime_pm(i915, wakeref) {
  		spin_lock_irq(&mchdev_lock);
-		val = __i915_gfx_val(dev_priv);
+		val = __i915_gfx_val(i915);
  		spin_unlock_irq(&mchdev_lock);
  	}
@@ -8471,11 +8471,11 @@ ips_ping_for_i915_load(void)
  	}
  }
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
+void intel_gpu_ips_init(struct drm_i915_private *i915)
  {
  	/* We only register the i915 ips part with intel-ips once everything is
  	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
-	rcu_assign_pointer(i915_mch_dev, dev_priv);
+	rcu_assign_pointer(i915_mch_dev, i915);
ips_ping_for_i915_load();
  }
@@ -8485,7 +8485,7 @@ void intel_gpu_ips_teardown(void)
  	rcu_assign_pointer(i915_mch_dev, NULL);
  }
-static void intel_init_emon(struct drm_i915_private *dev_priv)
+static void intel_init_emon(struct drm_i915_private *i915)
  {
  	u32 lcfuse;
  	u8 pxw[16];
@@ -8552,40 +8552,40 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
  	lcfuse = I915_READ(LCFUSE02);

-	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
+	i915->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
  }
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_init_gt_powersave(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
/*
  	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
  	 * requirement.
  	 */
-	if (!sanitize_rc6(dev_priv)) {
+	if (!sanitize_rc6(i915)) {
  		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
-		pm_runtime_get(&dev_priv->drm.pdev->dev);
+		pm_runtime_get(&i915->drm.pdev->dev);
  	}
/* Initialize RPS limits (for userspace) */
-	if (IS_CHERRYVIEW(dev_priv))
-		cherryview_init_gt_powersave(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv))
-		valleyview_init_gt_powersave(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_init_rps_frequencies(dev_priv);
+	if (IS_CHERRYVIEW(i915))
+		cherryview_init_gt_powersave(i915);
+	else if (IS_VALLEYVIEW(i915))
+		valleyview_init_gt_powersave(i915);
+	else if (INTEL_GEN(i915) >= 6)
+		gen6_init_rps_frequencies(i915);
/* Derive initial user preferences/limits from the hardware limits */
  	rps->max_freq_softlimit = rps->max_freq;
  	rps->min_freq_softlimit = rps->min_freq;
/* After setting max-softlimit, find the overclock max freq */
-	if (IS_GEN(dev_priv, 6) ||
-	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
+	if (IS_GEN(i915, 6) ||
+	    IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
  		u32 params = 0;
- sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS,
+		sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
  				       &params, NULL);
  		if (params & BIT(31)) { /* OC supported */
  			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
@@ -8601,25 +8601,25 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
  	rps->cur_freq = rps->idle_freq;
  }
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_cleanup_gt_powersave(struct drm_i915_private *i915)
  {
-	if (IS_VALLEYVIEW(dev_priv))
-		valleyview_cleanup_gt_powersave(dev_priv);
+	if (IS_VALLEYVIEW(i915))
+		valleyview_cleanup_gt_powersave(i915);
- if (!HAS_RC6(dev_priv))
-		pm_runtime_put(&dev_priv->drm.pdev->dev);
+	if (!HAS_RC6(i915))
+		pm_runtime_put(&i915->drm.pdev->dev);
  }
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_sanitize_gt_powersave(struct drm_i915_private *i915)
  {
-	dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
-	dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
-	intel_disable_gt_powersave(dev_priv);
+	i915->gt_pm.rps.enabled = true; /* force RPS disabling */
+	i915->gt_pm.rc6.enabled = true; /* force RC6 disabling */
+	intel_disable_gt_powersave(i915);
- if (INTEL_GEN(dev_priv) >= 11)
-		gen11_reset_rps_interrupts(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_reset_rps_interrupts(dev_priv);
+	if (INTEL_GEN(i915) >= 11)
+		gen11_reset_rps_interrupts(i915);
+	else if (INTEL_GEN(i915) >= 6)
+		gen6_reset_rps_interrupts(i915);
  }
static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
@@ -8634,56 +8634,56 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
  	i915->gt_pm.llc_pstate.enabled = false;
  }
-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+static void intel_disable_rc6(struct drm_i915_private *i915)
  {
-	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
+	lockdep_assert_held(&i915->gt_pm.rps.lock);
- if (!dev_priv->gt_pm.rc6.enabled)
+	if (!i915->gt_pm.rc6.enabled)
  		return;
- if (INTEL_GEN(dev_priv) >= 9)
-		gen9_disable_rc6(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		cherryview_disable_rc6(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv))
-		valleyview_disable_rc6(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_disable_rc6(dev_priv);
+	if (INTEL_GEN(i915) >= 9)
+		gen9_disable_rc6(i915);
+	else if (IS_CHERRYVIEW(i915))
+		cherryview_disable_rc6(i915);
+	else if (IS_VALLEYVIEW(i915))
+		valleyview_disable_rc6(i915);
+	else if (INTEL_GEN(i915) >= 6)
+		gen6_disable_rc6(i915);
- dev_priv->gt_pm.rc6.enabled = false;
+	i915->gt_pm.rc6.enabled = false;
  }
-static void intel_disable_rps(struct drm_i915_private *dev_priv)
+static void intel_disable_rps(struct drm_i915_private *i915)
  {
-	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
+	lockdep_assert_held(&i915->gt_pm.rps.lock);
- if (!dev_priv->gt_pm.rps.enabled)
+	if (!i915->gt_pm.rps.enabled)
  		return;
- if (INTEL_GEN(dev_priv) >= 9)
-		gen9_disable_rps(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		cherryview_disable_rps(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv))
-		valleyview_disable_rps(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_disable_rps(dev_priv);
-	else if (IS_IRONLAKE_M(dev_priv))
-		ironlake_disable_drps(dev_priv);
+	if (INTEL_GEN(i915) >= 9)
+		gen9_disable_rps(i915);
+	else if (IS_CHERRYVIEW(i915))
+		cherryview_disable_rps(i915);
+	else if (IS_VALLEYVIEW(i915))
+		valleyview_disable_rps(i915);
+	else if (INTEL_GEN(i915) >= 6)
+		gen6_disable_rps(i915);
+	else if (IS_IRONLAKE_M(i915))
+		ironlake_disable_drps(i915);
- dev_priv->gt_pm.rps.enabled = false;
+	i915->gt_pm.rps.enabled = false;
  }
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_disable_gt_powersave(struct drm_i915_private *i915)
  {
-	mutex_lock(&dev_priv->gt_pm.rps.lock);
+	mutex_lock(&i915->gt_pm.rps.lock);
- intel_disable_rc6(dev_priv);
-	intel_disable_rps(dev_priv);
-	if (HAS_LLC(dev_priv))
-		intel_disable_llc_pstate(dev_priv);
+	intel_disable_rc6(i915);
+	intel_disable_rps(i915);
+	if (HAS_LLC(i915))
+		intel_disable_llc_pstate(i915);
- mutex_unlock(&dev_priv->gt_pm.rps.lock);
+	mutex_unlock(&i915->gt_pm.rps.lock);
  }
static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
@@ -8698,51 +8698,51 @@ static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
  	i915->gt_pm.llc_pstate.enabled = true;
  }
-static void intel_enable_rc6(struct drm_i915_private *dev_priv)
+static void intel_enable_rc6(struct drm_i915_private *i915)
  {
-	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
+	lockdep_assert_held(&i915->gt_pm.rps.lock);
- if (dev_priv->gt_pm.rc6.enabled)
+	if (i915->gt_pm.rc6.enabled)
  		return;
- if (IS_CHERRYVIEW(dev_priv))
-		cherryview_enable_rc6(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv))
-		valleyview_enable_rc6(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 11)
-		gen11_enable_rc6(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 9)
-		gen9_enable_rc6(dev_priv);
-	else if (IS_BROADWELL(dev_priv))
-		gen8_enable_rc6(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_enable_rc6(dev_priv);
+	if (IS_CHERRYVIEW(i915))
+		cherryview_enable_rc6(i915);
+	else if (IS_VALLEYVIEW(i915))
+		valleyview_enable_rc6(i915);
+	else if (INTEL_GEN(i915) >= 11)
+		gen11_enable_rc6(i915);
+	else if (INTEL_GEN(i915) >= 9)
+		gen9_enable_rc6(i915);
+	else if (IS_BROADWELL(i915))
+		gen8_enable_rc6(i915);
+	else if (INTEL_GEN(i915) >= 6)
+		gen6_enable_rc6(i915);
- dev_priv->gt_pm.rc6.enabled = true;
+	i915->gt_pm.rc6.enabled = true;
  }
-static void intel_enable_rps(struct drm_i915_private *dev_priv)
+static void intel_enable_rps(struct drm_i915_private *i915)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
  	lockdep_assert_held(&rps->lock);

  	if (rps->enabled)
  		return;
- if (IS_CHERRYVIEW(dev_priv)) {
-		cherryview_enable_rps(dev_priv);
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		valleyview_enable_rps(dev_priv);
-	} else if (INTEL_GEN(dev_priv) >= 9) {
-		gen9_enable_rps(dev_priv);
-	} else if (IS_BROADWELL(dev_priv)) {
-		gen8_enable_rps(dev_priv);
-	} else if (INTEL_GEN(dev_priv) >= 6) {
-		gen6_enable_rps(dev_priv);
-	} else if (IS_IRONLAKE_M(dev_priv)) {
-		ironlake_enable_drps(dev_priv);
-		intel_init_emon(dev_priv);
+	if (IS_CHERRYVIEW(i915)) {
+		cherryview_enable_rps(i915);
+	} else if (IS_VALLEYVIEW(i915)) {
+		valleyview_enable_rps(i915);
+	} else if (INTEL_GEN(i915) >= 9) {
+		gen9_enable_rps(i915);
+	} else if (IS_BROADWELL(i915)) {
+		gen8_enable_rps(i915);
+	} else if (INTEL_GEN(i915) >= 6) {
+		gen6_enable_rps(i915);
+	} else if (IS_IRONLAKE_M(i915)) {
+		ironlake_enable_drps(i915);
+		intel_init_emon(i915);
  	}
WARN_ON(rps->max_freq < rps->min_freq);
@@ -8754,25 +8754,25 @@ static void intel_enable_rps(struct drm_i915_private *dev_priv)
  	rps->enabled = true;
  }
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_enable_gt_powersave(struct drm_i915_private *i915)
  {
  	/* Powersaving is controlled by the host when inside a VM */
-	if (intel_vgpu_active(dev_priv))
+	if (intel_vgpu_active(i915))
  		return;
- mutex_lock(&dev_priv->gt_pm.rps.lock);
+	mutex_lock(&i915->gt_pm.rps.lock);
- if (HAS_RC6(dev_priv))
-		intel_enable_rc6(dev_priv);
-	if (HAS_RPS(dev_priv))
-		intel_enable_rps(dev_priv);
-	if (HAS_LLC(dev_priv))
-		intel_enable_llc_pstate(dev_priv);
+	if (HAS_RC6(i915))
+		intel_enable_rc6(i915);
+	if (HAS_RPS(i915))
+		intel_enable_rps(i915);
+	if (HAS_LLC(i915))
+		intel_enable_llc_pstate(i915);
- mutex_unlock(&dev_priv->gt_pm.rps.lock);
+	mutex_unlock(&i915->gt_pm.rps.lock);
  }
-static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
+static void ibx_init_clock_gating(struct drm_i915_private *i915)
  {
  	/*
  	 * On Ibex Peak and Cougar Point, we need to disable clock
@@ -8782,11 +8782,11 @@ static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
  	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
  }
-static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
+static void g4x_disable_trickle_feed(struct drm_i915_private *i915)
  {
  	enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		I915_WRITE(DSPCNTR(pipe),
  			   I915_READ(DSPCNTR(pipe)) |
  			   DISPPLANE_TRICKLE_FEED_DISABLE);
@@ -8796,7 +8796,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
  	}
  }
-static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
+static void ilk_init_clock_gating(struct drm_i915_private *i915)
  {
  	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
@@ -8836,7 +8836,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
  	 * The bit 22 of 0x42004
  	 * The bit 7,8,9 of 0x42020.
  	 */
-	if (IS_IRONLAKE_M(dev_priv)) {
+	if (IS_IRONLAKE_M(i915)) {
  		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
  		I915_WRITE(ILK_DISPLAY_CHICKEN1,
  			   I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -8862,12 +8862,12 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
  	/* WaDisable_RenderCache_OperationalFlush:ilk */
  	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
- g4x_disable_trickle_feed(dev_priv);
+	g4x_disable_trickle_feed(i915);
- ibx_init_clock_gating(dev_priv);
+	ibx_init_clock_gating(i915);
  }
-static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
+static void cpt_init_clock_gating(struct drm_i915_private *i915)
  {
  	int pipe;
  	u32 val;
@@ -8885,11 +8885,11 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
  	/* The below fixes the weird display corruption, a few pixels shifted
  	 * downward, on (only) LVDS of some HP laptops with IVY.
  	 */
-	for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		val = I915_READ(TRANS_CHICKEN2(pipe));
  		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
  		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
-		if (dev_priv->vbt.fdi_rx_polarity_inverted)
+		if (i915->vbt.fdi_rx_polarity_inverted)
  			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
  		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
  		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
@@ -8897,13 +8897,13 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
  		I915_WRITE(TRANS_CHICKEN2(pipe), val);
  	}
  	/* WADP0ClockGatingDisable */
-	for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		I915_WRITE(TRANS_CHICKEN1(pipe),
  			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
  	}
  }
-static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
+static void gen6_check_mch_setup(struct drm_i915_private *i915)
  {
  	u32 tmp;
@@ -8913,7 +8913,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
  			      tmp);
  }
-static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
+static void gen6_init_clock_gating(struct drm_i915_private *i915)
  {
  	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
@@ -9000,14 +9000,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
  		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
  		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
- g4x_disable_trickle_feed(dev_priv);
+	g4x_disable_trickle_feed(i915);
- cpt_init_clock_gating(dev_priv);
+	cpt_init_clock_gating(i915);
- gen6_check_mch_setup(dev_priv);
+	gen6_check_mch_setup(i915);
  }
-static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *i915)
  {
  	u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
@@ -9025,13 +9025,13 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
  	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
  }
-static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
+static void lpt_init_clock_gating(struct drm_i915_private *i915)
  {
  	/*
  	 * TODO: this bit should only be enabled when really needed, then
  	 * disabled when not needed anymore in order to save power.
  	 */
-	if (HAS_PCH_LPT_LP(dev_priv))
+	if (HAS_PCH_LPT_LP(i915))
  		I915_WRITE(SOUTH_DSPCLK_GATE_D,
  			   I915_READ(SOUTH_DSPCLK_GATE_D) |
  			   PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -9042,9 +9042,9 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
  		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
  }
-static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
+static void lpt_suspend_hw(struct drm_i915_private *i915)
  {
-	if (HAS_PCH_LPT_LP(dev_priv)) {
+	if (HAS_PCH_LPT_LP(i915)) {
  		u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
@@ -9052,7 +9052,7 @@ static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
  	}
  }
-static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
+static void gen8_set_l3sqc_credits(struct drm_i915_private *i915,
  				   int general_prio_credits,
  				   int high_prio_credits)
  {
@@ -9078,7 +9078,7 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
  	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
  }
-static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
+static void icl_init_clock_gating(struct drm_i915_private *i915)
  {
  	/* This is not an Wa. Enable to reduce Sampler power */
  	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
@@ -9089,9 +9089,9 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
  		   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
  }
-static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
+static void cnp_init_clock_gating(struct drm_i915_private *i915)
  {
-	if (!HAS_PCH_CNP(dev_priv))
+	if (!HAS_PCH_CNP(i915))
  		return;
/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
@@ -9099,10 +9099,10 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
  		   CNP_PWM_CGE_GATING_DISABLE);
  }
-static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
+static void cnl_init_clock_gating(struct drm_i915_private *i915)
  {
  	u32 val;
-	cnp_init_clock_gating(dev_priv);
+	cnp_init_clock_gating(i915);
/* This is not an Wa. Enable for better image quality */
  	I915_WRITE(_3D_CHICKEN3,
@@ -9120,7 +9120,7 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
  	/* ReadHitWriteOnlyDisable:cnl */
  	val |= RCCUNIT_CLKGATE_DIS;
  	/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
+	if (IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_B0))
  		val |= SARBUNIT_CLKGATE_DIS;
  	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
@@ -9136,27 +9136,27 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
  	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
  }
-static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
+static void cfl_init_clock_gating(struct drm_i915_private *i915)
  {
-	cnp_init_clock_gating(dev_priv);
-	gen9_init_clock_gating(dev_priv);
+	cnp_init_clock_gating(i915);
+	gen9_init_clock_gating(i915);
/* WaFbcNukeOnHostModify:cfl */
  	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
  		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
  }
-static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
+static void kbl_init_clock_gating(struct drm_i915_private *i915)
  {
-	gen9_init_clock_gating(dev_priv);
+	gen9_init_clock_gating(i915);
/* WaDisableSDEUnitClockGating:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
  		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
  			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
  		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
  			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
@@ -9165,9 +9165,9 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
  		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
  }
-static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
+static void skl_init_clock_gating(struct drm_i915_private *i915)
  {
-	gen9_init_clock_gating(dev_priv);
+	gen9_init_clock_gating(i915);
/* WAC6entrylatency:skl */
  	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
@@ -9178,10 +9178,10 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
  		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
  }
-static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
+static void bdw_init_clock_gating(struct drm_i915_private *i915)
  {
  	/* The GTT cache must be disabled if the system is using 2M pages. */
-	bool can_use_gtt_cache = !HAS_PAGE_SIZES(dev_priv,
+	bool can_use_gtt_cache = !HAS_PAGE_SIZES(i915,
  						 I915_GTT_PAGE_SIZE_2M);
  	enum pipe pipe;
@@ -9193,7 +9193,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
  		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
-	for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(i915, pipe) {
  		I915_WRITE(CHICKEN_PIPESL_1(pipe),
  			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
  			   BDW_DPRS_MASK_VBLANK_SRD);
@@ -9213,7 +9213,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
  		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaProgramL3SqcReg1Default:bdw */
-	gen8_set_l3sqc_credits(dev_priv, 30, 2);
+	gen8_set_l3sqc_credits(i915, 30, 2);
/* WaGttCachingOffByDefault:bdw */
  	I915_WRITE(HSW_GTT_CACHE_EN, can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
@@ -9222,7 +9222,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
  	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
  		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
- lpt_init_clock_gating(dev_priv);
+	lpt_init_clock_gating(i915);
/* WaDisableDopClockGating:bdw
  	 *
@@ -9233,7 +9233,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
  		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
  }
-static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
+static void hsw_init_clock_gating(struct drm_i915_private *i915)
  {
  	/* L3 caching of data atomics doesn't work -- disable it. */
  	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
@@ -9278,10 +9278,10 @@ static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
  	/* WaSwitchSolVfFArbitrationPriority:hsw */
  	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
- lpt_init_clock_gating(dev_priv);
+	lpt_init_clock_gating(i915);
  }
-static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
+static void ivb_init_clock_gating(struct drm_i915_private *i915)
  {
  	u32 snpcr;
@@ -9297,7 +9297,7 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
  		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisablePSDDualDispatchEnable:ivb */
-	if (IS_IVB_GT1(dev_priv))
+	if (IS_IVB_GT1(i915))
  		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
  			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
@@ -9313,7 +9313,7 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
  			GEN7_WA_FOR_GEN7_L3_CONTROL);
  	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
  		   GEN7_WA_L3_CHICKEN_MODE);
-	if (IS_IVB_GT1(dev_priv))
+	if (IS_IVB_GT1(i915))
  		I915_WRITE(GEN7_ROW_CHICKEN2,
  			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
  	else {
@@ -9340,9 +9340,9 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
  			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
  			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- g4x_disable_trickle_feed(dev_priv);
+	g4x_disable_trickle_feed(i915);
- gen7_setup_fixed_func_scheduler(dev_priv);
+	gen7_setup_fixed_func_scheduler(i915);
if (0) { /* causes HiZ corruption on ivb:gt1 */
  		/* enable HiZ Raw Stall Optimization */
@@ -9370,13 +9370,13 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
  	snpcr |= GEN6_MBC_SNPCR_MED;
  	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- if (!HAS_PCH_NOP(dev_priv))
-		cpt_init_clock_gating(dev_priv);
+	if (!HAS_PCH_NOP(i915))
+		cpt_init_clock_gating(i915);
- gen6_check_mch_setup(dev_priv);
+	gen6_check_mch_setup(i915);
  }
-static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
+static void vlv_init_clock_gating(struct drm_i915_private *i915)
  {
  	/* WaDisableEarlyCull:vlv */
  	I915_WRITE(_3D_CHICKEN3,
@@ -9409,7 +9409,7 @@ static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
  		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
  		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- gen7_setup_fixed_func_scheduler(dev_priv);
+	gen7_setup_fixed_func_scheduler(i915);
/*
  	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
@@ -9456,7 +9456,7 @@ static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
  	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
  }
-static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
+static void chv_init_clock_gating(struct drm_i915_private *i915)
  {
  	/* WaVSRefCountFullforceMissDisable:chv */
  	/* WaDSRefCountFullforceMissDisable:chv */
@@ -9481,7 +9481,7 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
  	 * See gfxspecs/Related Documents/Performance Guide/
  	 * LSQC Setting Recommendations.
  	 */
-	gen8_set_l3sqc_credits(dev_priv, 38, 2);
+	gen8_set_l3sqc_credits(i915, 38, 2);
/*
  	 * GTT cache may not work with big pages, so if those
@@ -9490,7 +9490,7 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
  	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
  }
-static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
+static void g4x_init_clock_gating(struct drm_i915_private *i915)
  {
  	u32 dspclk_gate;
@@ -9502,7 +9502,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
  	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
  		OVRUNIT_CLOCK_GATE_DISABLE |
  		OVCUNIT_CLOCK_GATE_DISABLE;
-	if (IS_GM45(dev_priv))
+	if (IS_GM45(i915))
  		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
  	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
@@ -9513,12 +9513,12 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
  	/* WaDisable_RenderCache_OperationalFlush:g4x */
  	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
- g4x_disable_trickle_feed(dev_priv);
+	g4x_disable_trickle_feed(i915);
  }
-static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
+static void i965gm_init_clock_gating(struct drm_i915_private *i915)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
  	intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
@@ -9535,7 +9535,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
  			   _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
  }
-static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
+static void i965g_init_clock_gating(struct drm_i915_private *i915)
  {
  	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
  		   I965_RCC_CLOCK_GATE_DISABLE |
@@ -9550,7 +9550,7 @@ static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
  	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
  }
-static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
+static void gen3_init_clock_gating(struct drm_i915_private *i915)
  {
  	u32 dstate = I915_READ(D_STATE);
@@ -9558,7 +9558,7 @@ static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
  		DSTATE_DOT_CLOCK_GATING;
  	I915_WRITE(D_STATE, dstate);
- if (IS_PINEVIEW(dev_priv))
+	if (IS_PINEVIEW(i915))
  		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
@@ -9574,7 +9574,7 @@ static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
  		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
  }
-static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
+static void i85x_init_clock_gating(struct drm_i915_private *i915)
  {
  	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
@@ -9586,168 +9586,168 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
  		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
  }
-static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
+static void i830_init_clock_gating(struct drm_i915_private *i915)
  {
  	I915_WRITE(MEM_MODE,
  		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
  		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
  }
-void intel_init_clock_gating(struct drm_i915_private *dev_priv)
+void intel_init_clock_gating(struct drm_i915_private *i915)
  {
-	dev_priv->display.init_clock_gating(dev_priv);
+	i915->display.init_clock_gating(i915);
  }
-void intel_suspend_hw(struct drm_i915_private *dev_priv)
+void intel_suspend_hw(struct drm_i915_private *i915)
  {
-	if (HAS_PCH_LPT(dev_priv))
-		lpt_suspend_hw(dev_priv);
+	if (HAS_PCH_LPT(i915))
+		lpt_suspend_hw(i915);
  }
-static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
+static void nop_init_clock_gating(struct drm_i915_private *i915)
  {
  	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
  }
/**
   * intel_init_clock_gating_hooks - setup the clock gating hooks
- * @dev_priv: device private
+ * @i915: device private
   *
   * Setup the hooks that configure which clocks of a given platform can be
   * gated and also apply various GT and display specific workarounds for these
   * platforms. Note that some GT specific workarounds are applied separately
   * when GPU contexts or batchbuffers start their execution.
   */
-void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
-{
-	if (IS_GEN(dev_priv, 11))
-		dev_priv->display.init_clock_gating = icl_init_clock_gating;
-	else if (IS_CANNONLAKE(dev_priv))
-		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
-	else if (IS_COFFEELAKE(dev_priv))
-		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
-	else if (IS_SKYLAKE(dev_priv))
-		dev_priv->display.init_clock_gating = skl_init_clock_gating;
-	else if (IS_KABYLAKE(dev_priv))
-		dev_priv->display.init_clock_gating = kbl_init_clock_gating;
-	else if (IS_BROXTON(dev_priv))
-		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
-	else if (IS_GEMINILAKE(dev_priv))
-		dev_priv->display.init_clock_gating = glk_init_clock_gating;
-	else if (IS_BROADWELL(dev_priv))
-		dev_priv->display.init_clock_gating = bdw_init_clock_gating;
-	else if (IS_CHERRYVIEW(dev_priv))
-		dev_priv->display.init_clock_gating = chv_init_clock_gating;
-	else if (IS_HASWELL(dev_priv))
-		dev_priv->display.init_clock_gating = hsw_init_clock_gating;
-	else if (IS_IVYBRIDGE(dev_priv))
-		dev_priv->display.init_clock_gating = ivb_init_clock_gating;
-	else if (IS_VALLEYVIEW(dev_priv))
-		dev_priv->display.init_clock_gating = vlv_init_clock_gating;
-	else if (IS_GEN(dev_priv, 6))
-		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
-	else if (IS_GEN(dev_priv, 5))
-		dev_priv->display.init_clock_gating = ilk_init_clock_gating;
-	else if (IS_G4X(dev_priv))
-		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
-	else if (IS_I965GM(dev_priv))
-		dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
-	else if (IS_I965G(dev_priv))
-		dev_priv->display.init_clock_gating = i965g_init_clock_gating;
-	else if (IS_GEN(dev_priv, 3))
-		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
-	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
-		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-	else if (IS_GEN(dev_priv, 2))
-		dev_priv->display.init_clock_gating = i830_init_clock_gating;
+void intel_init_clock_gating_hooks(struct drm_i915_private *i915)
+{
+	if (IS_GEN(i915, 11))
+		i915->display.init_clock_gating = icl_init_clock_gating;
+	else if (IS_CANNONLAKE(i915))
+		i915->display.init_clock_gating = cnl_init_clock_gating;
+	else if (IS_COFFEELAKE(i915))
+		i915->display.init_clock_gating = cfl_init_clock_gating;
+	else if (IS_SKYLAKE(i915))
+		i915->display.init_clock_gating = skl_init_clock_gating;
+	else if (IS_KABYLAKE(i915))
+		i915->display.init_clock_gating = kbl_init_clock_gating;
+	else if (IS_BROXTON(i915))
+		i915->display.init_clock_gating = bxt_init_clock_gating;
+	else if (IS_GEMINILAKE(i915))
+		i915->display.init_clock_gating = glk_init_clock_gating;
+	else if (IS_BROADWELL(i915))
+		i915->display.init_clock_gating = bdw_init_clock_gating;
+	else if (IS_CHERRYVIEW(i915))
+		i915->display.init_clock_gating = chv_init_clock_gating;
+	else if (IS_HASWELL(i915))
+		i915->display.init_clock_gating = hsw_init_clock_gating;
+	else if (IS_IVYBRIDGE(i915))
+		i915->display.init_clock_gating = ivb_init_clock_gating;
+	else if (IS_VALLEYVIEW(i915))
+		i915->display.init_clock_gating = vlv_init_clock_gating;
+	else if (IS_GEN(i915, 6))
+		i915->display.init_clock_gating = gen6_init_clock_gating;
+	else if (IS_GEN(i915, 5))
+		i915->display.init_clock_gating = ilk_init_clock_gating;
+	else if (IS_G4X(i915))
+		i915->display.init_clock_gating = g4x_init_clock_gating;
+	else if (IS_I965GM(i915))
+		i915->display.init_clock_gating = i965gm_init_clock_gating;
+	else if (IS_I965G(i915))
+		i915->display.init_clock_gating = i965g_init_clock_gating;
+	else if (IS_GEN(i915, 3))
+		i915->display.init_clock_gating = gen3_init_clock_gating;
+	else if (IS_I85X(i915) || IS_I865G(i915))
+		i915->display.init_clock_gating = i85x_init_clock_gating;
+	else if (IS_GEN(i915, 2))
+		i915->display.init_clock_gating = i830_init_clock_gating;
  	else {
-		MISSING_CASE(INTEL_DEVID(dev_priv));
-		dev_priv->display.init_clock_gating = nop_init_clock_gating;
+		MISSING_CASE(INTEL_DEVID(i915));
+		i915->display.init_clock_gating = nop_init_clock_gating;
  	}
  }
/* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_i915_private *dev_priv)
+void intel_init_pm(struct drm_i915_private *i915)
  {
  	/* For cxsr */
-	if (IS_PINEVIEW(dev_priv))
-		i915_pineview_get_mem_freq(dev_priv);
-	else if (IS_GEN(dev_priv, 5))
-		i915_ironlake_get_mem_freq(dev_priv);
+	if (IS_PINEVIEW(i915))
+		i915_pineview_get_mem_freq(i915);
+	else if (IS_GEN(i915, 5))
+		i915_ironlake_get_mem_freq(i915);
/* For FIFO watermark updates */
-	if (INTEL_GEN(dev_priv) >= 9) {
-		skl_setup_wm_latency(dev_priv);
-		dev_priv->display.initial_watermarks = skl_initial_wm;
-		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
-		dev_priv->display.compute_global_watermarks = skl_compute_wm;
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
-		ilk_setup_wm_latency(dev_priv);
-
-		if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
-		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
-		    (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
-		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
-			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
-			dev_priv->display.compute_intermediate_wm =
+	if (INTEL_GEN(i915) >= 9) {
+		skl_setup_wm_latency(i915);
+		i915->display.initial_watermarks = skl_initial_wm;
+		i915->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
+		i915->display.compute_global_watermarks = skl_compute_wm;
+	} else if (HAS_PCH_SPLIT(i915)) {
+		ilk_setup_wm_latency(i915);
+
+		if ((IS_GEN(i915, 5) && i915->wm.pri_latency[1] &&
+		     i915->wm.spr_latency[1] && i915->wm.cur_latency[1]) ||
+		    (!IS_GEN(i915, 5) && i915->wm.pri_latency[0] &&
+		     i915->wm.spr_latency[0] && i915->wm.cur_latency[0])) {
+			i915->display.compute_pipe_wm = ilk_compute_pipe_wm;
+			i915->display.compute_intermediate_wm =
  				ilk_compute_intermediate_wm;
-			dev_priv->display.initial_watermarks =
+			i915->display.initial_watermarks =
  				ilk_initial_watermarks;
-			dev_priv->display.optimize_watermarks =
+			i915->display.optimize_watermarks =
  				ilk_optimize_watermarks;
  		} else {
  			DRM_DEBUG_KMS("Failed to read display plane latency. "
  				      "Disable CxSR\n");
  		}
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		vlv_setup_wm_latency(dev_priv);
-		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
-		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
-		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
-		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
-		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
-	} else if (IS_G4X(dev_priv)) {
-		g4x_setup_wm_latency(dev_priv);
-		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
-		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
-		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
-		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
-	} else if (IS_PINEVIEW(dev_priv)) {
-		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
-					    dev_priv->is_ddr3,
-					    dev_priv->fsb_freq,
-					    dev_priv->mem_freq)) {
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		vlv_setup_wm_latency(i915);
+		i915->display.compute_pipe_wm = vlv_compute_pipe_wm;
+		i915->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
+		i915->display.initial_watermarks = vlv_initial_watermarks;
+		i915->display.optimize_watermarks = vlv_optimize_watermarks;
+		i915->display.atomic_update_watermarks = vlv_atomic_update_fifo;
+	} else if (IS_G4X(i915)) {
+		g4x_setup_wm_latency(i915);
+		i915->display.compute_pipe_wm = g4x_compute_pipe_wm;
+		i915->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
+		i915->display.initial_watermarks = g4x_initial_watermarks;
+		i915->display.optimize_watermarks = g4x_optimize_watermarks;
+	} else if (IS_PINEVIEW(i915)) {
+		if (!intel_get_cxsr_latency(!IS_MOBILE(i915),
+					    i915->is_ddr3,
+					    i915->fsb_freq,
+					    i915->mem_freq)) {
  			DRM_INFO("failed to find known CxSR latency "
  				 "(found ddr%s fsb freq %d, mem freq %d), "
  				 "disabling CxSR\n",
-				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
-				 dev_priv->fsb_freq, dev_priv->mem_freq);
+				 (i915->is_ddr3 == 1) ? "3" : "2",
+				 i915->fsb_freq, i915->mem_freq);
  			/* Disable CxSR and never update its watermark again */
-			intel_set_memory_cxsr(dev_priv, false);
-			dev_priv->display.update_wm = NULL;
+			intel_set_memory_cxsr(i915, false);
+			i915->display.update_wm = NULL;
  		} else
-			dev_priv->display.update_wm = pineview_update_wm;
-	} else if (IS_GEN(dev_priv, 4)) {
-		dev_priv->display.update_wm = i965_update_wm;
-	} else if (IS_GEN(dev_priv, 3)) {
-		dev_priv->display.update_wm = i9xx_update_wm;
-		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
-	} else if (IS_GEN(dev_priv, 2)) {
-		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
-			dev_priv->display.update_wm = i845_update_wm;
-			dev_priv->display.get_fifo_size = i845_get_fifo_size;
+			i915->display.update_wm = pineview_update_wm;
+	} else if (IS_GEN(i915, 4)) {
+		i915->display.update_wm = i965_update_wm;
+	} else if (IS_GEN(i915, 3)) {
+		i915->display.update_wm = i9xx_update_wm;
+		i915->display.get_fifo_size = i9xx_get_fifo_size;
+	} else if (IS_GEN(i915, 2)) {
+		if (INTEL_INFO(i915)->num_pipes == 1) {
+			i915->display.update_wm = i845_update_wm;
+			i915->display.get_fifo_size = i845_get_fifo_size;
  		} else {
-			dev_priv->display.update_wm = i9xx_update_wm;
-			dev_priv->display.get_fifo_size = i830_get_fifo_size;
+			i915->display.update_wm = i9xx_update_wm;
+			i915->display.get_fifo_size = i830_get_fifo_size;
  		}
  	} else {
  		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
  	}
  }
-static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int byt_gpu_freq(struct drm_i915_private *i915, int val)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
/*
  	 * N = val - 0xb7
@@ -9756,16 +9756,16 @@ static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
  	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
  }
-static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
+static int byt_freq_opcode(struct drm_i915_private *i915, int val)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
  }
-static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int chv_gpu_freq(struct drm_i915_private *i915, int val)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
/*
  	 * N = val / 2
@@ -9774,52 +9774,52 @@ static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
  	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
  }
-static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+static int chv_freq_opcode(struct drm_i915_private *i915, int val)
  {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &i915->gt_pm.rps;
/* CHV needs even values */
  	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
  }
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
+int intel_gpu_freq(struct drm_i915_private *i915, int val)
  {
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
  					 GEN9_FREQ_SCALER);
-	else if (IS_CHERRYVIEW(dev_priv))
-		return chv_gpu_freq(dev_priv, val);
-	else if (IS_VALLEYVIEW(dev_priv))
-		return byt_gpu_freq(dev_priv, val);
+	else if (IS_CHERRYVIEW(i915))
+		return chv_gpu_freq(i915, val);
+	else if (IS_VALLEYVIEW(i915))
+		return byt_gpu_freq(i915, val);
  	else
  		return val * GT_FREQUENCY_MULTIPLIER;
  }
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
+int intel_freq_opcode(struct drm_i915_private *i915, int val)
  {
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
  					 GT_FREQUENCY_MULTIPLIER);
-	else if (IS_CHERRYVIEW(dev_priv))
-		return chv_freq_opcode(dev_priv, val);
-	else if (IS_VALLEYVIEW(dev_priv))
-		return byt_freq_opcode(dev_priv, val);
+	else if (IS_CHERRYVIEW(i915))
+		return chv_freq_opcode(i915, val);
+	else if (IS_VALLEYVIEW(i915))
+		return byt_freq_opcode(i915, val);
  	else
  		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
  }
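
For reference, the VLV/CHV opcode<->frequency helpers above are plain integer arithmetic around the GPLL reference clock. A self-contained sketch of the same round-trip, with DIV_ROUND_CLOSEST open-coded and a made-up reference value (illustration only, not part of this patch):

	#include <stdio.h>

	/* Round-to-nearest division for positive operands, matching what
	 * the kernel's DIV_ROUND_CLOSEST() does for this use case. */
	#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

	/* Mirrors byt_gpu_freq(): freq = ref * (val - 0xb7) / 1000 */
	static int byt_opcode_to_freq(int gpll_ref_freq, int val)
	{
		return DIV_ROUND_CLOSEST(gpll_ref_freq * (val - 0xb7), 1000);
	}

	/* Mirrors byt_freq_opcode(): the inverse mapping */
	static int byt_freq_to_opcode(int gpll_ref_freq, int freq)
	{
		return DIV_ROUND_CLOSEST(1000 * freq, gpll_ref_freq) + 0xb7;
	}

	int main(void)
	{
		int ref = 6400;	/* example gpll_ref_freq, not a real platform value */
		int val = 0xc8;
		int freq = byt_opcode_to_freq(ref, val);

		printf("opcode 0x%x -> %d -> opcode 0x%x\n",
		       val, freq, byt_freq_to_opcode(ref, freq));
		return 0;
	}

The CHV pair only differs in the factor-of-two encoding and the rounding to even opcode values visible above.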
-void intel_pm_setup(struct drm_i915_private *dev_priv)
+void intel_pm_setup(struct drm_i915_private *i915)
  {
-	mutex_init(&dev_priv->gt_pm.rps.lock);
-	mutex_init(&dev_priv->gt_pm.rps.power.mutex);
+	mutex_init(&i915->gt_pm.rps.lock);
+	mutex_init(&i915->gt_pm.rps.power.mutex);
- atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
+	atomic_set(&i915->gt_pm.rps.num_waiters, 0);
- dev_priv->runtime_pm.suspended = false;
-	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
+	i915->runtime_pm.suspended = false;
+	atomic_set(&i915->runtime_pm.wakeref_count, 0);
  }
-static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
+static u64 vlv_residency_raw(struct drm_i915_private *i915,
  			     const i915_reg_t reg)
  {
  	u32 lower, upper, tmp;
@@ -9829,7 +9829,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
  	 * The register accessed do not need forcewake. We borrow
  	 * uncore lock to prevent concurrent access to range reg.
  	 */
-	lockdep_assert_held(&dev_priv->uncore.lock);
+	lockdep_assert_held(&i915->uncore.lock);
/*
  	 * vlv and chv residency counters are 40 bits in width.
@@ -9865,17 +9865,17 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
  	return lower | (u64)upper << 8;
  }
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
+u64 intel_rc6_residency_ns(struct drm_i915_private *i915,
  			   const i915_reg_t reg)
  {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
  	u64 time_hw, prev_hw, overflow_hw;
  	unsigned int fw_domains;
  	unsigned long flags;
  	unsigned int i;
  	u32 mul, div;
- if (!HAS_RC6(dev_priv))
+	if (!HAS_RC6(i915))
  		return 0;
/*
@@ -9887,7 +9887,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
  	 */
  	i = (i915_mmio_reg_offset(reg) -
  	     i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
-	if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
+	if (WARN_ON_ONCE(i >= ARRAY_SIZE(i915->gt_pm.rc6.cur_residency)))
  		return 0;
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
@@ -9896,14 +9896,14 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
  	intel_uncore_forcewake_get__locked(uncore, fw_domains);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		mul = 1000000;
-		div = dev_priv->czclk_freq;
+		div = i915->czclk_freq;
  		overflow_hw = BIT_ULL(40);
-		time_hw = vlv_residency_raw(dev_priv, reg);
+		time_hw = vlv_residency_raw(i915, reg);
  	} else {
  		/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
-		if (IS_GEN9_LP(dev_priv)) {
+		if (IS_GEN9_LP(i915)) {
  			mul = 10000;
  			div = 12;
  		} else {
@@ -9921,8 +9921,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
  	 * But relying on a sufficient frequency of queries otherwise counters
  	 * can still wrap.
  	 */
-	prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
-	dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;
+	prev_hw = i915->gt_pm.rc6.prev_hw_residency[i];
+	i915->gt_pm.rc6.prev_hw_residency[i] = time_hw;
/* RC6 delta from last sample. */
  	if (time_hw >= prev_hw)
@@ -9931,8 +9931,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
  		time_hw += overflow_hw - prev_hw;
/* Add delta to RC6 extended raw driver copy. */
-	time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
-	dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
+	time_hw += i915->gt_pm.rc6.cur_residency[i];
+	i915->gt_pm.rc6.cur_residency[i] = time_hw;
intel_uncore_forcewake_put__locked(uncore, fw_domains);
  	spin_unlock_irqrestore(&uncore->lock, flags);
@@ -9940,19 +9940,19 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
  	return mul_u64_u32_div(time_hw, mul, div);
  }
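
The wrap handling in intel_rc6_residency_ns() above boils down to extending a fixed-width, wrapping hardware counter into a monotonic 64-bit software total. A self-contained sketch of just that accumulation step (the 40-bit width and the sample values are illustrative, not taken from hardware):

	#include <stdint.h>
	#include <stdio.h>

	struct residency {
		uint64_t prev_hw;	/* last raw hardware sample */
		uint64_t cur;		/* extended, monotonic total */
	};

	/* Fold a new raw sample into the running total, assuming the
	 * hardware counter wrapped at most once since the last sample. */
	static uint64_t residency_update(struct residency *r,
					 uint64_t time_hw, uint64_t overflow_hw)
	{
		uint64_t delta;

		if (time_hw >= r->prev_hw)
			delta = time_hw - r->prev_hw;
		else
			delta = time_hw + overflow_hw - r->prev_hw;

		r->prev_hw = time_hw;
		r->cur += delta;
		return r->cur;
	}

	int main(void)
	{
		/* Counter 100 ticks short of wrapping at 2^40. */
		struct residency r = { .prev_hw = (1ull << 40) - 100, .cur = 0 };

		/* A new raw sample of 50 means one wrap: the delta is 150. */
		printf("%llu\n",
		       (unsigned long long)residency_update(&r, 50, 1ull << 40));
		return 0;
	}

As the comment in the hunk notes, this only stays correct if samples are taken more often than the counter can wrap.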
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+u64 intel_rc6_residency_us(struct drm_i915_private *i915,
  			   i915_reg_t reg)
  {
-	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
+	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(i915, reg), 1000);
  }
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
+u32 intel_get_cagf(struct drm_i915_private *i915, u32 rpstat)
  {
  	u32 cagf;
- if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
  	else
  		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 1b489fa399e1..e2c4edf263f7 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -22,39 +22,39 @@ struct skl_ddb_entry;
  struct skl_pipe_wm;
  struct skl_wm_level;
-void intel_init_clock_gating(struct drm_i915_private *dev_priv);
-void intel_suspend_hw(struct drm_i915_private *dev_priv);
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
+void intel_init_clock_gating(struct drm_i915_private *i915);
+void intel_suspend_hw(struct drm_i915_private *i915);
+int ilk_wm_max_level(const struct drm_i915_private *i915);
  void intel_update_watermarks(struct intel_crtc *crtc);
-void intel_init_pm(struct drm_i915_private *dev_priv);
-void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
-void intel_pm_setup(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_init_pm(struct drm_i915_private *i915);
+void intel_init_clock_gating_hooks(struct drm_i915_private *i915);
+void intel_pm_setup(struct drm_i915_private *i915);
+void intel_gpu_ips_init(struct drm_i915_private *i915);
  void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void intel_init_gt_powersave(struct drm_i915_private *i915);
+void intel_cleanup_gt_powersave(struct drm_i915_private *i915);
+void intel_sanitize_gt_powersave(struct drm_i915_private *i915);
+void intel_enable_gt_powersave(struct drm_i915_private *i915);
+void intel_disable_gt_powersave(struct drm_i915_private *i915);
+void gen6_rps_busy(struct drm_i915_private *i915);
+void gen6_rps_idle(struct drm_i915_private *i915);
  void gen6_rps_boost(struct i915_request *rq);
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void g4x_wm_get_hw_state(struct drm_i915_private *i915);
+void vlv_wm_get_hw_state(struct drm_i915_private *i915);
+void ilk_wm_get_hw_state(struct drm_i915_private *i915);
+void skl_wm_get_hw_state(struct drm_i915_private *i915);
  void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
  			       struct skl_ddb_entry *ddb_y,
  			       struct skl_ddb_entry *ddb_uv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+void skl_ddb_get_hw_state(struct drm_i915_private *i915,
  			  struct skl_ddb_allocation *ddb /* out */);
  void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
  			      struct skl_pipe_wm *out);
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+void g4x_wm_sanitize(struct drm_i915_private *i915);
+void vlv_wm_sanitize(struct drm_i915_private *i915);
  bool intel_can_enable_sagv(struct drm_atomic_state *state);
-int intel_enable_sagv(struct drm_i915_private *dev_priv);
-int intel_disable_sagv(struct drm_i915_private *dev_priv);
+int intel_enable_sagv(struct drm_i915_private *i915);
+int intel_disable_sagv(struct drm_i915_private *i915);
  bool skl_wm_level_equals(const struct skl_wm_level *l1,
  			 const struct skl_wm_level *l2);
  bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
@@ -67,24 +67,24 @@ void skl_write_cursor_wm(struct intel_plane *plane,
  bool ilk_disable_lp_wm(struct drm_device *dev);
  int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
  				  struct intel_crtc_state *cstate);
-void intel_init_ipc(struct drm_i915_private *dev_priv);
-void intel_enable_ipc(struct drm_i915_private *dev_priv);
+void intel_init_ipc(struct drm_i915_private *i915);
+void intel_enable_ipc(struct drm_i915_private *i915);
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, i915_reg_t reg);
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg);
+int intel_gpu_freq(struct drm_i915_private *i915, int val);
+int intel_freq_opcode(struct drm_i915_private *i915, int val);
+u64 intel_rc6_residency_ns(struct drm_i915_private *i915, i915_reg_t reg);
+u64 intel_rc6_residency_us(struct drm_i915_private *i915, i915_reg_t reg);
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
+u32 intel_get_cagf(struct drm_i915_private *i915, u32 rpstat1);
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+unsigned long i915_chipset_val(struct drm_i915_private *i915);
+unsigned long i915_mch_val(struct drm_i915_private *i915);
+unsigned long i915_gfx_val(struct drm_i915_private *i915);
+void i915_update_gfx_val(struct drm_i915_private *i915);
-bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+bool ironlake_set_drps(struct drm_i915_private *i915, u8 val);
+int intel_set_rps(struct drm_i915_private *i915, u8 val);
  void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive);
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
+bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 01ca502099df..072eea00b17a 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -71,14 +71,14 @@ static bool psr_global_enabled(u32 debug)
  	}
  }
-static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
+static bool intel_psr2_enabled(struct drm_i915_private *i915,
  			       const struct intel_crtc_state *crtc_state)
  {
  	/* Cannot enable DSC and PSR2 simultaneously */
  	WARN_ON(crtc_state->dsc_params.compression_enable &&
  		crtc_state->has_psr2);
- switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+	switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
  	case I915_PSR_DEBUG_DISABLE:
  	case I915_PSR_DEBUG_FORCE_PSR1:
  		return false;
@@ -104,20 +104,20 @@ static int edp_psr_shift(enum transcoder cpu_transcoder)
  	}
  }
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
+void intel_psr_irq_control(struct drm_i915_private *i915, u32 debug)
  {
  	u32 debug_mask, mask;
  	enum transcoder cpu_transcoder;
  	u32 transcoders = BIT(TRANSCODER_EDP);
- if (INTEL_GEN(dev_priv) >= 8)
+	if (INTEL_GEN(i915) >= 8)
  		transcoders |= BIT(TRANSCODER_A) |
  			       BIT(TRANSCODER_B) |
  			       BIT(TRANSCODER_C);
debug_mask = 0;
  	mask = 0;
-	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+	for_each_cpu_transcoder_masked(i915, cpu_transcoder, transcoders) {
  		int shift = edp_psr_shift(cpu_transcoder);
mask |= EDP_PSR_ERROR(shift);
@@ -168,26 +168,26 @@ static void psr_event_print(u32 val, bool psr2_enabled)
  		DRM_DEBUG_KMS("\tPSR disabled\n");
  }
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
+void intel_psr_irq_handler(struct drm_i915_private *i915, u32 psr_iir)
  {
  	u32 transcoders = BIT(TRANSCODER_EDP);
  	enum transcoder cpu_transcoder;
  	ktime_t time_ns =  ktime_get();
  	u32 mask = 0;
- if (INTEL_GEN(dev_priv) >= 8)
+	if (INTEL_GEN(i915) >= 8)
  		transcoders |= BIT(TRANSCODER_A) |
  			       BIT(TRANSCODER_B) |
  			       BIT(TRANSCODER_C);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+	for_each_cpu_transcoder_masked(i915, cpu_transcoder, transcoders) {
  		int shift = edp_psr_shift(cpu_transcoder);
if (psr_iir & EDP_PSR_ERROR(shift)) {
  			DRM_WARN("[transcoder %s] PSR aux error\n",
  				 transcoder_name(cpu_transcoder));
- dev_priv->psr.irq_aux_error = true;
+			i915->psr.irq_aux_error = true;
/*
  			 * If this interruption is not masked it will keep
@@ -201,19 +201,19 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
  		}
if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
-			dev_priv->psr.last_entry_attempt = time_ns;
+			i915->psr.last_entry_attempt = time_ns;
  			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
  				      transcoder_name(cpu_transcoder));
  		}
if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
-			dev_priv->psr.last_exit = time_ns;
+			i915->psr.last_exit = time_ns;
  			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
  				      transcoder_name(cpu_transcoder));
- if (INTEL_GEN(dev_priv) >= 9) {
+			if (INTEL_GEN(i915) >= 9) {
  				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
-				bool psr2_enabled = dev_priv->psr.psr2_enabled;
+				bool psr2_enabled = i915->psr.psr2_enabled;
I915_WRITE(PSR_EVENT(cpu_transcoder), val);
  				psr_event_print(val, psr2_enabled);
@@ -225,7 +225,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
  		mask |= I915_READ(EDP_PSR_IMR);
  		I915_WRITE(EDP_PSR_IMR, mask);
- schedule_work(&dev_priv->psr.work);
+		schedule_work(&i915->psr.work);
  	}
  }
@@ -279,7 +279,7 @@ static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)

 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
@@ -300,14 +300,14 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
  		return;
  	}
- dev_priv->psr.sink_support = true;
-	dev_priv->psr.sink_sync_latency =
+	i915->psr.sink_support = true;
+	i915->psr.sink_sync_latency =
  		intel_dp_get_sink_sync_latency(intel_dp);
- WARN_ON(dev_priv->psr.dp);
-	dev_priv->psr.dp = intel_dp;
+	WARN_ON(i915->psr.dp);
+	i915->psr.dp = intel_dp;
- if (INTEL_GEN(dev_priv) >= 9 &&
+	if (INTEL_GEN(i915) >= 9 &&
  	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
  		bool y_req = intel_dp->psr_dpcd[1] &
  			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
@@ -324,14 +324,14 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
  		 * Y-coordinate requirement panels we would need to enable
  		 * GTC first.
  		 */
-		dev_priv->psr.sink_psr2_support = y_req && alpm;
+		i915->psr.sink_psr2_support = y_req && alpm;
  		DRM_DEBUG_KMS("PSR2 %ssupported\n",
-			      dev_priv->psr.sink_psr2_support ? "" : "not ");
+			      i915->psr.sink_psr2_support ? "" : "not ");
- if (dev_priv->psr.sink_psr2_support) {
-			dev_priv->psr.colorimetry_support =
+		if (i915->psr.sink_psr2_support) {
+			i915->psr.colorimetry_support =
  				intel_dp_get_colorimetry_status(intel_dp);
-			dev_priv->psr.su_x_granularity =
+			i915->psr.su_x_granularity =
  				intel_dp_get_su_x_granulartiy(intel_dp);
  		}
  	}
@@ -341,15 +341,15 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
  				const struct intel_crtc_state *crtc_state)
  {
  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	struct dp_sdp psr_vsc;
- if (dev_priv->psr.psr2_enabled) {
+	if (i915->psr.psr2_enabled) {
  		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
  		memset(&psr_vsc, 0, sizeof(psr_vsc));
  		psr_vsc.sdp_header.HB0 = 0;
  		psr_vsc.sdp_header.HB1 = 0x7;
-		if (dev_priv->psr.colorimetry_support) {
+		if (i915->psr.colorimetry_support) {
  			psr_vsc.sdp_header.HB2 = 0x5;
  			psr_vsc.sdp_header.HB3 = 0x13;
  		} else {
@@ -372,7 +372,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	u32 aux_clock_divider, aux_ctl;
  	int i;
  	static const u8 aux_msg[] = {
@@ -405,19 +405,19 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	u8 dpcd_val = DP_PSR_ENABLE;
/* Enable ALPM at sink for psr2 */
-	if (dev_priv->psr.psr2_enabled) {
+	if (i915->psr.psr2_enabled) {
  		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
  				   DP_ALPM_ENABLE);
  		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
  	} else {
-		if (dev_priv->psr.link_standby)
+		if (i915->psr.link_standby)
  			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (INTEL_GEN(dev_priv) >= 8)
+		if (INTEL_GEN(i915) >= 8)
  			dpcd_val |= DP_PSR_CRC_VERIFICATION;
  	}
@@ -428,26 +428,26 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)

 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	u32 val = 0;
- if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		val |= EDP_PSR_TP4_TIME_0US;
- if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+	if (i915->vbt.psr.tp1_wakeup_time_us == 0)
  		val |= EDP_PSR_TP1_TIME_0us;
-	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
+	else if (i915->vbt.psr.tp1_wakeup_time_us <= 100)
  		val |= EDP_PSR_TP1_TIME_100us;
-	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+	else if (i915->vbt.psr.tp1_wakeup_time_us <= 500)
  		val |= EDP_PSR_TP1_TIME_500us;
  	else
  		val |= EDP_PSR_TP1_TIME_2500us;
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+	if (i915->vbt.psr.tp2_tp3_wakeup_time_us == 0)
  		val |= EDP_PSR_TP2_TP3_TIME_0us;
-	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+	else if (i915->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
  		val |= EDP_PSR_TP2_TP3_TIME_100us;
-	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+	else if (i915->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
  		val |= EDP_PSR_TP2_TP3_TIME_500us;
  	else
  		val |= EDP_PSR_TP2_TP3_TIME_2500us;
@@ -463,31 +463,31 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
static void hsw_activate_psr1(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	u32 max_sleep_time = 0x1f;
  	u32 val = EDP_PSR_ENABLE;
/* Let's use 6 as the minimum to cover all known cases including the
  	 * off-by-one issue that HW has in some cases.
  	 */
-	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+	int idle_frames = max(6, i915->vbt.psr.idle_frames);
/* sink_sync_latency of 8 means source has to wait for more than 8
  	 * frames, we'll go with 9 frames for now
  	 */
-	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+	idle_frames = max(idle_frames, i915->psr.sink_sync_latency + 1);
  	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
-	if (IS_HASWELL(dev_priv))
+	if (IS_HASWELL(i915))
  		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- if (dev_priv->psr.link_standby)
+	if (i915->psr.link_standby)
  		val |= EDP_PSR_LINK_STANDBY;
 	val |= intel_psr1_get_tp_time(intel_dp);

-	if (INTEL_GEN(dev_priv) >= 8)
+	if (INTEL_GEN(i915) >= 8)
  		val |= EDP_PSR_CRC_ENABLE;
val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
@@ -496,29 +496,29 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	u32 val;
/* Let's use 6 as the minimum to cover all known cases including the
  	 * off-by-one issue that HW has in some cases.
  	 */
-	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+	int idle_frames = max(6, i915->vbt.psr.idle_frames);
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+	idle_frames = max(idle_frames, i915->psr.sink_sync_latency + 1);
  	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		val |= EDP_Y_COORDINATE_ENABLE;
- val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+	val |= EDP_PSR2_FRAME_BEFORE_SU(i915->psr.sink_sync_latency + 1);
- if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
-	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
+	if (i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+	    i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
  		val |= EDP_PSR2_TP2_TIME_50us;
-	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
+	else if (i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
  		val |= EDP_PSR2_TP2_TIME_100us;
-	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
+	else if (i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
  		val |= EDP_PSR2_TP2_TIME_500us;
  	else
  		val |= EDP_PSR2_TP2_TIME_2500us;
@@ -535,12 +535,12 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
  static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
  				    struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
  	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
  	int psr_max_h = 0, psr_max_v = 0;
- if (!dev_priv->psr.sink_psr2_support)
+	if (!i915->psr.sink_psr2_support)
  		return false;
/*
@@ -553,10 +553,10 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
  		return false;
  	}
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915)) {
  		psr_max_h = 4096;
  		psr_max_v = 2304;
-	} else if (IS_GEN(dev_priv, 9)) {
+	} else if (IS_GEN(i915, 9)) {
  		psr_max_h = 3640;
  		psr_max_v = 2304;
  	}
@@ -574,9 +574,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
  	 * only need to validate the SU block width is a multiple of
  	 * x granularity.
  	 */
-	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+	if (crtc_hdisplay % i915->psr.su_x_granularity) {
  		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
-			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
+			      crtc_hdisplay, i915->psr.su_x_granularity);
  		return false;
  	}
@@ -592,15 +592,15 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
  			      struct intel_crtc_state *crtc_state)
  {
  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	const struct drm_display_mode *adjusted_mode =
  		&crtc_state->base.adjusted_mode;
  	int psr_setup_time;
- if (!CAN_PSR(dev_priv))
+	if (!CAN_PSR(i915))
  		return;
- if (intel_dp != dev_priv->psr.dp)
+	if (intel_dp != i915->psr.dp)
  		return;
/*
@@ -615,7 +615,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
  		return;
  	}
- if (dev_priv->psr.sink_not_reliable) {
+	if (i915->psr.sink_not_reliable) {
  		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
  		return;
  	}
@@ -645,24 +645,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
static void intel_psr_activate(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(i915) >= 9)
  		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
  	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
-	WARN_ON(dev_priv->psr.active);
-	lockdep_assert_held(&dev_priv->psr.lock);
+	WARN_ON(i915->psr.active);
+	lockdep_assert_held(&i915->psr.lock);
/* psr1 and psr2 are mutually exclusive.*/
-	if (dev_priv->psr.psr2_enabled)
+	if (i915->psr.psr2_enabled)
  		hsw_activate_psr2(intel_dp);
  	else
  		hsw_activate_psr1(intel_dp);
- dev_priv->psr.active = true;
+	i915->psr.active = true;
  }
-static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *i915,
  					 enum transcoder cpu_transcoder)
  {
  	static const i915_reg_t regs[] = {
@@ -672,7 +672,7 @@ static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
  		[TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
  	};
- WARN_ON(INTEL_GEN(dev_priv) < 9);
+	WARN_ON(INTEL_GEN(i915) < 9);
if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
  		    !regs[cpu_transcoder].reg))
@@ -684,19 +684,19 @@ static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
  static void intel_psr_enable_source(struct intel_dp *intel_dp,
  				    const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  	u32 mask;
/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
  	 * use hardcoded values PSR AUX transactions
  	 */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		hsw_psr_setup_aux(intel_dp);
- if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
-					   !IS_GEMINILAKE(dev_priv))) {
-		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
+	if (i915->psr.psr2_enabled && (IS_GEN(i915, 9) &&
+					   !IS_GEMINILAKE(i915))) {
+		i915_reg_t reg = gen9_chicken_trans_reg(i915,
  							cpu_transcoder);
  		u32 chicken = I915_READ(reg);
@@ -716,29 +716,29 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
  	       EDP_PSR_DEBUG_MASK_LPSP |
  	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
- if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(i915) < 11)
  		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
I915_WRITE(EDP_PSR_DEBUG, mask);
  }
-static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
+static void intel_psr_enable_locked(struct drm_i915_private *i915,
  				    const struct intel_crtc_state *crtc_state)
  {
-	struct intel_dp *intel_dp = dev_priv->psr.dp;
+	struct intel_dp *intel_dp = i915->psr.dp;
- WARN_ON(dev_priv->psr.enabled);
+	WARN_ON(i915->psr.enabled);
- dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
-	dev_priv->psr.busy_frontbuffer_bits = 0;
-	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+	i915->psr.psr2_enabled = intel_psr2_enabled(i915, crtc_state);
+	i915->psr.busy_frontbuffer_bits = 0;
+	i915->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
DRM_DEBUG_KMS("Enabling PSR%s\n",
-		      dev_priv->psr.psr2_enabled ? "2" : "1");
+		      i915->psr.psr2_enabled ? "2" : "1");
  	intel_psr_setup_vsc(intel_dp, crtc_state);
  	intel_psr_enable_sink(intel_dp);
  	intel_psr_enable_source(intel_dp, crtc_state);
-	dev_priv->psr.enabled = true;
+	i915->psr.enabled = true;
intel_psr_activate(intel_dp);
  }
@@ -753,41 +753,41 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
  void intel_psr_enable(struct intel_dp *intel_dp,
  		      const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!crtc_state->has_psr)
  		return;
- if (WARN_ON(!CAN_PSR(dev_priv)))
+	if (WARN_ON(!CAN_PSR(i915)))
  		return;
- WARN_ON(dev_priv->drrs.dp);
+	WARN_ON(i915->drrs.dp);
- mutex_lock(&dev_priv->psr.lock);
+	mutex_lock(&i915->psr.lock);
- if (!psr_global_enabled(dev_priv->psr.debug)) {
+	if (!psr_global_enabled(i915->psr.debug)) {
  		DRM_DEBUG_KMS("PSR disabled by flag\n");
  		goto unlock;
  	}
- intel_psr_enable_locked(dev_priv, crtc_state);
+	intel_psr_enable_locked(i915, crtc_state);
unlock:
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&i915->psr.lock);
  }
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
+static void intel_psr_exit(struct drm_i915_private *i915)
  {
  	u32 val;
- if (!dev_priv->psr.active) {
-		if (INTEL_GEN(dev_priv) >= 9)
+	if (!i915->psr.active) {
+		if (INTEL_GEN(i915) >= 9)
  			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
  		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
  		return;
  	}
- if (dev_priv->psr.psr2_enabled) {
+	if (i915->psr.psr2_enabled) {
  		val = I915_READ(EDP_PSR2_CTL);
  		WARN_ON(!(val & EDP_PSR2_ENABLE));
  		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
@@ -796,26 +796,26 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
  		WARN_ON(!(val & EDP_PSR_ENABLE));
  		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
  	}
-	dev_priv->psr.active = false;
+	i915->psr.active = false;
  }
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	i915_reg_t psr_status;
  	u32 psr_status_mask;
- lockdep_assert_held(&dev_priv->psr.lock);
+	lockdep_assert_held(&i915->psr.lock);
- if (!dev_priv->psr.enabled)
+	if (!i915->psr.enabled)
  		return;
DRM_DEBUG_KMS("Disabling PSR%s\n",
-		      dev_priv->psr.psr2_enabled ? "2" : "1");
+		      i915->psr.psr2_enabled ? "2" : "1");
- intel_psr_exit(dev_priv);
+	intel_psr_exit(i915);
- if (dev_priv->psr.psr2_enabled) {
+	if (i915->psr.psr2_enabled) {
  		psr_status = EDP_PSR2_STATUS;
  		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
  	} else {
@@ -824,14 +824,14 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
  	}
/* Wait till PSR is idle */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    psr_status, psr_status_mask, 0, 2000))
  		DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
  	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
- dev_priv->psr.enabled = false;
+	i915->psr.enabled = false;
  }
/**
@@ -844,23 +844,23 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
  void intel_psr_disable(struct intel_dp *intel_dp,
  		       const struct intel_crtc_state *old_crtc_state)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!old_crtc_state->has_psr)
  		return;
- if (WARN_ON(!CAN_PSR(dev_priv)))
+	if (WARN_ON(!CAN_PSR(i915)))
  		return;
- mutex_lock(&dev_priv->psr.lock);
+	mutex_lock(&i915->psr.lock);
 	intel_psr_disable_locked(intel_dp);

-	mutex_unlock(&dev_priv->psr.lock);
-	cancel_work_sync(&dev_priv->psr.work);
+	mutex_unlock(&i915->psr.lock);
+	cancel_work_sync(&i915->psr.work);
  }
-static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+static void psr_force_hw_tracking_exit(struct drm_i915_private *i915)
  {
  	/*
  	 * Display WA #0884: all
@@ -871,7 +871,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
  	 * but it makes more sense write to the current active
  	 * pipe.
  	 */
-	I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+	I915_WRITE(CURSURFLIVE(i915->psr.pipe), 0);
  }
/**
@@ -886,22 +886,22 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
  void intel_psr_update(struct intel_dp *intel_dp,
  		      const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct i915_psr *psr = &dev_priv->psr;
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	struct i915_psr *psr = &i915->psr;
  	bool enable, psr2_enable;
- if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+	if (!CAN_PSR(i915) || READ_ONCE(psr->dp) != intel_dp)
  		return;
- mutex_lock(&dev_priv->psr.lock);
+	mutex_lock(&i915->psr.lock);
enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
-	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
+	psr2_enable = intel_psr2_enabled(i915, crtc_state);
if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
  		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
  		if (crtc_state->crc_enabled && psr->enabled)
-			psr_force_hw_tracking_exit(dev_priv);
+			psr_force_hw_tracking_exit(i915);
goto unlock;
  	}
@@ -910,10 +910,10 @@ void intel_psr_update(struct intel_dp *intel_dp,
  		intel_psr_disable_locked(intel_dp);
if (enable)
-		intel_psr_enable_locked(dev_priv, crtc_state);
+		intel_psr_enable_locked(i915, crtc_state);
unlock:
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&i915->psr.lock);
  }
/**
@@ -930,13 +930,13 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
  			    u32 *out_value)
  {
  	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
+	if (!i915->psr.enabled || !new_crtc_state->has_psr)
  		return 0;
/* FIXME: Update this for PSR2 if we need to wait for idle */
-	if (READ_ONCE(dev_priv->psr.psr2_enabled))
+	if (READ_ONCE(i915->psr.psr2_enabled))
  		return 0;
/*
@@ -946,22 +946,22 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
  	 * defensive enough to cover everything.
  	 */
- return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
+	return __intel_wait_for_register(&i915->uncore, EDP_PSR_STATUS,
  					 EDP_PSR_STATUS_STATE_MASK,
  					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
  					 out_value);
  }
-static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
+static bool __psr_wait_for_idle_locked(struct drm_i915_private *i915)
  {
  	i915_reg_t reg;
  	u32 mask;
  	int err;
- if (!dev_priv->psr.enabled)
+	if (!i915->psr.enabled)
  		return false;
- if (dev_priv->psr.psr2_enabled) {
+	if (i915->psr.psr2_enabled) {
  		reg = EDP_PSR2_STATUS;
  		mask = EDP_PSR2_STATUS_STATE_MASK;
  	} else {
@@ -969,20 +969,20 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
  		mask = EDP_PSR_STATUS_STATE_MASK;
  	}
- mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&i915->psr.lock);
- err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
+	err = intel_wait_for_register(&i915->uncore, reg, mask, 0, 50);
  	if (err)
  		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
-	mutex_lock(&dev_priv->psr.lock);
-	return err == 0 && dev_priv->psr.enabled;
+	mutex_lock(&i915->psr.lock);
+	return err == 0 && i915->psr.enabled;
  }
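
The locking pattern in __psr_wait_for_idle_locked() above is easy to miss: psr.lock is dropped for the duration of the register poll and retaken afterwards, so the software state has to be re-validated before reporting success. A self-contained sketch of the same shape (the names and the stubbed poll are made up for illustration):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct psr_state {
		pthread_mutex_t lock;
		bool enabled;
	};

	/* Stand-in for the register poll; pretend the hardware went idle. */
	static int poll_hw_idle(void)
	{
		return 0;
	}

	/* Called with psr->lock held; returns with it held again. */
	static bool wait_for_idle_locked(struct psr_state *psr)
	{
		int err;

		if (!psr->enabled)
			return false;

		pthread_mutex_unlock(&psr->lock);
		err = poll_hw_idle();
		pthread_mutex_lock(&psr->lock);

		/* PSR may have been disabled while the lock was dropped. */
		return err == 0 && psr->enabled;
	}

	int main(void)
	{
		struct psr_state psr = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.enabled = true,
		};

		pthread_mutex_lock(&psr.lock);
		printf("still wanted: %d\n", wait_for_idle_locked(&psr));
		pthread_mutex_unlock(&psr.lock);
		return 0;
	}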
-static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
+static int intel_psr_fastset_force(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct drm_modeset_acquire_ctx ctx;
  	struct drm_atomic_state *state;
  	struct drm_crtc *crtc;
@@ -1032,7 +1032,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
  	return err;
  }
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
+int intel_psr_debug_set(struct drm_i915_private *i915, u64 val)
  {
  	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
  	u32 old_mode;
@@ -1044,25 +1044,25 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
  		return -EINVAL;
  	}
- ret = mutex_lock_interruptible(&dev_priv->psr.lock);
+	ret = mutex_lock_interruptible(&i915->psr.lock);
  	if (ret)
  		return ret;
- old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
-	dev_priv->psr.debug = val;
-	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+	old_mode = i915->psr.debug & I915_PSR_DEBUG_MODE_MASK;
+	i915->psr.debug = val;
+	intel_psr_irq_control(i915, i915->psr.debug);
- mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&i915->psr.lock);
if (old_mode != mode)
-		ret = intel_psr_fastset_force(dev_priv);
+		ret = intel_psr_fastset_force(i915);
return ret;
  }
-static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+static void intel_psr_handle_irq(struct drm_i915_private *i915)
  {
-	struct i915_psr *psr = &dev_priv->psr;
+	struct i915_psr *psr = &i915->psr;
intel_psr_disable_locked(psr->dp);
  	psr->sink_not_reliable = true;
@@ -1072,16 +1072,16 @@ static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
static void intel_psr_work(struct work_struct *work)
  {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), psr.work);
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915), psr.work);
- mutex_lock(&dev_priv->psr.lock);
+	mutex_lock(&i915->psr.lock);
- if (!dev_priv->psr.enabled)
+	if (!i915->psr.enabled)
  		goto unlock;
- if (READ_ONCE(dev_priv->psr.irq_aux_error))
-		intel_psr_handle_irq(dev_priv);
+	if (READ_ONCE(i915->psr.irq_aux_error))
+		intel_psr_handle_irq(i915);
/*
  	 * We have to make sure PSR is ready for re-enable
@@ -1089,7 +1089,7 @@ static void intel_psr_work(struct work_struct *work)
  	 * PSR might take some time to get fully disabled
  	 * and be ready for re-enable.
  	 */
-	if (!__psr_wait_for_idle_locked(dev_priv))
+	if (!__psr_wait_for_idle_locked(i915))
  		goto unlock;
/*
@@ -1097,17 +1097,17 @@ static void intel_psr_work(struct work_struct *work)
  	 * recheck. Since psr_flush first clears this and then reschedules we
  	 * won't ever miss a flush when bailing out here.
  	 */
-	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
+	if (i915->psr.busy_frontbuffer_bits || i915->psr.active)
  		goto unlock;
- intel_psr_activate(dev_priv->psr.dp);
+	intel_psr_activate(i915->psr.dp);
  unlock:
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&i915->psr.lock);
  }
/**
   * intel_psr_invalidate - Invalidade PSR
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @frontbuffer_bits: frontbuffer plane tracking bits
   * @origin: which operation caused the invalidate
   *
@@ -1118,33 +1118,33 @@ static void intel_psr_work(struct work_struct *work)
   *
   * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
   */
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+void intel_psr_invalidate(struct drm_i915_private *i915,
  			  unsigned frontbuffer_bits, enum fb_op_origin origin)
  {
-	if (!CAN_PSR(dev_priv))
+	if (!CAN_PSR(i915))
  		return;
if (origin == ORIGIN_FLIP)
  		return;
- mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
-		mutex_unlock(&dev_priv->psr.lock);
+	mutex_lock(&i915->psr.lock);
+	if (!i915->psr.enabled) {
+		mutex_unlock(&i915->psr.lock);
  		return;
  	}
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
-	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(i915->psr.pipe);
+	i915->psr.busy_frontbuffer_bits |= frontbuffer_bits;
if (frontbuffer_bits)
-		intel_psr_exit(dev_priv);
+		intel_psr_exit(i915);
- mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&i915->psr.lock);
  }
/**
   * intel_psr_flush - Flush PSR
- * @dev_priv: i915 device
+ * @i915: i915 device
   * @frontbuffer_bits: frontbuffer plane tracking bits
   * @origin: which operation caused the flush
   *
@@ -1155,55 +1155,55 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
   *
   * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
   */
-void intel_psr_flush(struct drm_i915_private *dev_priv,
+void intel_psr_flush(struct drm_i915_private *i915,
  		     unsigned frontbuffer_bits, enum fb_op_origin origin)
  {
-	if (!CAN_PSR(dev_priv))
+	if (!CAN_PSR(i915))
  		return;
if (origin == ORIGIN_FLIP)
  		return;
- mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
-		mutex_unlock(&dev_priv->psr.lock);
+	mutex_lock(&i915->psr.lock);
+	if (!i915->psr.enabled) {
+		mutex_unlock(&i915->psr.lock);
  		return;
  	}
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
-	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(i915->psr.pipe);
+	i915->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
  	if (frontbuffer_bits)
-		psr_force_hw_tracking_exit(dev_priv);
+		psr_force_hw_tracking_exit(i915);
- if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-		schedule_work(&dev_priv->psr.work);
-	mutex_unlock(&dev_priv->psr.lock);
+	if (!i915->psr.active && !i915->psr.busy_frontbuffer_bits)
+		schedule_work(&i915->psr.work);
+	mutex_unlock(&i915->psr.lock);
  }
/**
   * intel_psr_init - Init basic PSR work and mutex.
- * @dev_priv: i915 device private
+ * @i915: i915 device private
   *
   * This function is  called only once at driver load to initialize basic
   * PSR stuff.
   */
-void intel_psr_init(struct drm_i915_private *dev_priv)
+void intel_psr_init(struct drm_i915_private *i915)
  {
  	u32 val;
- if (!HAS_PSR(dev_priv))
+	if (!HAS_PSR(i915))
  		return;
- dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
+	i915->psr_mmio_base = IS_HASWELL(i915) ?
  		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
- if (!dev_priv->psr.sink_support)
+	if (!i915->psr.sink_support)
  		return;
if (i915_modparams.enable_psr == -1)
-		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+		if (INTEL_GEN(i915) < 9 || !i915->vbt.psr.enable)
  			i915_modparams.enable_psr = 0;
/*
@@ -1218,31 +1218,31 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
  	val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
  	if (val) {
  		DRM_DEBUG_KMS("PSR interruption error set\n");
-		dev_priv->psr.sink_not_reliable = true;
+		i915->psr.sink_not_reliable = true;
  	}
/* Set link_standby x link_off defaults */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
  		/* HSW and BDW require workarounds that we don't implement. */
-		dev_priv->psr.link_standby = false;
+		i915->psr.link_standby = false;
  	else
  		/* For new platforms let's respect VBT back again */
-		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+		i915->psr.link_standby = i915->vbt.psr.full_link;
- INIT_WORK(&dev_priv->psr.work, intel_psr_work);
-	mutex_init(&dev_priv->psr.lock);
+	INIT_WORK(&i915->psr.work, intel_psr_work);
+	mutex_init(&i915->psr.lock);
  }
void intel_psr_short_pulse(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct i915_psr *psr = &dev_priv->psr;
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	struct i915_psr *psr = &i915->psr;
  	u8 val;
  	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
  			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
  			  DP_PSR_LINK_CRC_ERROR;
- if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+	if (!CAN_PSR(i915) || !intel_dp_is_edp(intel_dp))
  		return;
mutex_lock(&psr->lock);
@@ -1288,15 +1288,15 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
bool intel_psr_enabled(struct intel_dp *intel_dp)
  {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  	bool ret;
- if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+	if (!CAN_PSR(i915) || !intel_dp_is_edp(intel_dp))
  		return false;
- mutex_lock(&dev_priv->psr.lock);
-	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_lock(&i915->psr.lock);
+	ret = (i915->psr.dp == intel_dp && i915->psr.enabled);
+	mutex_unlock(&i915->psr.lock);
return ret;
  }
diff --git a/drivers/gpu/drm/i915/intel_psr.h b/drivers/gpu/drm/i915/intel_psr.h
index dc818826f36d..8a3ddc4ae44d 100644
--- a/drivers/gpu/drm/i915/intel_psr.h
+++ b/drivers/gpu/drm/i915/intel_psr.h
@@ -12,7 +12,7 @@ struct drm_i915_private;
  struct intel_crtc_state;
  struct intel_dp;
-#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+#define CAN_PSR(i915) (HAS_PSR(i915) && i915->psr.sink_support)
  void intel_psr_init_dpcd(struct intel_dp *intel_dp);
  void intel_psr_enable(struct intel_dp *intel_dp,
  		      const struct intel_crtc_state *crtc_state);
@@ -20,18 +20,18 @@ void intel_psr_disable(struct intel_dp *intel_dp,
  		       const struct intel_crtc_state *old_crtc_state);
  void intel_psr_update(struct intel_dp *intel_dp,
  		      const struct intel_crtc_state *crtc_state);
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+int intel_psr_debug_set(struct drm_i915_private *i915, u64 value);
+void intel_psr_invalidate(struct drm_i915_private *i915,
  			  unsigned frontbuffer_bits,
  			  enum fb_op_origin origin);
-void intel_psr_flush(struct drm_i915_private *dev_priv,
+void intel_psr_flush(struct drm_i915_private *i915,
  		     unsigned frontbuffer_bits,
  		     enum fb_op_origin origin);
-void intel_psr_init(struct drm_i915_private *dev_priv);
+void intel_psr_init(struct drm_i915_private *i915);
  void intel_psr_compute_config(struct intel_dp *intel_dp,
  			      struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_irq_control(struct drm_i915_private *i915, u32 debug);
+void intel_psr_irq_handler(struct drm_i915_private *i915, u32 psr_iir);
  void intel_psr_short_pulse(struct intel_dp *intel_dp);
  int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
  			    u32 *out_value);
diff --git a/drivers/gpu/drm/i915/intel_quirks.h b/drivers/gpu/drm/i915/intel_quirks.h
index b0fcff142a56..b617ca047695 100644
--- a/drivers/gpu/drm/i915/intel_quirks.h
+++ b/drivers/gpu/drm/i915/intel_quirks.h
@@ -8,6 +8,6 @@
  struct drm_i915_private;

-void intel_init_quirks(struct drm_i915_private *dev_priv);
+void intel_init_quirks(struct drm_i915_private *i915);
#endif /* __INTEL_QUIRKS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index a7acceb13473..1042f1c0b125 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -20,10 +20,10 @@ enum i915_drm_suspend_mode {
  	I915_DRM_SUSPEND_HIBERNATE,
  };
-void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_init_early(struct drm_i915_private *i915);
+void intel_runtime_pm_enable(struct drm_i915_private *i915);
+void intel_runtime_pm_disable(struct drm_i915_private *i915);
+void intel_runtime_pm_cleanup(struct drm_i915_private *i915);
intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
  intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0860ae36bb87..b916c79d6b31 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -212,18 +212,18 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
  static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
  {
  	struct drm_device *dev = intel_sdvo->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 bval = val, cval = val;
  	int i;
- if (HAS_PCH_SPLIT(dev_priv)) {
+	if (HAS_PCH_SPLIT(i915)) {
  		I915_WRITE(intel_sdvo->sdvo_reg, val);
  		POSTING_READ(intel_sdvo->sdvo_reg);
  		/*
  		 * HW workaround, need to write this twice for issue
  		 * that may result in first write getting masked.
  		 */
-		if (HAS_PCH_IBX(dev_priv)) {
+		if (HAS_PCH_IBX(i915)) {
  			I915_WRITE(intel_sdvo->sdvo_reg, val);
  			POSTING_READ(intel_sdvo->sdvo_reg);
  		}
@@ -1400,7 +1400,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
  				  const struct intel_crtc_state *crtc_state,
  				  const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
  	const struct intel_sdvo_connector_state *sdvo_state =
@@ -1481,13 +1481,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
  		return;
/* Set the SDVO control regs. */
-	if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		/* The real mode polarity is set by the SDVO commands, using
  		 * struct intel_sdvo_dtd. */
  		sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
-		if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
+		if (!HAS_PCH_SPLIT(i915) && crtc_state->limited_color_range)
  			sdvox |= HDMI_COLOR_RANGE_16_235;
-		if (INTEL_GEN(dev_priv) < 5)
+		if (INTEL_GEN(i915) < 5)
  			sdvox |= SDVO_BORDER_ENABLE;
  	} else {
  		sdvox = I915_READ(intel_sdvo->sdvo_reg);
@@ -1498,15 +1498,15 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
  		sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
  	}
- if (HAS_PCH_CPT(dev_priv))
+	if (HAS_PCH_CPT(i915))
  		sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
  	else
  		sdvox |= SDVO_PIPE_SEL(crtc->pipe);
- if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
  		/* done in crtc_mode_set as the dpll_md reg must be written early */
-	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
-		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+	} else if (IS_I945G(i915) || IS_I945GM(i915) ||
+		   IS_G33(i915) || IS_PINEVIEW(i915)) {
  		/* done in crtc_mode_set as it lives inside the dpll register */
  	} else {
  		sdvox |= (crtc_state->pixel_multiplier - 1)
@@ -1514,7 +1514,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
  	}
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
-	    INTEL_GEN(dev_priv) < 5)
+	    INTEL_GEN(i915) < 5)
  		sdvox |= SDVO_STALL_SELECT;
  	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
  }
@@ -1531,7 +1531,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
  	return active_outputs & intel_sdvo_connector->output_flag;
  }
-bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_sdvo_port_enabled(struct drm_i915_private *i915,
  			     i915_reg_t sdvo_reg, enum pipe *pipe)
  {
  	u32 val;
@@ -1539,9 +1539,9 @@ bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
  	val = I915_READ(sdvo_reg);
/* asserts want to know the pipe even if the port is disabled */
-	if (HAS_PCH_CPT(dev_priv))
+	if (HAS_PCH_CPT(i915))
  		*pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
-	else if (IS_CHERRYVIEW(dev_priv))
+	else if (IS_CHERRYVIEW(i915))
  		*pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
  	else
  		*pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT;
@@ -1552,14 +1552,14 @@ bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
  static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
  				    enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
  	u16 active_outputs = 0;
  	bool ret;
  	intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);

-	ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe);
+	ret = intel_sdvo_port_enabled(i915, intel_sdvo->sdvo_reg, pipe);
return ret || active_outputs;
  }
@@ -1568,7 +1568,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
  				  struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
  	struct intel_sdvo_dtd dtd;
  	int encoder_pixel_multiplier = 0;
@@ -1610,7 +1610,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
  	 * encoder->get_config we so already have a valid pixel multplier on all
  	 * other platfroms.
  	 */
-	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
+	if (IS_I915G(i915) || IS_I915GM(i915)) {
  		pipe_config->pixel_multiplier =
  			((sdvox & SDVO_PORT_MULTIPLY_MASK)
  			 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@@ -1693,7 +1693,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
  			       const struct intel_crtc_state *old_crtc_state,
  			       const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  	u32 temp;
@@ -1716,13 +1716,13 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
  	 * to transcoder A after disabling it to allow the
  	 * matching DP port to be enabled on transcoder A.
  	 */
-	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+	if (HAS_PCH_IBX(i915) && crtc->pipe == PIPE_B) {
  		/*
  		 * We get CPU/PCH FIFO underruns on the other pipe when
  		 * doing the workaround. Sweep them under the rug.
  		 */
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+		intel_set_cpu_fifo_underrun_reporting(i915, PIPE_A, false);
+		intel_set_pch_fifo_underrun_reporting(i915, PIPE_A, false);
temp &= ~SDVO_PIPE_SEL_MASK;
  		temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
@@ -1731,9 +1731,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
  		temp &= ~SDVO_ENABLE;
  		intel_sdvo_write_sdvox(intel_sdvo, temp);
- intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
-		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+		intel_wait_for_vblank_if_active(i915, PIPE_A);
+		intel_set_cpu_fifo_underrun_reporting(i915, PIPE_A, true);
+		intel_set_pch_fifo_underrun_reporting(i915, PIPE_A, true);
  	}
  }
@@ -1755,7 +1755,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
  			      const struct drm_connector_state *conn_state)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
  	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
  	u32 temp;
@@ -1768,7 +1768,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
  	intel_sdvo_write_sdvox(intel_sdvo, temp);
for (i = 0; i < 2; i++)
-		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+		intel_wait_for_vblank(i915, intel_crtc->pipe);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
  	/*
@@ -1865,17 +1865,17 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
  {
-	struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
  	u16 hotplug;
- if (!I915_HAS_HOTPLUG(dev_priv))
+	if (!I915_HAS_HOTPLUG(i915))
  		return 0;
/*
  	 * HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
  	 * on the line.
  	 */
-	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+	if (IS_I945G(i915) || IS_I945GM(i915))
  		return 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
@@ -1919,11 +1919,11 @@ intel_sdvo_get_edid(struct drm_connector *connector)
  static struct edid *
  intel_sdvo_get_analog_edid(struct drm_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
return drm_get_edid(connector,
-			    intel_gmbus_get_adapter(dev_priv,
-						    dev_priv->vbt.crt_ddc_pin));
+			    intel_gmbus_get_adapter(i915,
+						    i915->vbt.crt_ddc_pin));
  }
static enum drm_connector_status
@@ -2186,7 +2186,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
  static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
  {
  	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	struct drm_display_mode *newmode;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2196,9 +2196,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
  	 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
  	 * SDVO->LVDS transcoders can't cope with the EDID mode.
  	 */
-	if (dev_priv->vbt.sdvo_lvds_vbt_mode != NULL) {
+	if (i915->vbt.sdvo_lvds_vbt_mode != NULL) {
  		newmode = drm_mode_duplicate(connector->dev,
-					     dev_priv->vbt.sdvo_lvds_vbt_mode);
+					     i915->vbt.sdvo_lvds_vbt_mode);
  		if (newmode != NULL) {
  			/* Guarantee the mode is preferred */
  			newmode->type = (DRM_MODE_TYPE_PREFERRED |
@@ -2488,15 +2488,15 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
   * outputs, then LVDS outputs.
   */
  static void
-intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+intel_sdvo_select_ddc_bus(struct drm_i915_private *i915,
  			  struct intel_sdvo *sdvo)
  {
  	struct sdvo_device_mapping *mapping;
if (sdvo->port == PORT_B)
-		mapping = &dev_priv->vbt.sdvo_mappings[0];
+		mapping = &i915->vbt.sdvo_mappings[0];
  	else
-		mapping = &dev_priv->vbt.sdvo_mappings[1];
+		mapping = &i915->vbt.sdvo_mappings[1];
if (mapping->initialized)
  		sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
@@ -2505,24 +2505,24 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
  }
static void
-intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+intel_sdvo_select_i2c_bus(struct drm_i915_private *i915,
  			  struct intel_sdvo *sdvo)
  {
  	struct sdvo_device_mapping *mapping;
  	u8 pin;
if (sdvo->port == PORT_B)
-		mapping = &dev_priv->vbt.sdvo_mappings[0];
+		mapping = &i915->vbt.sdvo_mappings[0];
  	else
-		mapping = &dev_priv->vbt.sdvo_mappings[1];
+		mapping = &i915->vbt.sdvo_mappings[1];
if (mapping->initialized &&
-	    intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
+	    intel_gmbus_is_valid_pin(i915, mapping->i2c_pin))
  		pin = mapping->i2c_pin;
  	else
  		pin = GMBUS_PIN_DPB;
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+	sdvo->i2c = intel_gmbus_get_adapter(i915, pin);
/*
  	 * With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
@@ -2546,17 +2546,17 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
  }
static u8
-intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
+intel_sdvo_get_slave_addr(struct drm_i915_private *i915,
  			  struct intel_sdvo *sdvo)
  {
  	struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->port == PORT_B) {
-		my_mapping = &dev_priv->vbt.sdvo_mappings[0];
-		other_mapping = &dev_priv->vbt.sdvo_mappings[1];
+		my_mapping = &i915->vbt.sdvo_mappings[0];
+		other_mapping = &i915->vbt.sdvo_mappings[1];
  	} else {
-		my_mapping = &dev_priv->vbt.sdvo_mappings[1];
-		other_mapping = &dev_priv->vbt.sdvo_mappings[0];
+		my_mapping = &i915->vbt.sdvo_mappings[1];
+		other_mapping = &i915->vbt.sdvo_mappings[0];
  	}
/* If the BIOS described our SDVO device, take advantage of it. */
@@ -2616,10 +2616,10 @@ static void
  intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
  			       struct intel_sdvo_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.base.dev);
intel_attach_force_audio_property(&connector->base.base);
-	if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
+	if (INTEL_GEN(i915) >= 4 && IS_MOBILE(i915)) {
  		intel_attach_broadcast_rgb_property(&connector->base.base);
  	}
  	intel_attach_aspect_ratio_property(&connector->base.base);
@@ -3180,9 +3180,9 @@ static const struct i2c_lock_operations proxy_lock_ops = {
static bool
  intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
-			  struct drm_i915_private *dev_priv)
+			  struct drm_i915_private *i915)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
sdvo->ddc.owner = THIS_MODULE;
  	sdvo->ddc.class = I2C_CLASS_DDC;
@@ -3195,23 +3195,23 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
  	return i2c_add_adapter(&sdvo->ddc) == 0;
  }
-static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
+static void assert_sdvo_port_valid(const struct drm_i915_private *i915,
  				   enum port port)
  {
-	if (HAS_PCH_SPLIT(dev_priv))
+	if (HAS_PCH_SPLIT(i915))
  		WARN_ON(port != PORT_B);
  	else
  		WARN_ON(port != PORT_B && port != PORT_C);
  }
-bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+bool intel_sdvo_init(struct drm_i915_private *i915,
  		     i915_reg_t sdvo_reg, enum port port)
  {
  	struct intel_encoder *intel_encoder;
  	struct intel_sdvo *intel_sdvo;
  	int i;
- assert_sdvo_port_valid(dev_priv, port);
+	assert_sdvo_port_valid(i915, port);
intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
  	if (!intel_sdvo)
@@ -3220,9 +3220,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
  	intel_sdvo->sdvo_reg = sdvo_reg;
  	intel_sdvo->port = port;
  	intel_sdvo->slave_addr =
-		intel_sdvo_get_slave_addr(dev_priv, intel_sdvo) >> 1;
-	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
-	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev_priv))
+		intel_sdvo_get_slave_addr(i915, intel_sdvo) >> 1;
+	intel_sdvo_select_i2c_bus(i915, intel_sdvo);
+	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, i915))
  		goto err_i2c_bus;
/* encoder type will be decided later */
@@ -3230,7 +3230,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
  	intel_encoder->type = INTEL_OUTPUT_SDVO;
  	intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
  	intel_encoder->port = port;
-	drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+	drm_encoder_init(&i915->drm, &intel_encoder->base,
  			 &intel_sdvo_enc_funcs, 0,
  			 "SDVO %c", port_name(port));
@@ -3246,7 +3246,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
  	}
intel_encoder->compute_config = intel_sdvo_compute_config;
-	if (HAS_PCH_SPLIT(dev_priv)) {
+	if (HAS_PCH_SPLIT(i915)) {
  		intel_encoder->disable = pch_disable_sdvo;
  		intel_encoder->post_disable = pch_post_disable_sdvo;
  	} else {
@@ -3290,7 +3290,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
  	 */
  	intel_sdvo->base.cloneable = 0;
- intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
+	intel_sdvo_select_ddc_bus(i915, intel_sdvo);
/* Set the input timing to the screen. Assume always input 0. */
  	if (!intel_sdvo_set_target_input(intel_sdvo))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.h b/drivers/gpu/drm/i915/intel_sdvo.h
index c9e05bcdd141..1ec6766c5ae1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/intel_sdvo.h
@@ -15,9 +15,9 @@
  struct drm_i915_private;
  enum pipe;
-bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_sdvo_port_enabled(struct drm_i915_private *i915,
  			     i915_reg_t sdvo_reg, enum pipe *pipe);
-bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+bool intel_sdvo_init(struct drm_i915_private *i915,
  		     i915_reg_t reg, enum port port);
#endif /* __INTEL_SDVO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index c180815faabd..73871e5e8e19 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -94,12 +94,12 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
  	long timeout = msecs_to_jiffies_timeout(1);
  	int scanline, min, max, vblank_start;
  	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
-	bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	bool need_vlv_dsi_wa = (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
  		intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
  	DEFINE_WAIT(wait);
  	u32 psr_status;
@@ -207,7 +207,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
  	int scanline_end = intel_get_crtc_scanline(crtc);
  	u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
  	ktime_t end_vbl_time = ktime_get();
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);

@@ -227,7 +227,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
  	local_irq_enable();

-	if (intel_vgpu_active(dev_priv))
+	if (intel_vgpu_active(i915))
  		return;
if (crtc->debug.start_vbl_count &&
@@ -353,7 +353,7 @@ skl_program_scaler(struct intel_plane *plane,
  		   const struct intel_crtc_state *crtc_state,
  		   const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum pipe pipe = plane->pipe;
  	int scaler_id = plane_state->scaler_id;
  	const struct intel_scaler *scaler =
@@ -375,7 +375,7 @@ skl_program_scaler(struct intel_plane *plane,
/* TODO: handle sub-pixel coordinates */
  	if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
-	    !icl_is_hdr_plane(dev_priv, plane->id)) {
+	    !icl_is_hdr_plane(i915, plane->id)) {
  		y_hphase = skl_scaler_calc_phase(1, hscale, false);
  		y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -415,7 +415,7 @@ icl_program_input_csc(struct intel_plane *plane,
  		      const struct intel_crtc_state *crtc_state,
  		      const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum pipe pipe = plane->pipe;
  	enum plane_id plane_id = plane->id;
@@ -507,7 +507,7 @@ skl_program_plane(struct intel_plane *plane,
  		  const struct intel_plane_state *plane_state,
  		  int color_plane, bool slave, u32 plane_ctl)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum plane_id plane_id = plane->id;
  	enum pipe pipe = plane->pipe;
  	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -529,7 +529,7 @@ skl_program_plane(struct intel_plane *plane,
  	plane_ctl |= skl_plane_ctl_crtc(crtc_state);

-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		plane_color_ctl = plane_state->color_ctl |
  			glk_plane_color_ctl_crtc(crtc_state);
@@ -549,7 +549,7 @@ skl_program_plane(struct intel_plane *plane,
  		crtc_y = 0;
  	}
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
  	I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
@@ -557,7 +557,7 @@ skl_program_plane(struct intel_plane *plane,
  	I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
  		      (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- if (icl_is_hdr_plane(dev_priv, plane_id)) {
+	if (icl_is_hdr_plane(i915, plane_id)) {
  		u32 cus_ctl = 0;
if (linked) {
@@ -578,10 +578,10 @@ skl_program_plane(struct intel_plane *plane,
  		I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
  	}
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
- if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
+	if (fb->format->is_yuv && icl_is_hdr_plane(i915, plane_id))
  		icl_program_input_csc(plane, crtc_state, plane_state);
skl_write_plane_wm(plane, crtc_state);
@@ -592,7 +592,7 @@ skl_program_plane(struct intel_plane *plane,
  	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);

-	if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(i915) < 11)
  		I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
  			      (plane_state->color_plane[1].y << 16) |
  			      plane_state->color_plane[1].x);
@@ -609,7 +609,7 @@ skl_program_plane(struct intel_plane *plane,
  	if (!slave && plane_state->scaler_id >= 0)
  		skl_program_scaler(plane, crtc_state, plane_state);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static void
@@ -641,14 +641,14 @@ static void
  skl_disable_plane(struct intel_plane *plane,
  		  const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum plane_id plane_id = plane->id;
  	enum pipe pipe = plane->pipe;
  	unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
- if (icl_is_hdr_plane(dev_priv, plane_id))
+	if (icl_is_hdr_plane(i915, plane_id))
  		I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
skl_write_plane_wm(plane, crtc_state);
@@ -656,21 +656,21 @@ skl_disable_plane(struct intel_plane *plane,
  	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
  	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static bool
  skl_plane_get_hw_state(struct intel_plane *plane,
  		       enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum intel_display_power_domain power_domain;
  	enum plane_id plane_id = plane->id;
  	intel_wakeref_t wakeref;
  	bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return false;
@@ -678,7 +678,7 @@ skl_plane_get_hw_state(struct intel_plane *plane,
  	*pipe = plane->pipe;

-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return ret;
  }
@@ -687,7 +687,7 @@ static void
  chv_update_csc(const struct intel_plane_state *plane_state)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	enum plane_id plane_id = plane->id;
  	/*
@@ -746,7 +746,7 @@ static void
  vlv_update_clrc(const struct intel_plane_state *plane_state)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	enum pipe pipe = plane->pipe;
  	enum plane_id plane_id = plane->id;
@@ -863,7 +863,7 @@ vlv_update_plane(struct intel_plane *plane,
  		 const struct intel_crtc_state *crtc_state,
  		 const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum pipe pipe = plane->pipe;
  	enum plane_id plane_id = plane->id;
  	u32 sprsurf_offset = plane_state->color_plane[0].offset;
@@ -886,7 +886,7 @@ vlv_update_plane(struct intel_plane *plane,
  	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
  		      plane_state->color_plane[0].stride);
@@ -894,7 +894,7 @@ vlv_update_plane(struct intel_plane *plane,
  	I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
  	I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
+	if (IS_CHERRYVIEW(i915) && pipe == PIPE_B)
  		chv_update_csc(plane_state);
if (key->flags) {
@@ -917,38 +917,38 @@ vlv_update_plane(struct intel_plane *plane,
  	vlv_update_clrc(plane_state);

-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static void
  vlv_disable_plane(struct intel_plane *plane,
  		  const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum pipe pipe = plane->pipe;
  	enum plane_id plane_id = plane->id;
  	unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
  	I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static bool
  vlv_plane_get_hw_state(struct intel_plane *plane,
  		       enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum intel_display_power_domain power_domain;
  	enum plane_id plane_id = plane->id;
  	intel_wakeref_t wakeref;
  	bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return false;
@@ -956,7 +956,7 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
  	*pipe = plane->pipe;

-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return ret;
  }
@@ -977,7 +977,7 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
  static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
  			  const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	unsigned int rotation = plane_state->base.rotation;
@@ -986,7 +986,7 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
  	sprctl = SPRITE_ENABLE;

-	if (IS_IVYBRIDGE(dev_priv))
+	if (IS_IVYBRIDGE(i915))
  		sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -1038,7 +1038,7 @@ ivb_update_plane(struct intel_plane *plane,
  		 const struct intel_crtc_state *crtc_state,
  		 const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum pipe pipe = plane->pipe;
  	u32 sprsurf_offset = plane_state->color_plane[0].offset;
  	u32 linear_offset;
@@ -1067,12 +1067,12 @@ ivb_update_plane(struct intel_plane *plane,
  	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
  	I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
  	I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
-	if (IS_IVYBRIDGE(dev_priv))
+	if (IS_IVYBRIDGE(i915))
  		I915_WRITE_FW(SPRSCALE(pipe), sprscale);
if (key->flags) {
@@ -1083,7 +1083,7 @@ ivb_update_plane(struct intel_plane *plane,
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
  	 * register */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
  		I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
  	} else {
  		I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
@@ -1099,39 +1099,39 @@ ivb_update_plane(struct intel_plane *plane,
  	I915_WRITE_FW(SPRSURF(pipe),
  		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static void
  ivb_disable_plane(struct intel_plane *plane,
  		  const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum pipe pipe = plane->pipe;
  	unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
I915_WRITE_FW(SPRCTL(pipe), 0);
  	/* Disable the scaler */
-	if (IS_IVYBRIDGE(dev_priv))
+	if (IS_IVYBRIDGE(i915))
  		I915_WRITE_FW(SPRSCALE(pipe), 0);
  	I915_WRITE_FW(SPRSURF(pipe), 0);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static bool
  ivb_plane_get_hw_state(struct intel_plane *plane,
  		       enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum intel_display_power_domain power_domain;
  	intel_wakeref_t wakeref;
  	bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return false;
@@ -1139,7 +1139,7 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
  	*pipe = plane->pipe;

-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return ret;
  }
@@ -1168,7 +1168,7 @@ static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
  static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
  			  const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	unsigned int rotation = plane_state->base.rotation;
@@ -1177,7 +1177,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
  	dvscntr = DVS_ENABLE;

-	if (IS_GEN(dev_priv, 6))
+	if (IS_GEN(i915, 6))
  		dvscntr |= DVS_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -1229,7 +1229,7 @@ g4x_update_plane(struct intel_plane *plane,
  		 const struct intel_crtc_state *crtc_state,
  		 const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum pipe pipe = plane->pipe;
  	u32 dvssurf_offset = plane_state->color_plane[0].offset;
  	u32 linear_offset;
@@ -1258,7 +1258,7 @@ g4x_update_plane(struct intel_plane *plane,
  	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
  	I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -1283,38 +1283,38 @@ g4x_update_plane(struct intel_plane *plane,
  	I915_WRITE_FW(DVSSURF(pipe),
  		      intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static void
  g4x_disable_plane(struct intel_plane *plane,
  		  const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum pipe pipe = plane->pipe;
  	unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&i915->uncore.lock, irqflags);
I915_WRITE_FW(DVSCNTR(pipe), 0);
  	/* Disable the scaler */
  	I915_WRITE_FW(DVSSCALE(pipe), 0);
  	I915_WRITE_FW(DVSSURF(pipe), 0);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&i915->uncore.lock, irqflags);
  }
static bool
  g4x_plane_get_hw_state(struct intel_plane *plane,
  		       enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	enum intel_display_power_domain power_domain;
  	intel_wakeref_t wakeref;
  	bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return false;
@@ -1322,7 +1322,7 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
  	*pipe = plane->pipe;

-	intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return ret;
  }
@@ -1406,16 +1406,16 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
  		 struct intel_plane_state *plane_state)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
  	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
  	int ret;
if (intel_fb_scalable(plane_state->base.fb)) {
-		if (INTEL_GEN(dev_priv) < 7) {
+		if (INTEL_GEN(i915) < 7) {
  			min_scale = 1;
  			max_scale = 16 << 16;
-		} else if (IS_IVYBRIDGE(dev_priv)) {
+		} else if (IS_IVYBRIDGE(i915)) {
  			min_scale = 1;
  			max_scale = 2 << 16;
  		}
@@ -1443,7 +1443,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
  	if (ret)
  		return ret;
- if (INTEL_GEN(dev_priv) >= 7)
+	if (INTEL_GEN(i915) >= 7)
  		plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
  	else
  		plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
@@ -1454,11 +1454,11 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
  int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	unsigned int rotation = plane_state->base.rotation;
/* CHV ignores the mirror bit when the rotate bit is set :( */
-	if (IS_CHERRYVIEW(dev_priv) &&
+	if (IS_CHERRYVIEW(i915) &&
  	    rotation & DRM_MODE_ROTATE_180 &&
  	    rotation & DRM_MODE_REFLECT_X) {
  		DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
@@ -1506,7 +1506,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
  			      const struct intel_plane_state *plane_state)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	unsigned int rotation = plane_state->base.rotation;
  	struct drm_format_name_buf format_name;
@@ -1540,7 +1540,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
  		 */
  		switch (fb->format->format) {
  		case DRM_FORMAT_RGB565:
-			if (INTEL_GEN(dev_priv) >= 11)
+			if (INTEL_GEN(i915) >= 11)
  				break;
  			/* fall through */
  		case DRM_FORMAT_C8:
@@ -1579,7 +1579,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
  static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
  					   const struct intel_plane_state *plane_state)
  {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
  		to_i915(plane_state->base.plane->dev);
  	int crtc_x = plane_state->base.dst.x1;
  	int crtc_w = drm_rect_width(&plane_state->base.dst);
@@ -1594,7 +1594,7 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
  	 * than the cursor ending less than 4 pixels from the left edge of the
  	 * screen may cause FIFO underflow and display corruption.
  	 */
-	if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+	if ((IS_GEMINILAKE(i915) || IS_CANNONLAKE(i915)) &&
  	    (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
  		DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
  			      crtc_x + crtc_w < 4 ? "end" : "start",
@@ -1627,7 +1627,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
  			   struct intel_plane_state *plane_state)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	const struct drm_framebuffer *fb = plane_state->base.fb;
  	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
  	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
@@ -1675,23 +1675,23 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
  	plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);

-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(i915) >= 10 || IS_GEMINILAKE(i915))
  		plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
  							     plane_state);
return 0;
  }
-static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *i915)
  {
-	return INTEL_GEN(dev_priv) >= 9;
+	return INTEL_GEN(i915) >= 9;
  }
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
  				 const struct drm_intel_sprite_colorkey *set)
  {
  	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
  	struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
*key = *set;
@@ -1708,7 +1708,7 @@ static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
  	 * On SKL+ we want dst key enabled on
  	 * the primary and not on the sprite.
  	 */
-	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+	if (INTEL_GEN(i915) >= 9 && plane->id != PLANE_PRIMARY &&
  	    set->flags & I915_SET_COLORKEY_DESTINATION)
  		key->flags = 0;
  }
@@ -1716,7 +1716,7 @@ static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
  int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
  				    struct drm_file *file_priv)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_intel_sprite_colorkey *set = data;
  	struct drm_plane *plane;
  	struct drm_plane_state *plane_state;
@@ -1734,7 +1734,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
  	if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
  		return -EINVAL;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
  	    set->flags & I915_SET_COLORKEY_DESTINATION)
  		return -EINVAL;
@@ -1747,7 +1747,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
  	 * Also multiple planes can't do destination keying on the same
  	 * pipe simultaneously.
  	 */
-	if (INTEL_GEN(dev_priv) >= 9 &&
+	if (INTEL_GEN(i915) >= 9 &&
  	    to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
  	    set->flags & I915_SET_COLORKEY_DESTINATION)
  		return -EINVAL;
@@ -1771,9 +1771,9 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
  		 * On some platforms we have to configure
  		 * the dst colorkey on the primary plane.
  		 */
-		if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+		if (!ret && has_dst_key_in_primary_plane(i915)) {
  			struct intel_crtc *crtc =
-				intel_get_crtc_for_pipe(dev_priv,
+				intel_get_crtc_for_pipe(i915,
  							to_intel_plane(plane)->pipe);
plane_state = drm_atomic_get_plane_state(state,
@@ -2197,26 +2197,26 @@ static const struct drm_plane_funcs skl_plane_funcs = {
  	.format_mod_supported = skl_plane_format_mod_supported,
  };
-static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+static bool skl_plane_has_fbc(struct drm_i915_private *i915,
  			      enum pipe pipe, enum plane_id plane_id)
  {
-	if (!HAS_FBC(dev_priv))
+	if (!HAS_FBC(i915))
  		return false;
return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
  }
-static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+static bool skl_plane_has_planar(struct drm_i915_private *i915,
  				 enum pipe pipe, enum plane_id plane_id)
  {
-	if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(i915) >= 11)
  		return plane_id <= PLANE_SPRITE3;
/* Display WA #0870: skl, bxt */
-	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+	if (IS_SKYLAKE(i915) || IS_BROXTON(i915))
  		return false;
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+	if (IS_GEN(i915, 9) && !IS_GEMINILAKE(i915) && pipe == PIPE_C)
  		return false;
if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
@@ -2225,16 +2225,16 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
  	return true;
  }
-static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+static bool skl_plane_has_ccs(struct drm_i915_private *i915,
  			      enum pipe pipe, enum plane_id plane_id)
  {
  	if (plane_id == PLANE_CURSOR)
  		return false;
- if (INTEL_GEN(dev_priv) >= 10)
+	if (INTEL_GEN(i915) >= 10)
  		return true;
- if (IS_GEMINILAKE(dev_priv))
+	if (IS_GEMINILAKE(i915))
  		return pipe != PIPE_C;
return pipe != PIPE_C &&
@@ -2243,7 +2243,7 @@ static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
  }
struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
+skl_universal_plane_create(struct drm_i915_private *i915,
  			   enum pipe pipe, enum plane_id plane_id)
  {
  	struct intel_plane *plane;
@@ -2263,9 +2263,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
  	plane->id = plane_id;
  	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
- plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
+	plane->has_fbc = skl_plane_has_fbc(i915, pipe, plane_id);
  	if (plane->has_fbc) {
-		struct intel_fbc *fbc = &dev_priv->fbc;
+		struct intel_fbc *fbc = &i915->fbc;
fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
  	}
@@ -2278,24 +2278,24 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
  	if (icl_is_nv12_y_plane(plane_id))
  		plane->update_slave = icl_update_slave;
- if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
-		if (icl_is_hdr_plane(dev_priv, plane_id)) {
+	if (skl_plane_has_planar(i915, pipe, plane_id)) {
+		if (icl_is_hdr_plane(i915, plane_id)) {
  			formats = icl_hdr_planar_formats;
  			num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
-		} else if (INTEL_GEN(dev_priv) >= 11) {
+		} else if (INTEL_GEN(i915) >= 11) {
  			formats = icl_planar_formats;
  			num_formats = ARRAY_SIZE(icl_planar_formats);
-		} else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
+		} else if (INTEL_GEN(i915) == 10 || IS_GEMINILAKE(i915)) {
  			formats = glk_planar_formats;
  			num_formats = ARRAY_SIZE(glk_planar_formats);
  		} else {
  			formats = skl_planar_formats;
  			num_formats = ARRAY_SIZE(skl_planar_formats);
  		}
-	} else if (icl_is_hdr_plane(dev_priv, plane_id)) {
+	} else if (icl_is_hdr_plane(i915, plane_id)) {
  		formats = icl_hdr_plane_formats;
  		num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
-	} else if (INTEL_GEN(dev_priv) >= 11) {
+	} else if (INTEL_GEN(i915) >= 11) {
  		formats = icl_plane_formats;
  		num_formats = ARRAY_SIZE(icl_plane_formats);
  	} else {
@@ -2303,7 +2303,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
  		num_formats = ARRAY_SIZE(skl_plane_formats);
  	}
- plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+	plane->has_ccs = skl_plane_has_ccs(i915, pipe, plane_id);
  	if (plane->has_ccs)
  		modifiers = skl_plane_format_modifiers_ccs;
  	else
@@ -2316,7 +2316,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
  	possible_crtcs = BIT(pipe);

-	ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+	ret = drm_universal_plane_init(&i915->drm, &plane->base,
  				       possible_crtcs, &skl_plane_funcs,
  				       formats, num_formats, modifiers,
  				       plane_type,
@@ -2329,7 +2329,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
  		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
  		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
- if (INTEL_GEN(dev_priv) >= 10)
+	if (INTEL_GEN(i915) >= 10)
  		supported_rotations |= DRM_MODE_REFLECT_X;
drm_plane_create_rotation_property(&plane->base,
@@ -2361,7 +2361,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
  }
struct intel_plane *
-intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+intel_sprite_plane_create(struct drm_i915_private *i915,
  			  enum pipe pipe, int sprite)
  {
  	struct intel_plane *plane;
@@ -2373,15 +2373,15 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
  	int num_formats;
  	int ret;
- if (INTEL_GEN(dev_priv) >= 9)
-		return skl_universal_plane_create(dev_priv, pipe,
+	if (INTEL_GEN(i915) >= 9)
+		return skl_universal_plane_create(i915, pipe,
  						  PLANE_SPRITE0 + sprite);
plane = intel_plane_alloc();
  	if (IS_ERR(plane))
  		return plane;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		plane->max_stride = i9xx_plane_max_stride;
  		plane->update_plane = vlv_update_plane;
  		plane->disable_plane = vlv_disable_plane;
@@ -2393,7 +2393,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
  		modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
-	} else if (INTEL_GEN(dev_priv) >= 7) {
+	} else if (INTEL_GEN(i915) >= 7) {
  		plane->max_stride = g4x_sprite_max_stride;
  		plane->update_plane = ivb_update_plane;
  		plane->disable_plane = ivb_disable_plane;
@@ -2413,7 +2413,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
  		plane->check_plane = g4x_sprite_check;
modifiers = i9xx_plane_format_modifiers;
-		if (IS_GEN(dev_priv, 6)) {
+		if (IS_GEN(i915, 6)) {
  			formats = snb_plane_formats;
  			num_formats = ARRAY_SIZE(snb_plane_formats);
@@ -2426,7 +2426,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
  		}
  	}
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+	if (IS_CHERRYVIEW(i915) && pipe == PIPE_B) {
  		supported_rotations =
  			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
  			DRM_MODE_REFLECT_X;
@@ -2441,7 +2441,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
  	possible_crtcs = BIT(pipe);

-	ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+	ret = drm_universal_plane_init(&i915->drm, &plane->base,
  				       possible_crtcs, plane_funcs,
  				       formats, num_formats, modifiers,
  				       DRM_PLANE_TYPE_OVERLAY,
diff --git a/drivers/gpu/drm/i915/intel_sprite.h b/drivers/gpu/drm/i915/intel_sprite.h
index 500f6bffb139..401295e05116 100644
--- a/drivers/gpu/drm/i915/intel_sprite.h
+++ b/drivers/gpu/drm/i915/intel_sprite.h
@@ -21,7 +21,7 @@ struct intel_plane_state;
  bool is_planar_yuv_format(u32 pixelformat);
  int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  			     int usecs);
-struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *i915,
  					      enum pipe pipe, int plane);
  int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
  				    struct drm_file *file_priv);
@@ -31,7 +31,7 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state);
  int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
  int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
  struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
+skl_universal_plane_create(struct drm_i915_private *i915,
  			   enum pipe pipe, enum plane_id plane_id);
static inline bool icl_is_nv12_y_plane(enum plane_id id)
@@ -49,10 +49,10 @@ static inline u8 icl_hdr_plane_mask(void)
  		BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
  }
-static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
+static inline bool icl_is_hdr_plane(struct drm_i915_private *i915,
  				    enum plane_id plane_id)
  {
-	return INTEL_GEN(dev_priv) >= 11 &&
+	return INTEL_GEN(i915) >= 11 &&
  		icl_hdr_plane_mask() & BIT(plane_id);
  }
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 5dc594eafaf2..4e66261bf5af 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -906,7 +906,7 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
  static bool
  intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 tmp = I915_READ(TV_CTL);
*pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
@@ -920,10 +920,10 @@ intel_enable_tv(struct intel_encoder *encoder,
  		const struct drm_connector_state *conn_state)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
-	intel_wait_for_vblank(dev_priv,
+	intel_wait_for_vblank(i915,
  			      to_intel_crtc(pipe_config->base.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
@@ -935,7 +935,7 @@ intel_disable_tv(struct intel_encoder *encoder,
  		 const struct drm_connector_state *old_conn_state)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
  }
@@ -1084,7 +1084,7 @@ static void
  intel_tv_get_config(struct intel_encoder *encoder,
  		    struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct drm_display_mode *adjusted_mode =
  		&pipe_config->base.adjusted_mode;
  	struct drm_display_mode mode = {};
@@ -1158,15 +1158,15 @@ intel_tv_get_config(struct intel_encoder *encoder,
  		adjusted_mode->crtc_clock /= 2;
/* pixel counter doesn't work on i965gm TV output */
-	if (IS_I965GM(dev_priv))
+	if (IS_I965GM(i915))
  		adjusted_mode->private_flags |=
  			I915_MODE_FLAG_USE_SCANLINE_COUNTER;
  }
-static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv,
+static bool intel_tv_source_too_wide(struct drm_i915_private *i915,
  				     int hdisplay)
  {
-	return IS_GEN(dev_priv, 3) && hdisplay > 1024;
+	return IS_GEN(i915, 3) && hdisplay > 1024;
  }
static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode,
@@ -1184,7 +1184,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
  			struct intel_crtc_state *pipe_config,
  			struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_tv_connector_state *tv_conn_state =
  		to_intel_tv_connector_state(conn_state);
  	const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
@@ -1209,7 +1209,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
  	intel_tv_mode_to_mode(adjusted_mode, tv_mode);
  	drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (intel_tv_source_too_wide(dev_priv, hdisplay) ||
+	if (intel_tv_source_too_wide(i915, hdisplay) ||
  	    !intel_tv_vert_scaling(adjusted_mode, conn_state, vdisplay)) {
  		int extra, top, bottom;
@@ -1306,7 +1306,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
  	 * the active portion. Hence following this formula seems
  	 * more trouble that it's worth.
  	 *
-	 * if (IS_GEN(dev_priv, 4)) {
+	 * if (IS_GEN(i915, 4)) {
  	 *	num = cdclk * (tv_mode->oversample >> !tv_mode->progressive);
  	 *	den = tv_mode->clock;
  	 * } else {
@@ -1327,7 +1327,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
  	adjusted_mode->name[0] = '\0';
/* pixel counter doesn't work on i965gm TV output */
-	if (IS_I965GM(dev_priv))
+	if (IS_I965GM(i915))
  		adjusted_mode->private_flags |=
  			I915_MODE_FLAG_USE_SCANLINE_COUNTER;
@@ -1335,7 +1335,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
  }
static void
-set_tv_mode_timings(struct drm_i915_private *dev_priv,
+set_tv_mode_timings(struct drm_i915_private *i915,
  		    const struct tv_mode *tv_mode,
  		    bool burst_ena)
  {
@@ -1393,7 +1393,7 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv,
  	I915_WRITE(TV_V_CTL_7, vctl7);
  }
-static void set_color_conversion(struct drm_i915_private *dev_priv,
+static void set_color_conversion(struct drm_i915_private *i915,
  				 const struct color_conversion *color_conversion)
  {
  	if (!color_conversion)
@@ -1417,7 +1417,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
  				const struct intel_crtc_state *pipe_config,
  				const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
  	struct intel_tv *intel_tv = enc_to_tv(encoder);
  	const struct intel_tv_connector_state *tv_conn_state =
@@ -1507,18 +1507,18 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
  		tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
-	if (IS_I915GM(dev_priv))
+	if (IS_I915GM(i915))
  		tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
- set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
+	set_tv_mode_timings(i915, tv_mode, burst_ena);
I915_WRITE(TV_SC_CTL_1, scctl1);
  	I915_WRITE(TV_SC_CTL_2, scctl2);
  	I915_WRITE(TV_SC_CTL_3, scctl3);
- set_color_conversion(dev_priv, color_conversion);
+	set_color_conversion(i915, color_conversion);
- if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(i915) >= 4)
  		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
  	else
  		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1528,7 +1528,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
  			   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
  			    (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
- assert_pipe_disabled(dev_priv, intel_crtc->pipe);
+	assert_pipe_disabled(i915, intel_crtc->pipe);
/* Filter ctl must be set before TV_WIN_SIZE */
  	tv_filter_ctl = TV_AUTO_SCALE;
@@ -1568,18 +1568,18 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
  	struct drm_crtc *crtc = connector->state->crtc;
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 tv_ctl, save_tv_ctl;
  	u32 tv_dac, save_tv_dac;
  	int type;
/* Disable TV interrupts around load detect or we'll recurse */
  	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		i915_disable_pipestat(dev_priv, 0,
+		spin_lock_irq(&i915->irq_lock);
+		i915_disable_pipestat(i915, 0,
  				      PIPE_HOTPLUG_INTERRUPT_STATUS |
  				      PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
-		spin_unlock_irq(&dev_priv->irq_lock);
+		spin_unlock_irq(&i915->irq_lock);
  	}
save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1605,7 +1605,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
  	 * The TV sense state should be cleared to zero on cantiga platform. Otherwise
  	 * the TV is misdetected. This is hardware requirement.
  	 */
-	if (IS_GM45(dev_priv))
+	if (IS_GM45(i915))
  		tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
  			    TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
@@ -1613,7 +1613,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
  	I915_WRITE(TV_DAC, tv_dac);
  	POSTING_READ(TV_DAC);
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+	intel_wait_for_vblank(i915, intel_crtc->pipe);
type = -1;
  	tv_dac = I915_READ(TV_DAC);
@@ -1643,15 +1643,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
  	POSTING_READ(TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
-	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+	intel_wait_for_vblank(i915, intel_crtc->pipe);
/* Restore interrupt config */
  	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		i915_enable_pipestat(dev_priv, 0,
+		spin_lock_irq(&i915->irq_lock);
+		i915_enable_pipestat(i915, 0,
  				     PIPE_HOTPLUG_INTERRUPT_STATUS |
  				     PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
-		spin_unlock_irq(&dev_priv->irq_lock);
+		spin_unlock_irq(&i915->irq_lock);
  	}
return type;
@@ -1764,7 +1764,7 @@ intel_tv_set_mode_type(struct drm_display_mode *mode,
  static int
  intel_tv_get_modes(struct drm_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->dev);
  	const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
  	int i, count = 0;
@@ -1778,7 +1778,7 @@ intel_tv_get_modes(struct drm_connector *connector)
  			continue;
/* no vertical scaling with wide sources on gen3 */
-		if (IS_GEN(dev_priv, 3) && input->w > 1024 &&
+		if (IS_GEN(i915, 3) && input->w > 1024 &&
  		    input->h > intel_tv_mode_vdisplay(tv_mode))
  			continue;
@@ -1857,9 +1857,9 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
  };
void
-intel_tv_init(struct drm_i915_private *dev_priv)
+intel_tv_init(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct drm_connector *connector;
  	struct intel_tv *intel_tv;
  	struct intel_encoder *intel_encoder;
@@ -1872,7 +1872,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
  	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
  		return;
- if (!intel_bios_is_tv_present(dev_priv)) {
+	if (!intel_bios_is_tv_present(i915)) {
  		DRM_DEBUG_KMS("Integrated TV is not present.\n");
  		return;
  	}
@@ -1966,7 +1966,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
  	/* Create TV properties then attach current values */
  	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
  		/* 1080p50/1080p60 not supported on gen3 */
-		if (IS_GEN(dev_priv, 3) &&
+		if (IS_GEN(i915, 3) &&
  		    tv_modes[i].oversample == 1)
  			break;
diff --git a/drivers/gpu/drm/i915/intel_tv.h b/drivers/gpu/drm/i915/intel_tv.h
index 44518575ec5c..fc0b89563b4f 100644
--- a/drivers/gpu/drm/i915/intel_tv.h
+++ b/drivers/gpu/drm/i915/intel_tv.h
@@ -8,6 +8,6 @@
struct drm_i915_private;
-void intel_tv_init(struct drm_i915_private *dev_priv);
+void intel_tv_init(struct drm_i915_private *i915);
#endif /* __INTEL_TV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index a8e7f0ba7c3b..11e2dcf6b917 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -33,12 +33,12 @@ static void guc_free_load_err_log(struct intel_guc *guc);
/* Reset GuC providing us with fresh state for both GuC and HuC.
   */
-static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
+static int __intel_uc_reset_hw(struct drm_i915_private *i915)
  {
  	int ret;
  	u32 guc_status;
- ret = intel_reset_guc(dev_priv);
+	ret = intel_reset_guc(i915);
  	if (ret) {
  		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
  		return ret;
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 3ea06c87dfcd..4ddd50d990b3 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -28,20 +28,20 @@
  #include "intel_huc.h"
  #include "i915_params.h"
-void intel_uc_init_early(struct drm_i915_private *dev_priv);
-void intel_uc_cleanup_early(struct drm_i915_private *dev_priv);
-void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
-int intel_uc_init_misc(struct drm_i915_private *dev_priv);
-void intel_uc_fini_misc(struct drm_i915_private *dev_priv);
-void intel_uc_sanitize(struct drm_i915_private *dev_priv);
-int intel_uc_init_hw(struct drm_i915_private *dev_priv);
-void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
-int intel_uc_init(struct drm_i915_private *dev_priv);
-void intel_uc_fini(struct drm_i915_private *dev_priv);
+void intel_uc_init_early(struct drm_i915_private *i915);
+void intel_uc_cleanup_early(struct drm_i915_private *i915);
+void intel_uc_init_mmio(struct drm_i915_private *i915);
+int intel_uc_init_misc(struct drm_i915_private *i915);
+void intel_uc_fini_misc(struct drm_i915_private *i915);
+void intel_uc_sanitize(struct drm_i915_private *i915);
+int intel_uc_init_hw(struct drm_i915_private *i915);
+void intel_uc_fini_hw(struct drm_i915_private *i915);
+int intel_uc_init(struct drm_i915_private *i915);
+void intel_uc_fini(struct drm_i915_private *i915);
  void intel_uc_reset_prepare(struct drm_i915_private *i915);
  void intel_uc_suspend(struct drm_i915_private *i915);
  void intel_uc_runtime_suspend(struct drm_i915_private *i915);
-int intel_uc_resume(struct drm_i915_private *dev_priv);
+int intel_uc_resume(struct drm_i915_private *i915);
static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
  {
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index f342ddd47df8..d156911d1560 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -32,15 +32,15 @@
  /**
   * intel_uc_fw_fetch - fetch uC firmware
   *
- * @dev_priv: device private
+ * @i915: device private
   * @uc_fw: uC firmware
   *
   * Fetch uC firmware into GEM obj.
   */
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+void intel_uc_fw_fetch(struct drm_i915_private *i915,
  		       struct intel_uc_fw *uc_fw)
  {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
  	struct drm_i915_gem_object *obj;
  	const struct firmware *fw = NULL;
  	struct uc_css_header *css;
@@ -48,10 +48,10 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
  	int err;
if (!uc_fw->path) {
-		dev_info(dev_priv->drm.dev,
+		dev_info(i915->drm.dev,
  			 "%s: No firmware was defined for %s!\n",
  			 intel_uc_fw_type_repr(uc_fw->type),
-			 intel_platform_name(INTEL_INFO(dev_priv)->platform));
+			 intel_platform_name(INTEL_INFO(i915)->platform));
  		return;
  	}
@@ -159,7 +159,7 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
  		goto fail;
  	}
- obj = i915_gem_object_create_shmem_from_data(dev_priv,
+	obj = i915_gem_object_create_shmem_from_data(i915,
  						     fw->data, fw->size);
  	if (IS_ERR(obj)) {
  		err = PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index ff98f8661d72..881c3aef074c 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -142,7 +142,7 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
  	return uc_fw->header_size + uc_fw->ucode_size;
  }
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+void intel_uc_fw_fetch(struct drm_i915_private *i915,
  		       struct intel_uc_fw *uc_fw);
  void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
  int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 85171a8b866a..87dec005165d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -537,10 +537,10 @@ void intel_uncore_runtime_resume(struct intel_uncore *uncore)
  	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
  }
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
+void intel_uncore_sanitize(struct drm_i915_private *i915)
  {
  	/* BIOS often leaves RC6 enabled, but disable it for hw init */
-	intel_sanitize_gt_powersave(dev_priv);
+	intel_sanitize_gt_powersave(i915);
  }
static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
@@ -647,7 +647,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
   * @fw_domains: forcewake domains to get reference on
   *
   * See intel_uncore_forcewake_get(). This variant places the onus
- * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
+ * on the caller to explicitly handle the i915->uncore.lock spinlock.
   */
  void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
  					enum forcewake_domains fw_domains)
@@ -708,7 +708,7 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
   * @fw_domains: forcewake domains to get reference on
   *
   * See intel_uncore_forcewake_put(). This variant places the onus
- * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
+ * on the caller to explicitly handle the i915->uncore.lock spinlock.
   */
  void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
  					enum forcewake_domains fw_domains)
@@ -821,10 +821,10 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
  #define GEN_FW_RANGE(s, e, d) \
  	{ .start = (s), .end = (e), .domains = (d) }
-#define HAS_FWTABLE(dev_priv) \
-	(INTEL_GEN(dev_priv) >= 9 || \
-	 IS_CHERRYVIEW(dev_priv) || \
-	 IS_VALLEYVIEW(dev_priv))
+#define HAS_FWTABLE(i915) \
+	(INTEL_GEN(i915) >= 9 || \
+	 IS_CHERRYVIEW(i915) || \
+	 IS_VALLEYVIEW(i915))
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
  static const struct intel_forcewake_range __vlv_fw_ranges[] = {
@@ -1461,7 +1461,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
  static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
  					 unsigned long action, void *data)
  {
-	struct drm_i915_private *dev_priv = container_of(nb,
+	struct drm_i915_private *i915 = container_of(nb,
  			struct drm_i915_private, uncore.pmic_bus_access_nb);
switch (action) {
@@ -1479,12 +1479,12 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
  		 * wake reference -> disable wakeref asserts for the time of
  		 * the access.
  		 */
-		disable_rpm_wakeref_asserts(dev_priv);
-		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-		enable_rpm_wakeref_asserts(dev_priv);
+		disable_rpm_wakeref_asserts(i915);
+		intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+		enable_rpm_wakeref_asserts(i915);
  		break;
  	case MBI_PMIC_BUS_ACCESS_END:
-		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+		intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
  		break;
  	}
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index d6af3de70121..d30bda2c3444 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -177,7 +177,7 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
  	return uncore->flags & UNCORE_HAS_FIFO;
  }
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
+void intel_uncore_sanitize(struct drm_i915_private *i915);
  void intel_uncore_init_early(struct intel_uncore *uncore);
  int intel_uncore_init_mmio(struct intel_uncore *uncore);
  void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
@@ -361,7 +361,7 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
   *
   * Certain architectures will die if the same cacheline is concurrently accessed
   * by different clients (e.g. on Ivybridge). Access to registers should
- * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * therefore generally be serialised, by either the i915->uncore.lock or
   * a more localised lock guarding all access to that bank of registers.
   */
  #define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
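Side note on the uncore kerneldoc touched above, purely as illustration and not part of the patch: the __locked forcewake variants and the *_fw accessors put both i915->uncore.lock and the forcewake reference in the caller's hands. A minimal sketch of that calling pattern with the renamed pointer, where the register argument is just a placeholder:

	/*
	 * Sketch only, not from this patch: `reg' stands in for whatever
	 * register a real caller would pass.
	 */
	static u32 read_one_reg_locked(struct drm_i915_private *i915,
				       i915_reg_t reg)
	{
		struct intel_uncore *uncore = &i915->uncore;
		u32 val;

		spin_lock_irq(&uncore->lock);
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);

		/* Raw access: no implicit lock or forcewake handling. */
		val = intel_uncore_read_fw(uncore, reg);

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
		spin_unlock_irq(&uncore->lock);

		return val;
	}

Nothing in this shape changes with the rename beyond the name of the local device pointer.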
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 89ef14cafb6b..e2e9883d6b28 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -337,7 +337,7 @@ enum vbt_gmbus_ddi {
   * basically any of the fields to ensure the correct interpretation for the BDB
   * version in question.
   *
- * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve
+ * When we copy the child device configs to i915->vbt.child_dev, we reserve
   * space for the full structure below, and initialize the tail not actually
   * present in VBT to zeros. Accessing those fields is fine, as long as the
   * default zero is taken into account, again according to the BDB version.
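For what it's worth, the copy scheme this comment describes boils down to something like the following; vbt_child and vbt_child_size are made-up names here, used only to show the shape:

	/*
	 * Sketch of the comment above, not code from the patch:
	 * vbt_child/vbt_child_size are placeholders for the BDB data.
	 */
	struct child_device_config *child;

	child = kzalloc(sizeof(*child), GFP_KERNEL);	/* full struct, tail zeroed */
	if (child)
		memcpy(child, vbt_child,
		       min_t(size_t, vbt_child_size, sizeof(*child)));

so a field the given BDB version never wrote reads back as the zero default the comment mentions.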
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
index ffec807b8960..164fd4b56112 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -478,7 +478,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
  						const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
  	enum pipe pipe = crtc->pipe;
  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -897,7 +897,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
  		      const struct intel_crtc_state *crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	enum pipe pipe = crtc->pipe;
  	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
  	u32 dss_ctl1_val = 0;
@@ -907,7 +907,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
  		return;
/* Enable Power wells for VDSC/joining */
-	intel_display_power_get(dev_priv,
+	intel_display_power_get(i915,
  				intel_dsc_power_domain(crtc_state));
intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
@@ -933,7 +933,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
  void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
  {
  	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
  	enum pipe pipe = crtc->pipe;
  	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
  	u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
@@ -961,6 +961,6 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
  	I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
/* Disable Power wells for VDSC/joining */
-	intel_display_power_put_unchecked(dev_priv,
+	intel_display_power_put_unchecked(i915,
  					  intel_dsc_power_domain(old_crtc_state));
  }
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 7b4ba84b9fb8..646540b97267 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -225,7 +225,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
  	return 0;
  }
-static inline int write_and_verify(struct drm_i915_private *dev_priv,
+static inline int write_and_verify(struct drm_i915_private *i915,
  				   i915_reg_t reg, u32 val, u32 mask,
  				   u32 locked_bit)
  {
@@ -252,27 +252,27 @@ static inline int write_and_verify(struct drm_i915_private *dev_priv,
   */
  int intel_wopcm_init_hw(struct intel_wopcm *wopcm)
  {
-	struct drm_i915_private *dev_priv = wopcm_to_i915(wopcm);
+	struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
  	u32 huc_agent;
  	u32 mask;
  	int err;
- if (!USES_GUC(dev_priv))
+	if (!USES_GUC(i915))
  		return 0;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+	GEM_BUG_ON(!HAS_GUC(i915));
  	GEM_BUG_ON(!wopcm->guc.size);
  	GEM_BUG_ON(!wopcm->guc.base);
- err = write_and_verify(dev_priv, GUC_WOPCM_SIZE, wopcm->guc.size,
+	err = write_and_verify(i915, GUC_WOPCM_SIZE, wopcm->guc.size,
  			       GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED,
  			       GUC_WOPCM_SIZE_LOCKED);
  	if (err)
  		goto err_out;
- huc_agent = USES_HUC(dev_priv) ? HUC_LOADING_AGENT_GUC : 0;
+	huc_agent = USES_HUC(i915) ? HUC_LOADING_AGENT_GUC : 0;
  	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
-	err = write_and_verify(dev_priv, DMA_GUC_WOPCM_OFFSET,
+	err = write_and_verify(i915, DMA_GUC_WOPCM_OFFSET,
  			       wopcm->guc.base | huc_agent, mask,
  			       GUC_WOPCM_OFFSET_VALID);
  	if (err)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 2093d08a7569..2514842236f6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -147,17 +147,17 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
static int igt_ppgtt_alloc(void *arg)
  {
-	struct drm_i915_private *dev_priv = arg;
+	struct drm_i915_private *i915 = arg;
  	struct i915_ppgtt *ppgtt;
  	u64 size, last, limit;
  	int err = 0;
/* Allocate a ppggt and try to fill the entire range */
-	if (!HAS_PPGTT(dev_priv))
+	if (!HAS_PPGTT(i915))
  		return 0;
- ppgtt = __ppgtt_create(dev_priv);
+	ppgtt = __ppgtt_create(i915);
  	if (IS_ERR(ppgtt))
  		return PTR_ERR(ppgtt);
@@ -208,9 +208,9 @@ static int igt_ppgtt_alloc(void *arg)
  	}
err_ppgtt_cleanup:
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
  	i915_vm_put(&ppgtt->vm);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
  	return err;
  }
@@ -992,7 +992,7 @@ static int shrink_boom(struct drm_i915_private *i915,
  	return err;
  }
-static int exercise_ppgtt(struct drm_i915_private *dev_priv,
+static int exercise_ppgtt(struct drm_i915_private *i915,
  			  int (*func)(struct drm_i915_private *i915,
  				      struct i915_address_space *vm,
  				      u64 hole_start, u64 hole_end,
@@ -1003,15 +1003,15 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
  	IGT_TIMEOUT(end_time);
  	int err;
- if (!HAS_FULL_PPGTT(dev_priv))
+	if (!HAS_FULL_PPGTT(i915))
  		return 0;
- file = mock_file(dev_priv);
+	file = mock_file(i915);
  	if (IS_ERR(file))
  		return PTR_ERR(file);
- mutex_lock(&dev_priv->drm.struct_mutex);
-	ppgtt = i915_ppgtt_create(dev_priv);
+	mutex_lock(&i915->drm.struct_mutex);
+	ppgtt = i915_ppgtt_create(i915);
  	if (IS_ERR(ppgtt)) {
  		err = PTR_ERR(ppgtt);
  		goto out_unlock;
@@ -1019,13 +1019,13 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
  	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
  	GEM_BUG_ON(ppgtt->vm.closed);
- err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
+	err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
i915_vm_put(&ppgtt->vm);
  out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(dev_priv, file);
+	mock_file_free(i915, file);
  	return err;
  }
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 7fd0321e0947..ab3e71c957df 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -107,12 +107,12 @@ static int validate_client(struct intel_guc_client *client,
  			   int client_priority,
  			   bool is_preempt_client)
  {
-	struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
+	struct drm_i915_private *i915 = guc_to_i915(client->guc);
  	struct i915_gem_context *ctx_owner = is_preempt_client ?
-			dev_priv->preempt_context : dev_priv->kernel_context;
+			i915->preempt_context : i915->kernel_context;
if (client->owner != ctx_owner ||
-	    client->engines != INTEL_INFO(dev_priv)->engine_mask ||
+	    client->engines != INTEL_INFO(i915)->engine_mask ||
  	    client->priority != client_priority ||
  	    client->doorbell_id == GUC_DOORBELL_INVALID)
  		return -EINVAL;
@@ -137,16 +137,16 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client)
   */
  static int igt_guc_clients(void *args)
  {
-	struct drm_i915_private *dev_priv = args;
+	struct drm_i915_private *i915 = args;
  	intel_wakeref_t wakeref;
  	struct intel_guc *guc;
  	int err = 0;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(dev_priv);
+	GEM_BUG_ON(!HAS_GUC(i915));
+	mutex_lock(&i915->drm.struct_mutex);
+	wakeref = intel_runtime_pm_get(i915);
- guc = &dev_priv->guc;
+	guc = &i915->guc;
  	if (!guc) {
  		pr_err("No guc object!\n");
  		err = -EINVAL;
@@ -227,8 +227,8 @@ static int igt_guc_clients(void *args)
  	guc_clients_create(guc);
  	guc_clients_enable(guc);
  unlock:
-	intel_runtime_pm_put(dev_priv, wakeref);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_runtime_pm_put(i915, wakeref);
+	mutex_unlock(&i915->drm.struct_mutex);
  	return err;
  }
@@ -239,17 +239,17 @@ static int igt_guc_clients(void *args)
   */
  static int igt_guc_doorbells(void *arg)
  {
-	struct drm_i915_private *dev_priv = arg;
+	struct drm_i915_private *i915 = arg;
  	intel_wakeref_t wakeref;
  	struct intel_guc *guc;
  	int i, err = 0;
  	u16 db_id;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(dev_priv);
+	GEM_BUG_ON(!HAS_GUC(i915));
+	mutex_lock(&i915->drm.struct_mutex);
+	wakeref = intel_runtime_pm_get(i915);
- guc = &dev_priv->guc;
+	guc = &i915->guc;
  	if (!guc) {
  		pr_err("No guc object!\n");
  		err = -EINVAL;
@@ -261,10 +261,10 @@ static int igt_guc_doorbells(void *arg)
  		goto unlock;
for (i = 0; i < ATTEMPTS; i++) {
-		clients[i] = guc_client_alloc(dev_priv,
-					      INTEL_INFO(dev_priv)->engine_mask,
+		clients[i] = guc_client_alloc(i915,
+					      INTEL_INFO(i915)->engine_mask,
  					      i % GUC_CLIENT_PRIORITY_NUM,
-					      dev_priv->kernel_context);
+					      i915->kernel_context);
if (!clients[i]) {
  			pr_err("[%d] No guc client\n", i);
@@ -340,20 +340,20 @@ static int igt_guc_doorbells(void *arg)
  			guc_client_free(clients[i]);
  		}
  unlock:
-	intel_runtime_pm_put(dev_priv, wakeref);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_runtime_pm_put(i915, wakeref);
+	mutex_unlock(&i915->drm.struct_mutex);
  	return err;
  }
-int intel_guc_live_selftest(struct drm_i915_private *dev_priv)
+int intel_guc_live_selftest(struct drm_i915_private *i915)
  {
  	static const struct i915_subtest tests[] = {
  		SUBTEST(igt_guc_clients),
  		SUBTEST(igt_guc_doorbells),
  	};
- if (!USES_GUC_SUBMISSION(dev_priv))
+	if (!USES_GUC_SUBMISSION(i915))
  		return 0;
- return i915_subtests(tests, dev_priv);
+	return i915_subtests(tests, i915);
  }
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index e0d7ebecb215..b5304cfeda21 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -254,15 +254,15 @@ static int live_forcewake_ops(void *arg)
  static int live_forcewake_domains(void *arg)
  {
  #define FW_RANGE 0x40000
-	struct drm_i915_private *dev_priv = arg;
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = arg;
+	struct intel_uncore *uncore = &i915->uncore;
  	unsigned long *valid;
  	u32 offset;
  	int err;
- if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) &&
-	    !IS_VALLEYVIEW(dev_priv) &&
-	    !IS_CHERRYVIEW(dev_priv))
+	if (!HAS_FPGA_DBG_UNCLAIMED(i915) &&
+	    !IS_VALLEYVIEW(i915) &&
+	    !IS_CHERRYVIEW(i915))
  		return 0;
/*
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index e272d826210a..0178bdf0c5a9 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -78,19 +78,19 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
  {
  	struct drm_encoder *encoder = &intel_dsi->base.base;
  	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 mask;
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
  		LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    MIPI_GEN_FIFO_STAT(port), mask, mask,
  				    100))
  		DRM_ERROR("DPI FIFOs are not empty\n");
  }
-static void write_data(struct drm_i915_private *dev_priv,
+static void write_data(struct drm_i915_private *i915,
  		       i915_reg_t reg,
  		       const u8 *data, u32 len)
  {
@@ -106,7 +106,7 @@ static void write_data(struct drm_i915_private *dev_priv,
  	}
  }
-static void read_data(struct drm_i915_private *dev_priv,
+static void read_data(struct drm_i915_private *i915,
  		      i915_reg_t reg,
  		      u8 *data, u32 len)
  {
@@ -125,7 +125,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
  {
  	struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
  	struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	enum port port = intel_dsi_host->port;
  	struct mipi_dsi_packet packet;
  	ssize_t ret;
@@ -154,13 +154,13 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
  	if (packet.payload_length) {
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    MIPI_GEN_FIFO_STAT(port),
  					    data_mask, 0,
  					    50))
  			DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
- write_data(dev_priv, data_reg, packet.payload,
+		write_data(i915, data_reg, packet.payload,
  			   packet.payload_length);
  	}
@@ -168,7 +168,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
  		I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
  	}
- if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    MIPI_GEN_FIFO_STAT(port),
  				    ctrl_mask, 0,
  				    50)) {
@@ -180,13 +180,13 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
  	/* ->rx_len is set only for reads */
  	if (msg->rx_len) {
  		data_mask = GEN_READ_DATA_AVAIL;
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    MIPI_INTR_STAT(port),
  					    data_mask, data_mask,
  					    50))
  			DRM_ERROR("Timeout waiting for read data.\n");
- read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
+		read_data(i915, data_reg, msg->rx_buf, msg->rx_len);
  	}
/* XXX: fix for reads and writes */
@@ -221,7 +221,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
  {
  	struct drm_encoder *encoder = &intel_dsi->base.base;
  	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 mask;
/* XXX: pipe, hs */
@@ -240,7 +240,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
  	I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    MIPI_INTR_STAT(port), mask, mask,
  				    100))
  		DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
@@ -248,25 +248,25 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
  	return 0;
  }
-static void band_gap_reset(struct drm_i915_private *dev_priv)
+static void band_gap_reset(struct drm_i915_private *i915)
  {
-	vlv_flisdsi_get(dev_priv);
+	vlv_flisdsi_get(i915);
- vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
-	vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
-	vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+	vlv_flisdsi_write(i915, 0x08, 0x0001);
+	vlv_flisdsi_write(i915, 0x0F, 0x0005);
+	vlv_flisdsi_write(i915, 0x0F, 0x0025);
  	udelay(150);
-	vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
-	vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
+	vlv_flisdsi_write(i915, 0x0F, 0x0000);
+	vlv_flisdsi_write(i915, 0x08, 0x0000);
- vlv_flisdsi_put(dev_priv);
+	vlv_flisdsi_put(i915);
  }
static int intel_dsi_compute_config(struct intel_encoder *encoder,
  				    struct intel_crtc_state *pipe_config,
  				    struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
  						   base);
  	struct intel_connector *intel_connector = intel_dsi->attached_connector;
@@ -281,7 +281,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
  	if (fixed_mode) {
  		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
- if (HAS_GMCH(dev_priv))
+		if (HAS_GMCH(i915))
  			intel_gmch_panel_fitting(crtc, pipe_config,
  						 conn_state->scaling_mode);
  		else
@@ -300,7 +300,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
  	else
  		pipe_config->pipe_bpp = 18;
- if (IS_GEN9_LP(dev_priv)) {
+	if (IS_GEN9_LP(i915)) {
  		/* Enable Frame time stamp based scanline reporting */
  		adjusted_mode->private_flags |=
  			I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
@@ -327,7 +327,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
static bool glk_dsi_enable_io(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 tmp;
@@ -359,7 +359,7 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
  	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    MIPI_CTRL(port),
  					    GLK_MIPIIO_PORT_POWERED,
  					    GLK_MIPIIO_PORT_POWERED,
@@ -378,14 +378,14 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
static void glk_dsi_device_ready(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 val;
/* Wait for MIPI PHY status bit to set */
  	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    MIPI_CTRL(port),
  					    GLK_PHY_STATUS_PORT_READY,
  					    GLK_PHY_STATUS_PORT_READY,
@@ -413,7 +413,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
  			I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
-			if (intel_wait_for_register(&dev_priv->uncore,
+			if (intel_wait_for_register(&i915->uncore,
  						    MIPI_CTRL(port),
  						    GLK_ULPS_NOT_ACTIVE,
  						    0,
@@ -440,7 +440,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
  	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    MIPI_CTRL(port),
  					    GLK_DATA_LANE_STOP_STATE,
  					    GLK_DATA_LANE_STOP_STATE,
@@ -450,7 +450,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for AFE LATCH */
  	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    BXT_MIPI_PORT_CTRL(port),
  					    AFE_LATCHOUT,
  					    AFE_LATCHOUT,
@@ -461,7 +461,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 val;
@@ -488,21 +488,21 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 val;
DRM_DEBUG_KMS("\n");
-	vlv_flisdsi_get(dev_priv);
+	vlv_flisdsi_get(i915);
  	/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
  	 * needed everytime after power gate */
-	vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
-	vlv_flisdsi_put(dev_priv);
+	vlv_flisdsi_write(i915, 0x04, 0x0004);
+	vlv_flisdsi_put(i915);
/* bandgap reset is needed after everytime we do power gate */
-	band_gap_reset(dev_priv);
+	band_gap_reset(i915);
for_each_dsi_port(port, intel_dsi->ports) {
@@ -527,11 +527,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (IS_GEMINILAKE(dev_priv))
+	if (IS_GEMINILAKE(i915))
  		glk_dsi_device_ready(encoder);
-	else if (IS_GEN9_LP(dev_priv))
+	else if (IS_GEN9_LP(i915))
  		bxt_dsi_device_ready(encoder);
  	else
  		vlv_dsi_device_ready(encoder);
@@ -539,7 +539,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 val;
@@ -554,7 +554,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
  	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    MIPI_CTRL(port),
  					    GLK_PHY_STATUS_PORT_READY, 0, 20))
  			DRM_ERROR("PHY is not turning OFF\n");
@@ -562,7 +562,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for Pwr ACK bit to unset */
  	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    MIPI_CTRL(port),
  					    GLK_MIPIIO_PORT_POWERED, 0, 20))
  			DRM_ERROR("MIPI IO Port is not powergated\n");
@@ -571,7 +571,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 tmp;
@@ -583,7 +583,7 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
  	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(&dev_priv->uncore,
+		if (intel_wait_for_register(&i915->uncore,
  					    MIPI_CTRL(port),
  					    GLK_PHY_STATUS_PORT_READY, 0, 20))
  			DRM_ERROR("PHY is not turning OFF\n");
@@ -605,14 +605,14 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
DRM_DEBUG_KMS("\n");
  	for_each_dsi_port(port, intel_dsi->ports) {
  		/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
-		i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
+		i915_reg_t port_ctrl = IS_GEN9_LP(i915) ?
  			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
  		u32 val;
@@ -632,8 +632,8 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
  		 * On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
  		 * Port A only. MIPI Port C has no similar bit for checking.
  		 */
-		if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
-		    intel_wait_for_register(&dev_priv->uncore,
+		if ((IS_GEN9_LP(i915) || port == PORT_A) &&
+		    intel_wait_for_register(&i915->uncore,
  					    port_ctrl, AFE_LATCHOUT, 0,
  					    30))
  			DRM_ERROR("DSI LP not going Low\n");
@@ -651,14 +651,14 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
  static void intel_dsi_port_enable(struct intel_encoder *encoder,
  				  const struct intel_crtc_state *crtc_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
  		u32 temp;
-		if (IS_GEN9_LP(dev_priv)) {
+		if (IS_GEN9_LP(i915)) {
  			for_each_dsi_port(port, intel_dsi->ports) {
  				temp = I915_READ(MIPI_CTRL(port));
  				temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
@@ -676,7 +676,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
  	}
for_each_dsi_port(port, intel_dsi->ports) {
-		i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
+		i915_reg_t port_ctrl = IS_GEN9_LP(i915) ?
  			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
  		u32 temp;
@@ -688,7 +688,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
  		if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
  			temp |= (intel_dsi->dual_link - 1)
  						<< DUAL_LINK_MODE_SHIFT;
-			if (IS_BROXTON(dev_priv))
+			if (IS_BROXTON(i915))
  				temp |= LANE_CONFIGURATION_DUAL_LINK_A;
  			else
  				temp |= crtc->pipe ?
@@ -708,12 +708,12 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
  static void intel_dsi_port_disable(struct intel_encoder *encoder)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
-		i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
+		i915_reg_t port_ctrl = IS_GEN9_LP(i915) ?
  			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
  		u32 temp;
@@ -775,7 +775,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
  {
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	struct drm_crtc *crtc = pipe_config->base.crtc;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *i915 = to_i915(crtc->dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	int pipe = intel_crtc->pipe;
  	enum port port;
@@ -784,13 +784,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	intel_set_cpu_fifo_underrun_reporting(i915, pipe, true);
/*
  	 * The BIOS may leave the PLL in a wonky state where it doesn't
  	 * lock. It needs to be fully powered down to fix it.
  	 */
-	if (IS_GEN9_LP(dev_priv)) {
+	if (IS_GEN9_LP(i915)) {
  		bxt_dsi_pll_disable(encoder);
  		bxt_dsi_pll_enable(encoder, pipe_config);
  	} else {
@@ -798,7 +798,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
  		vlv_dsi_pll_enable(encoder, pipe_config);
  	}
- if (IS_BROXTON(dev_priv)) {
+	if (IS_BROXTON(i915)) {
  		/* Add MIPI IO reset programming for modeset */
  		val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
  		I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
@@ -809,7 +809,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
  		I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, 0);
  	}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		u32 val;
/* Disable DPOunit clock gating, can stall pipe */
@@ -818,7 +818,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
  		I915_WRITE(DSPCLK_GATE_D, val);
  	}
- if (!IS_GEMINILAKE(dev_priv))
+	if (!IS_GEMINILAKE(i915))
  		intel_dsi_prepare(encoder, pipe_config);
/* Power on, try both CRC pmic gpio and VBT */
@@ -830,7 +830,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
  	/* Deassert reset */
  	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
- if (IS_GEMINILAKE(dev_priv)) {
+	if (IS_GEMINILAKE(i915)) {
  		glk_cold_boot = glk_dsi_enable_io(encoder);
/* Prepare port in cold boot(s3/s4) scenario */
@@ -842,7 +842,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
  	intel_dsi_device_ready(encoder);
/* Prepare port in normal boot scenario */
-	if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot)
+	if (IS_GEMINILAKE(i915) && !glk_cold_boot)
  		intel_dsi_prepare(encoder, pipe_config);
/* Send initialization commands in LP mode */
@@ -901,9 +901,9 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (IS_GEMINILAKE(dev_priv))
+	if (IS_GEMINILAKE(i915))
  		glk_dsi_clear_device_ready(encoder);
  	else
  		vlv_dsi_clear_device_ready(encoder);
@@ -913,7 +913,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
  				   const struct intel_crtc_state *pipe_config,
  				   const struct drm_connector_state *conn_state)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 val;
@@ -941,7 +941,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
  	/* Transition to LP-00 */
  	intel_dsi_clear_device_ready(encoder);
- if (IS_BROXTON(dev_priv)) {
+	if (IS_BROXTON(i915)) {
  		/* Power down DSI regulator to save power */
  		I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
  		I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, HS_IO_CTRL_SELECT);
@@ -952,7 +952,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
  				val & ~MIPIO_RST_CTRL);
  	}
- if (IS_GEN9_LP(dev_priv)) {
+	if (IS_GEN9_LP(i915)) {
  		bxt_dsi_pll_disable(encoder);
  	} else {
  		u32 val;
@@ -983,7 +983,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
  static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
  				   enum pipe *pipe)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	intel_wakeref_t wakeref;
  	enum port port;
@@ -991,7 +991,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
-	wakeref = intel_display_power_get_if_enabled(dev_priv,
+	wakeref = intel_display_power_get_if_enabled(i915,
  						     encoder->power_domain);
  	if (!wakeref)
  		return false;
@@ -1001,12 +1001,12 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
  	 * configuration, otherwise accessing DSI registers will hang the
  	 * machine. See BSpec North Display Engine registers/MIPI[BXT].
  	 */
-	if (IS_GEN9_LP(dev_priv) && !bxt_dsi_pll_is_enabled(dev_priv))
+	if (IS_GEN9_LP(i915) && !bxt_dsi_pll_is_enabled(i915))
  		goto out_put_power;
/* XXX: this only works for one DSI output */
  	for_each_dsi_port(port, intel_dsi->ports) {
-		i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ?
+		i915_reg_t ctrl_reg = IS_GEN9_LP(i915) ?
  			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
  		bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
@@ -1015,7 +1015,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
  		 * bit in port C control register does not get set. As a
  		 * workaround, check pipe B conf instead.
  		 */
-		if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+		if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
  		    port == PORT_C)
  			enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
@@ -1031,7 +1031,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
  		if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
  			continue;
- if (IS_GEN9_LP(dev_priv)) {
+		if (IS_GEN9_LP(i915)) {
  			u32 tmp = I915_READ(MIPI_CTRL(port));
  			tmp &= BXT_PIPE_SELECT_MASK;
  			tmp >>= BXT_PIPE_SELECT_SHIFT;
@@ -1049,7 +1049,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
  	}
out_put_power:
-	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+	intel_display_power_put(i915, encoder->power_domain, wakeref);
return active;
  }
@@ -1058,7 +1058,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
  				    struct intel_crtc_state *pipe_config)
  {
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct drm_display_mode *adjusted_mode =
  					&pipe_config->base.adjusted_mode;
  	struct drm_display_mode *adjusted_mode_sw;
@@ -1218,13 +1218,13 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
  static void intel_dsi_get_config(struct intel_encoder *encoder,
  				 struct intel_crtc_state *pipe_config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 pclk;
  	DRM_DEBUG_KMS("\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
-	if (IS_GEN9_LP(dev_priv)) {
+	if (IS_GEN9_LP(i915)) {
  		bxt_dsi_get_pipe_config(encoder, pipe_config);
  		pclk = bxt_dsi_get_pclk(encoder, pipe_config);
  	} else {
@@ -1255,7 +1255,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
  			    const struct drm_display_mode *adjusted_mode)
  {
  	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
  	enum port port;
  	unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1290,7 +1290,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
  	hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
-		if (IS_GEN9_LP(dev_priv)) {
+		if (IS_GEN9_LP(i915)) {
  			/*
  			 * Program hdisplay and vdisplay on MIPI transcoder.
  			 * This is different from calculated hactive and
@@ -1342,7 +1342,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
  {
  	struct drm_encoder *encoder = &intel_encoder->base;
  	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
  	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -1362,7 +1362,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
  	}
for_each_dsi_port(port, intel_dsi->ports) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  			/*
  			 * escape clock divider, 20MHz, shared for A and C.
  			 * device ready must be off when doing this! txclkesc?
@@ -1377,7 +1377,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
  			tmp &= ~READ_REQUEST_PRIORITY_MASK;
  			I915_WRITE(MIPI_CTRL(port), tmp |
  					READ_REQUEST_PRIORITY_HIGH);
-		} else if (IS_GEN9_LP(dev_priv)) {
+		} else if (IS_GEN9_LP(i915)) {
  			enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
@@ -1415,7 +1415,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
  	if (intel_dsi->clock_stop)
  		tmp |= CLOCKSTOP;
- if (IS_GEN9_LP(dev_priv)) {
+	if (IS_GEN9_LP(i915)) {
  		tmp |= BXT_DPHY_DEFEATURE_EN;
  		if (!is_cmd_mode(intel_dsi))
  			tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
@@ -1466,7 +1466,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
  		I915_WRITE(MIPI_INIT_COUNT(port),
  				txclkesc(intel_dsi->escape_clk_div, 100));
- if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
+		if (IS_GEN9_LP(i915) && (!intel_dsi->dual_link)) {
  			/*
  			 * BXT spec says write MIPI_INIT_COUNT for
  			 * both the ports, even if only one is
@@ -1500,7 +1500,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
  		 */
  		I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
- if (IS_GEMINILAKE(dev_priv)) {
+		if (IS_GEMINILAKE(i915)) {
  			I915_WRITE(MIPI_TLPX_TIME_COUNT(port),
  					intel_dsi->lp_byte_clk);
  			/* Shadow of DPHY reg */
@@ -1533,19 +1533,19 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
static void intel_dsi_unprepare(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 val;
- if (IS_GEMINILAKE(dev_priv))
+	if (IS_GEMINILAKE(i915))
  		return;
for_each_dsi_port(port, intel_dsi->ports) {
  		/* Panel commands can be sent when clock is in LP11 */
  		I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
- if (IS_GEN9_LP(dev_priv))
+		if (IS_GEN9_LP(i915))
  			bxt_dsi_reset_clocks(encoder, port);
  		else
  			vlv_dsi_reset_clocks(encoder, port);
@@ -1594,7 +1594,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
  static enum drm_panel_orientation
  vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	struct intel_encoder *encoder = connector->encoder;
  	enum intel_display_power_domain power_domain;
  	enum drm_panel_orientation orientation;
@@ -1607,11 +1607,11 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
  	if (!encoder->get_hw_state(encoder, &pipe))
  		return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	crtc = intel_get_crtc_for_pipe(i915, pipe);
  	plane = to_intel_plane(crtc->base.primary);
power_domain = POWER_DOMAIN_PIPE(pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
  	if (!wakeref)
  		return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
@@ -1624,7 +1624,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
  	else
  		orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
- intel_display_power_put(dev_priv, power_domain, wakeref);
+	intel_display_power_put(i915, power_domain, wakeref);
return orientation;
  }
@@ -1632,10 +1632,10 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
  static enum drm_panel_orientation
  vlv_dsi_get_panel_orientation(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
  	enum drm_panel_orientation orientation;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  		orientation = vlv_dsi_get_hw_panel_orientation(connector);
  		if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
  			return orientation;
@@ -1646,13 +1646,13 @@ vlv_dsi_get_panel_orientation(struct intel_connector *connector)
static void intel_dsi_add_properties(struct intel_connector *connector)
  {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (connector->panel.fixed_mode) {
  		u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
-		if (!HAS_GMCH(dev_priv))
+		if (!HAS_GMCH(i915))
  			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

  		drm_connector_attach_scaling_mode_property(&connector->base,
@@ -1679,8 +1679,8 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
  static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
  {
  	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct mipi_config *mipi_config = i915->vbt.dsi.config;
  	u32 tlpx_ns, extra_byte_count, tlpx_ui;
  	u32 ui_num, ui_den;
  	u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
@@ -1727,7 +1727,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
  	 * For GEMINILAKE dphy_param_reg will be programmed in terms of
  	 * HS byte clock count for other platform in HS ddr clock count
  	 */
-	mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
+	mul = IS_GEMINILAKE(i915) ? 8 : 2;
  	ths_prepare_ns = max(mipi_config->ths_prepare,
  			     mipi_config->tclk_prepare);
@@ -1837,9 +1837,9 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
  	intel_dsi_log_params(intel_dsi);
  }
-void vlv_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct drm_i915_private *i915)
  {
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_device *dev = &i915->drm;
  	struct intel_dsi *intel_dsi;
  	struct intel_encoder *intel_encoder;
  	struct drm_encoder *encoder;
@@ -1851,13 +1851,13 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
  	DRM_DEBUG_KMS("\n");

  	/* There is no detection method for MIPI so rely on VBT */
-	if (!intel_bios_is_dsi_present(dev_priv, &port))
+	if (!intel_bios_is_dsi_present(i915, &port))
  		return;

-	if (IS_GEN9_LP(dev_priv))
-		dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
+	if (IS_GEN9_LP(i915))
+		i915->mipi_mmio_base = BXT_MIPI_BASE;
  	else
-		dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+		i915->mipi_mmio_base = VLV_MIPI_BASE;

  	intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
  	if (!intel_dsi)
@@ -1897,20 +1897,20 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
  	 * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
  	 * port C. BXT isn't limited like this.
  	 */
-	if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
  		intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
  	else if (port == PORT_A)
  		intel_encoder->crtc_mask = BIT(PIPE_A);
  	else
  		intel_encoder->crtc_mask = BIT(PIPE_B);

-	if (dev_priv->vbt.dsi.config->dual_link)
+	if (i915->vbt.dsi.config->dual_link)
  		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
  	else
  		intel_dsi->ports = BIT(port);

-	intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
-	intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+	intel_dsi->dcs_backlight_ports = i915->vbt.dsi.bl_ports;
+	intel_dsi->dcs_cabc_ports = i915->vbt.dsi.cabc_ports;

  	/* Create a DSI host (and a device) for each port. */
  	for_each_dsi_port(port, intel_dsi->ports) {
@@ -1949,8 +1949,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
  	 * In case of BYT with CRC PMIC, we need to use GPIO for
  	 * Panel control.
  	 */
-	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-	    (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) {
+	if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
+	    (i915->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) {
  		intel_dsi->gpio_panel =
  			gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index 99cc3e2e9c2c..26655d5472b4 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -53,7 +53,7 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
  	return dsi_clk_khz;
  }
-static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+static int dsi_calc_mnp(struct drm_i915_private *i915,
  			struct intel_crtc_state *config,
  			int target_dsi_clk)
  {
@@ -68,7 +68,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
  		return -ECHRNG;
  	}

-	if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(i915)) {
  		ref_clk = 100000;
  		n = 4;
  		m_min = 70;
@@ -116,7 +116,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
  int vlv_dsi_pll_compute(struct intel_encoder *encoder,
  			struct intel_crtc_state *config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	int ret;
  	u32 dsi_clk;
@@ -124,7 +124,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
  	dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
  				    intel_dsi->lane_count);

-	ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
+	ret = dsi_calc_mnp(i915, config, dsi_clk);
  	if (ret) {
  		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
  		return ret;
@@ -147,15 +147,15 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
  void vlv_dsi_pll_enable(struct intel_encoder *encoder,
  			const struct intel_crtc_state *config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

  	DRM_DEBUG_KMS("\n");

-	vlv_cck_get(dev_priv);
+	vlv_cck_get(i915);

-	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
-	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
-	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
+	vlv_cck_write(i915, CCK_REG_DSI_PLL_CONTROL, 0);
+	vlv_cck_write(i915, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
+	vlv_cck_write(i915, CCK_REG_DSI_PLL_CONTROL,
  		      config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);

  	/* wait at least 0.5 us after ungating before enabling VCO,
@@ -163,38 +163,38 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
  	 */
  	usleep_range(10, 50);

-	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
+	vlv_cck_write(i915, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);

-	if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
+	if (wait_for(vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) &
  						DSI_PLL_LOCK, 20)) {

-		vlv_cck_put(dev_priv);
+		vlv_cck_put(i915);
  		DRM_ERROR("DSI PLL lock failed\n");
  		return;
  	}
-	vlv_cck_put(dev_priv);
+	vlv_cck_put(i915);

  	DRM_DEBUG_KMS("DSI PLL locked\n");
  }

  void vlv_dsi_pll_disable(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 tmp;

  	DRM_DEBUG_KMS("\n");

-	vlv_cck_get(dev_priv);
+	vlv_cck_get(i915);

-	tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	tmp = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL);
  	tmp &= ~DSI_PLL_VCO_EN;
  	tmp |= DSI_PLL_LDO_GATE;
-	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+	vlv_cck_write(i915, CCK_REG_DSI_PLL_CONTROL, tmp);

-	vlv_cck_put(dev_priv);
+	vlv_cck_put(i915);
  }
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *i915)
  {
  	bool enabled;
  	u32 val;
@@ -216,7 +216,7 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
  	 * causes a system hang.
  	 */
  	val = I915_READ(BXT_DSI_PLL_CTL);
-	if (IS_GEMINILAKE(dev_priv)) {
+	if (IS_GEMINILAKE(i915)) {
  		if (!(val & BXT_DSIA_16X_MASK)) {
  			DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
  			enabled = false;
@@ -233,7 +233,7 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)

  void bxt_dsi_pll_disable(struct intel_encoder *encoder)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	u32 val;

  	DRM_DEBUG_KMS("\n");
@@ -246,7 +246,7 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
  	 * PLL lock should deassert within 200us.
  	 * Wait up to 1ms before timing out.
  	 */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    BXT_DSI_PLL_ENABLE,
  				    BXT_DSI_PLL_LOCKED,
  				    0,
@@ -257,21 +257,21 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
  u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
  		     struct intel_crtc_state *config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
  	u32 dsi_clock, pclk;
  	u32 pll_ctl, pll_div;
  	u32 m = 0, p = 0, n;
-	int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
+	int refclk = IS_CHERRYVIEW(i915) ? 100000 : 25000;
  	int i;

  	DRM_DEBUG_KMS("\n");

-	vlv_cck_get(dev_priv);
-	pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
-	pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
-	vlv_cck_put(dev_priv);
+	vlv_cck_get(i915);
+	pll_ctl = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL);
+	pll_div = vlv_cck_read(i915, CCK_REG_DSI_PLL_DIVIDER);
+	vlv_cck_put(i915);

  	config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
  	config->dsi_pll.div = pll_div;
@@ -325,7 +325,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
  	u32 dsi_clk;
  	u32 dsi_ratio;
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);

  	config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
@@ -343,7 +343,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
  void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
  {
  	u32 temp;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);

  	temp = I915_READ(MIPI_CTRL(port));
@@ -356,7 +356,7 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
  static void glk_dsi_program_esc_clock(struct drm_device *dev,
  				   const struct intel_crtc_state *config)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 dsi_rate = 0;
  	u32 pll_ratio = 0;
  	u32 ddr_clk = 0;
@@ -404,7 +404,7 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
  static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
  				   const struct intel_crtc_state *config)
  {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
  	u32 tmp;
  	u32 dsi_rate = 0;
  	u32 pll_ratio = 0;
@@ -457,7 +457,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
  int bxt_dsi_pll_compute(struct intel_encoder *encoder,
  			struct intel_crtc_state *config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
  	u32 dsi_clk;
@@ -472,7 +472,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
  	 */
  	dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);

-	if (IS_BROXTON(dev_priv)) {
+	if (IS_BROXTON(i915)) {
  		dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN;
  		dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX;
  	} else {
@@ -496,7 +496,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
  	/* As per recommendation from hardware team,
  	 * Prog PVD ratio =1 if dsi ratio <= 50
  	 */
-	if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
+	if (IS_BROXTON(i915) && dsi_ratio <= 50)
  		config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;

  	return 0;
@@ -505,7 +505,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
  void bxt_dsi_pll_enable(struct intel_encoder *encoder,
  			const struct intel_crtc_state *config)
  {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
  	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
  	enum port port;
  	u32 val;
@@ -517,7 +517,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
  	POSTING_READ(BXT_DSI_PLL_CTL);

  	/* Program TX, RX, Dphy clocks */
-	if (IS_BROXTON(dev_priv)) {
+	if (IS_BROXTON(i915)) {
  		for_each_dsi_port(port, intel_dsi->ports)
  			bxt_dsi_program_clocks(encoder->base.dev, port, config);
  	} else {
@@ -530,7 +530,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
  	I915_WRITE(BXT_DSI_PLL_ENABLE, val);

  	/* Timeout and fail if PLL not locked */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(&i915->uncore,
  				    BXT_DSI_PLL_ENABLE,
  				    BXT_DSI_PLL_LOCKED,
  				    BXT_DSI_PLL_LOCKED,
@@ -546,10 +546,10 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
  {
  	u32 tmp;
  	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);

  	/* Clear old configurations */
-	if (IS_BROXTON(dev_priv)) {
+	if (IS_BROXTON(i915)) {
  		tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
  		tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
  		tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx




