On Wed, 21 Jun 2017, Alan Tan <alan.chin.loon.tan@xxxxxxxxx> wrote: > From: Vivek Kasireddy <vivek.kasireddy@xxxxxxxxx> > > In addition to adding the registers associated with MIPI DSI > encoder/connector, we also ensure the intel_bios_init() function > gets called before intel_gvt_init() so that we can detect the presence > of MIPI DSI from the VBT and decide whether to read the relevant > registers or not. This is needed because if we read MIPI registers > when it is not present, this leads to a hang. > > Also, since PIPECONF register reads 0x0 when MIPI DSI is enabled, > we try to augment functions like pipe_is_enabled() to ensure that > Vblank and Flip done events are forwarded to DomU by calling > intel_bios_is_dsi_present(). We do a similar thing with plane > surface and MMIO write handler as well. > > Signed-off-by: Vivek Kasireddy <vivek.kasireddy@xxxxxxxxx> > Reviewed-by: He, Min <min.he@xxxxxxxxx> > --- > drivers/gpu/drm/i915/gvt/display.c | 277 ++++++-- > drivers/gpu/drm/i915/gvt/handlers.c | 1317 ++++++++++++++++++++++++++--------- > drivers/gpu/drm/i915/i915_drv.c | 8 +- > drivers/gpu/drm/i915/i915_irq.c | 17 +- > 4 files changed, 1242 insertions(+), 377 deletions(-) Please split up the patch. This is way too big to be reviewed or merged in one go. Especially the parts touching i915 proper must be split up to patches of their own. You also have tons of whitespace and other unrelated changes here. There are many helpful tips about patch size, here are a couple I like: * If you get a regression report with a bisect pointing to the commit, will you be able to spot the mistake reasonably quickly? * What is the impact of revert in the above case? Can you split up and rearrange the patch/series in a way that reverting the main functional change still leaves in all the prep work? BR, Jani. > > diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c > index 6d8fde8..8d9f329 100644 > --- a/drivers/gpu/drm/i915/gvt/display.c > +++ b/drivers/gpu/drm/i915/gvt/display.c > @@ -55,6 +55,20 @@ static int get_edp_pipe(struct intel_vgpu *vgpu) > return pipe; > } > > +int get_dsi_pipe(struct intel_vgpu *vgpu) > +{ > + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; > + u32 data = vgpu_vreg(vgpu, _MIPIA_CTRL); > + > + data &= BXT_PIPE_SELECT_MASK; > + data >>= BXT_PIPE_SELECT_SHIFT; > + > + if (data > PIPE_C) > + return -1; > + else > + return data; > +} > + > static int edp_pipe_is_enabled(struct intel_vgpu *vgpu) > { > struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; > @@ -80,58 +94,117 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) > if (edp_pipe_is_enabled(vgpu) && > get_edp_pipe(vgpu) == pipe) > return 1; > + > + if (intel_bios_is_dsi_present(dev_priv, NULL) && > + get_dsi_pipe(vgpu) == pipe) > + return 1; > + > return 0; > } > > +static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { > + { > +/* EDID with 1024x768 as its resolution */ > + /*Header*/ > + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, > + /* Vendor & Product Identification */ > + 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, > + /* Version & Revision */ > + 0x01, 0x04, > + /* Basic Display Parameters & Features */ > + 0xa5, 0x34, 0x20, 0x78, 0x23, > + /* Color Characteristics */ > + 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, > + /* Established Timings: maximum resolution is 1024x768 */ > + 0x21, 0x08, 0x00, > + /* Standard Timings. 
All invalid */ > + 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00, > + 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, > + /* 18 Byte Data Blocks 1: invalid */ > + 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0, > + 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, > + /* 18 Byte Data Blocks 2: invalid */ > + 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, > + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, > + /* 18 Byte Data Blocks 3: invalid */ > + 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, > + 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, > + /* 18 Byte Data Blocks 4: invalid */ > + 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, > + 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, > + /* Extension Block Count */ > + 0x00, > + /* Checksum */ > + 0xef, > + }, > + { > /* EDID with 1920x1200 as its resolution */ > -static unsigned char virtual_dp_monitor_edid[] = { > - /*Header*/ > - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, > - /* Vendor & Product Identification */ > - 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, > - /* Version & Revision */ > - 0x01, 0x04, > - /* Basic Display Parameters & Features */ > - 0xa5, 0x34, 0x20, 0x78, 0x23, > - /* Color Characteristics */ > - 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, > - /* Established Timings: maximum resolution is 1024x768 */ > - 0x21, 0x08, 0x00, > - /* > - * Standard Timings. > - * below new resolutions can be supported: > - * 1920x1080, 1280x720, 1280x960, 1280x1024, > - * 1440x900, 1600x1200, 1680x1050 > - */ > - 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, > - 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, > - /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ > - 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, > - 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, > - /* 18 Byte Data Blocks 2: invalid */ > - 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, > - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, > - /* 18 Byte Data Blocks 3: invalid */ > - 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, > - 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, > - /* 18 Byte Data Blocks 4: invalid */ > - 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, > - 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, > - /* Extension Block Count */ > - 0x00, > - /* Checksum */ > - 0x45, > + /*Header*/ > + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, > + /* Vendor & Product Identification */ > + 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, > + /* Version & Revision */ > + 0x01, 0x04, > + /* Basic Display Parameters & Features */ > + 0xa5, 0x34, 0x20, 0x78, 0x23, > + /* Color Characteristics */ > + 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, > + /* Established Timings: maximum resolution is 1024x768 */ > + 0x21, 0x08, 0x00, > + /* > + * Standard Timings. 
> + * below new resolutions can be supported: > + * 1920x1080, 1280x720, 1280x960, 1280x1024, > + * 1440x900, 1600x1200, 1680x1050 > + */ > + 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, > + 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, > + /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ > + 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, > + 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, > + /* 18 Byte Data Blocks 2: invalid */ > + 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, > + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, > + /* 18 Byte Data Blocks 3: invalid */ > + 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, > + 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, > + /* 18 Byte Data Blocks 4: invalid */ > + 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, > + 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, > + /* Extension Block Count */ > + 0x00, > + /* Checksum */ > + 0x45, > + }, > }; > > #define DPCD_HEADER_SIZE 0xb > > +/* let the virtual display supports DP1.2 */ > static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { > - 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 > + 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 > }; > > static void emulate_monitor_status_change(struct intel_vgpu *vgpu) > { > struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; > + > + if (IS_BROXTON(dev_priv)) { > + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA | > + BXT_DE_PORT_HP_DDIB | > + BXT_DE_PORT_HP_DDIC); > + > + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) > + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIA; > + > + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) > + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIB; > + > + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) > + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIC; > + return; > + } > + > vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | > SDE_PORTC_HOTPLUG_CPT | > SDE_PORTD_HOTPLUG_CPT); > @@ -140,14 +213,47 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) > vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | > SDE_PORTE_HOTPLUG_SPT); > > - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) > + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { > + vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; > + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= > + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | > + TRANS_DDI_PORT_MASK); > + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= > + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | > + (PORT_B << TRANS_DDI_PORT_SHIFT) | > + TRANS_DDI_FUNC_ENABLE); > + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; > + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; > vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; > + } > > - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) > + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { > vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; > + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= > + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | > + TRANS_DDI_PORT_MASK); > + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= > + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | > + (PORT_C << TRANS_DDI_PORT_SHIFT) | > + TRANS_DDI_FUNC_ENABLE); > + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; > + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; > + vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; > + } > > - 
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) > + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { > vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; > + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= > + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | > + TRANS_DDI_PORT_MASK); > + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= > + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | > + (PORT_D << TRANS_DDI_PORT_SHIFT) | > + TRANS_DDI_FUNC_ENABLE); > + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; > + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; > + vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; > + } > > if (IS_SKYLAKE(dev_priv) && > intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { > @@ -160,6 +266,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) > GEN8_PORT_DP_A_HOTPLUG; > else > vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT; > + > + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED; > } > } > > @@ -175,9 +283,16 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) > } > > static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, > - int type) > + int type, unsigned int resolution, void *edid) > { > struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); > + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; > + > + if (intel_bios_is_dsi_present(dev_priv, NULL)) > + return 0; > + > + if (WARN_ON(resolution >= GVT_EDID_NUM)) > + return -EINVAL; > > port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); > if (!port->edid) > @@ -189,8 +304,12 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, > return -ENOMEM; > } > > - memcpy(port->edid->edid_block, virtual_dp_monitor_edid, > - EDID_SIZE); > + if (edid) > + memcpy(port->edid->edid_block, edid, EDID_SIZE); > + else > + memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution], > + EDID_SIZE); > + > port->edid->data_valid = true; > > memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); > @@ -295,6 +414,62 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) > emulate_vblank(vgpu); > } > > +static void intel_gvt_vblank_work(struct work_struct *w) > +{ > + struct intel_gvt_pipe_info *pipe_info = container_of(w, > + struct intel_gvt_pipe_info, vblank_work); > + struct intel_gvt *gvt = pipe_info->gvt; > + struct intel_vgpu *vgpu; > + int id; > + > + mutex_lock(&gvt->lock); > + for_each_active_vgpu(gvt, vgpu, id) > + emulate_vblank_on_pipe(vgpu, pipe_info->pipe_num); > + mutex_unlock(&gvt->lock); > +} > + > +void intel_gvt_init_pipe_info(struct intel_gvt *gvt) > +{ > + int pipe; > + > + for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) { > + gvt->pipe_info[pipe].pipe_num = pipe; > + gvt->pipe_info[pipe].gvt = gvt; > + INIT_WORK(&gvt->pipe_info[pipe].vblank_work, > + intel_gvt_vblank_work); > + } > +} > + > +int bxt_setup_virtual_monitors(struct intel_vgpu *vgpu) > +{ > + struct intel_connector *p = NULL; > + int pipe = 0; > + int ret = 0; > + > + for_each_intel_connector(&vgpu->gvt->dev_priv->drm, p) { > + if (p->encoder->get_hw_state(p->encoder, &pipe) && > + p->detect_edid) { > + ret = setup_virtual_dp_monitor(vgpu, pipe, > + GVT_DP_A + pipe, 0, p->detect_edid); > + if (ret) > + return ret; > + } > + } > + return 0; > +} > + > +void bxt_clean_virtual_monitors(struct intel_vgpu *vgpu) > +{ > + int port = 0; > + > + for (port = PORT_A; port < INTEL_GVT_MAX_PORT; port++) { > + struct intel_vgpu_port *p = intel_vgpu_port(vgpu, port); > + > + if 
(p->edid) > + clean_virtual_dp_monitor(vgpu, port); > + } > +} > + > /** > * intel_vgpu_clean_display - clean vGPU virtual display emulation > * @vgpu: a vGPU > @@ -306,7 +481,9 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) > { > struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; > > - if (IS_SKYLAKE(dev_priv)) > + if (IS_BROXTON(dev_priv)) > + bxt_clean_virtual_monitors(vgpu); > + else if (IS_SKYLAKE(dev_priv)) > clean_virtual_dp_monitor(vgpu, PORT_D); > else > clean_virtual_dp_monitor(vgpu, PORT_B); > @@ -322,16 +499,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) > * Zero on success, negative error code if failed. > * > */ > -int intel_vgpu_init_display(struct intel_vgpu *vgpu) > +int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) > { > struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; > > intel_vgpu_init_i2c_edid(vgpu); > > - if (IS_SKYLAKE(dev_priv)) > - return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D); > + if (IS_BROXTON(dev_priv)) > + return bxt_setup_virtual_monitors(vgpu); > + else if (IS_SKYLAKE(dev_priv)) > + return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, resolution, NULL); > else > - return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B); > + return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B, resolution, NULL); > } > > /** > diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c > index 1d45062..16bcc01 100644 > --- a/drivers/gpu/drm/i915/gvt/handlers.c > +++ b/drivers/gpu/drm/i915/gvt/handlers.c > @@ -62,12 +62,16 @@ > /* This reg could be accessed by unaligned address */ > #define F_UNALIGN (1 << 6) > > +extern int get_dsi_pipe(struct intel_vgpu *vgpu); > + > unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) > { > if (IS_BROADWELL(gvt->dev_priv)) > return D_BDW; > else if (IS_SKYLAKE(gvt->dev_priv)) > return D_SKL; > + else if (IS_BROXTON(gvt->dev_priv)) > + return D_BXT; > > return 0; > } > @@ -121,6 +125,7 @@ static int new_mmio_info(struct intel_gvt *gvt, > info->size = size; > info->length = (i + 4) < end ? 4 : (end - i); > info->addr_mask = addr_mask; > + info->ro_mask = ro_mask; > info->device = device; > info->read = read ? read : intel_vgpu_default_mmio_read; > info->write = write ? write : intel_vgpu_default_mmio_write; > @@ -150,15 +155,42 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg) > #define fence_num_to_offset(num) \ > (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) > > + > +static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason) > +{ > + switch (reason) { > + case GVT_FAILSAFE_UNSUPPORTED_GUEST: > + pr_err("Detected your guest driver doesn't support GVT-g.\n"); > + break; > + case GVT_FAILSAFE_INSUFFICIENT_RESOURCE: > + pr_err("Graphics resource is not enough for the guest\n"); > + default: > + break; > + } > + pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id); > + vgpu->failsafe = true; > +} > + > static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, > unsigned int fence_num, void *p_data, unsigned int bytes) > { > if (fence_num >= vgpu_fence_sz(vgpu)) { > - gvt_err("vgpu%d: found oob fence register access\n", > - vgpu->id); > - gvt_err("vgpu%d: total fence num %d access fence num %d\n", > - vgpu->id, vgpu_fence_sz(vgpu), fence_num); > + > + /* When guest access oob fence regs without access > + * pv_info first, we treat guest not supporting GVT, > + * and we will let vgpu enter failsafe mode. 
> + */ > + if (!vgpu->pv_notified) > + enter_failsafe_mode(vgpu, > + GVT_FAILSAFE_UNSUPPORTED_GUEST); > + > + if (!vgpu->mmio.disable_warn_untrack) { > + gvt_vgpu_err("found oob fence register access\n"); > + gvt_vgpu_err("total fence %d, access fence %d\n", > + vgpu_fence_sz(vgpu), fence_num); > + } > memset(p_data, 0, bytes); > + return -EINVAL; > } > return 0; > } > @@ -206,7 +238,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, > old = vgpu_vreg(vgpu, offset); > new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); > > - if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { > + if (IS_SKYLAKE(vgpu->gvt->dev_priv) || IS_BROXTON(vgpu->gvt->dev_priv)) { > switch (offset) { > case FORCEWAKE_RENDER_GEN9_REG: > ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; > @@ -219,7 +251,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, > break; > default: > /*should not hit here*/ > - gvt_err("invalid forcewake offset 0x%x\n", offset); > + gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset); > return -EINVAL; > } > } else { > @@ -357,18 +389,98 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, > void *p_data, unsigned int bytes) > { > u32 data; > + struct drm_device *dev = &vgpu->gvt->dev_priv->drm; > + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); > > write_vreg(vgpu, offset, p_data, bytes); > data = vgpu_vreg(vgpu, offset); > > - if (data & PIPECONF_ENABLE) > + if (data & PIPECONF_ENABLE) { > vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE; > - else > + dev->driver->enable_vblank(dev, pipe); > + } else { > vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; > + } > intel_gvt_check_vblank_emulation(vgpu->gvt); > return 0; > } > > +/* ascendingly sorted */ > +static i915_reg_t force_nonpriv_white_list[] = { > + GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec) > + GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248) > + GEN8_CS_CHICKEN1,//_MMIO(0x2580) > + _MMIO(0x2690), > + _MMIO(0x2694), > + _MMIO(0x2698), > + _MMIO(0x4de0), > + _MMIO(0x4de4), > + _MMIO(0x4dfc), > + GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010) > + _MMIO(0x7014), > + HDC_CHICKEN0,//_MMIO(0x7300) > + GEN8_HDC_CHICKEN1,//_MMIO(0x7304) > + _MMIO(0x7700), > + _MMIO(0x7704), > + _MMIO(0x7708), > + _MMIO(0x770c), > + _MMIO(0xb110), > + GEN8_L3SQCREG4,//_MMIO(0xb118) > + _MMIO(0xe100), > + _MMIO(0xe18c), > + _MMIO(0xe48c), > + _MMIO(0xe5f4), > +}; > + > +/* a simple bsearch */ > +static inline bool in_whitelist(unsigned int reg) > +{ > + int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list); > + i915_reg_t *array = force_nonpriv_white_list; > + > + while (left < right) { > + int mid = (left + right)/2; > + > + if (reg > array[mid].reg) > + left = mid + 1; > + else if (reg < array[mid].reg) > + right = mid; > + else > + return true; > + } > + return false; > +} > + > +static int force_nonpriv_write(struct intel_vgpu *vgpu, > + unsigned int offset, void *p_data, unsigned int bytes) > +{ > + u32 reg_nonpriv = *(u32 *)p_data; > + int ret = -EINVAL; > + > + if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) { > + gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n", > + vgpu->id, offset, bytes); > + return ret; > + } > + > + if (in_whitelist(reg_nonpriv)) { > + ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data, > + bytes); > + } else { > + gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n", > + vgpu->id, reg_nonpriv); > + } > + return ret; > +} > + > +static int pipe_dsl_mmio_read(struct intel_vgpu *vgpu, > + unsigned int offset, void *p_data, unsigned int bytes) > +{ > + struct drm_i915_private *dev_priv = 
vgpu->gvt->dev_priv; > + vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); > + return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); > +} > + > static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, > void *p_data, unsigned int bytes) > { > @@ -432,7 +544,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu, > fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2; > fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK; > } else { > - gvt_err("Invalid train pattern %d\n", train_pattern); > + gvt_vgpu_err("Invalid train pattern %d\n", train_pattern); > return -EINVAL; > } > > @@ -490,7 +602,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu, > else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) > index = FDI_RX_IMR_TO_PIPE(offset); > else { > - gvt_err("Unsupport registers %x\n", offset); > + gvt_vgpu_err("Unsupport registers %x\n", offset); > return -EINVAL; > } > > @@ -624,6 +736,45 @@ static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, > return 0; > } > > +static int skl_plane_surf_write(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; > + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); > + unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset); > + i915_reg_t reg_1ac = _MMIO(_REG_701AC(pipe, plane)); > + int flip_event = SKL_FLIP_EVENT(pipe, plane); > + > + write_vreg(vgpu, offset, p_data, bytes); > + vgpu_vreg(vgpu, reg_1ac) = vgpu_vreg(vgpu, offset); > + > + if (((vgpu_vreg(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE) || > + (intel_bios_is_dsi_present(dev_priv, NULL) && > + get_dsi_pipe(vgpu) == pipe)) && > + (vgpu->gvt->pipe_owner[pipe] == vgpu->id)) { > + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); > + } > + > + set_bit(flip_event, vgpu->irq.flip_done_event[pipe]); > + return 0; > +} > + > +static int skl_plane_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; > + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); > + > + write_vreg(vgpu, offset, p_data, bytes); > + if (((vgpu_vreg(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE) || > + (intel_bios_is_dsi_present(dev_priv, NULL) && > + get_dsi_pipe(vgpu) == pipe)) && > + (vgpu->gvt->pipe_owner[pipe] == vgpu->id)) { > + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); > + } > + return 0; > +} > + > static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, > unsigned int reg) > { > @@ -720,15 +871,16 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, > u32 data; > > if (!dpy_is_valid_port(port_index)) { > - gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id); > + gvt_vgpu_err("Unsupported DP port access!\n"); > return 0; > } > > write_vreg(vgpu, offset, p_data, bytes); > data = vgpu_vreg(vgpu, offset); > > - if (IS_SKYLAKE(vgpu->gvt->dev_priv) && > - offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { > + if ((IS_SKYLAKE(vgpu->gvt->dev_priv) || > + IS_BROXTON(vgpu->gvt->dev_priv)) && > + offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { > /* SKL DPB/C/D aux ctl register changed */ > return 0; > } else if (IS_BROADWELL(vgpu->gvt->dev_priv) && > @@ -874,6 +1026,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, > return 0; > } > > +static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH); > + 
write_vreg(vgpu, offset, p_data, bytes); > + return 0; > +} > + > static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, > void *p_data, unsigned int bytes) > { > @@ -918,8 +1078,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu, > > if (i == num) { > if (num == SBI_REG_MAX) { > - gvt_err("vgpu%d: SBI caching meets maximum limits\n", > - vgpu->id); > + gvt_vgpu_err("SBI caching meets maximum limits\n"); > return; > } > display->sbi.number++; > @@ -999,8 +1158,9 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, > break; > } > if (invalid_read) > - gvt_err("invalid pvinfo read: [%x:%x] = %x\n", > + gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n", > offset, bytes, *(u32 *)p_data); > + vgpu->pv_notified = true; > return 0; > } > > @@ -1026,7 +1186,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) > case 1: /* Remove this in guest driver. */ > break; > default: > - gvt_err("Invalid PV notification %d\n", notification); > + gvt_vgpu_err("Invalid PV notification %d\n", notification); > } > return ret; > } > @@ -1039,7 +1199,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) > char vmid_str[20]; > char display_ready_str[20]; > > - snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready); > + snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready); > env[0] = display_ready_str; > > snprintf(vmid_str, 20, "VMID=%d", vgpu->id); > @@ -1078,8 +1238,11 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, > case _vgtif_reg(execlist_context_descriptor_lo): > case _vgtif_reg(execlist_context_descriptor_hi): > break; > + case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]): > + enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); > + break; > default: > - gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", > + gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n", > offset, bytes, data); > break; > } > @@ -1203,26 +1366,37 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, > u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA); > > switch (cmd) { > - case 0x6: > - /** > - * "Read memory latency" command on gen9. > - * Below memory latency values are read > - * from skylake platform. > - */ > - if (!*data0) > - *data0 = 0x1e1a1100; > - else > - *data0 = 0x61514b3d; > + case GEN9_PCODE_READ_MEM_LATENCY: > + if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { > + /** > + * "Read memory latency" command on gen9. > + * Below memory latency values are read > + * from skylake platform. > + */ > + if (!*data0) > + *data0 = 0x1e1a1100; > + else > + *data0 = 0x61514b3d; > + } > break; > - case 0x5: > + case SKL_PCODE_CDCLK_CONTROL: > + if (IS_SKYLAKE(vgpu->gvt->dev_priv)) > + *data0 = SKL_CDCLK_READY_FOR_CHANGE; > + break; > + case GEN6_PCODE_READ_RC6VIDS: > *data0 |= 0x1; > break; > } > > gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n", > vgpu->id, value, *data0); > - > - value &= ~(1 << 31); > + /** > + * PCODE_READY clear means ready for pcode read/write, > + * PCODE_ERROR_MASK clear means no error happened. In GVT-g we > + * always emulate as pcode read/write success and ready for access > + * anytime, since we don't touch real physical registers here. 
> + */ > + value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK); > return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes); > } > > @@ -1231,10 +1405,18 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu, > { > u32 v = *(u32 *)p_data; > > - v &= (1 << 31) | (1 << 29) | (1 << 9) | > - (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); > + if (IS_BROXTON(vgpu->gvt->dev_priv)) > + v &= (1 << 31) | (1 << 29); > + else > + v &= (1 << 31) | (1 << 29) | (1 << 9) | > + (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); > v |= (v >> 1); > > + vgpu_vreg(vgpu, i915_mmio_reg_offset(SKL_FUSE_STATUS)) = > + (SKL_FUSE_PG0_DIST_STATUS > + | SKL_FUSE_PG1_DIST_STATUS > + | SKL_FUSE_PG2_DIST_STATUS); > + > return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes); > } > > @@ -1276,6 +1458,125 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, > return 0; > } > > +static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + u32 v = *(u32 *)p_data; > + if (v & BXT_DE_PLL_PLL_ENABLE) > + v |= BXT_DE_PLL_LOCK; > + > + vgpu_vreg(vgpu, offset) = v; > + > + return 0; > +} > + > +static int bxt_dsi_pll_enable_write(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + u32 data; > + struct drm_device *dev = &vgpu->gvt->dev_priv->drm; > + > + write_vreg(vgpu, offset, p_data, bytes); > + data = vgpu_vreg(vgpu, offset); > + /* > + if (data & BXT_DSI_PLL_DO_ENABLE) > + dev->driver->enable_vblank(dev, vgpu->pipe); > + */ > + > + return 0; > +} > + > +static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + u32 v = *(u32 *)p_data; > + if (v & PORT_PLL_ENABLE) > + v |= PORT_PLL_LOCK; > + > + vgpu_vreg(vgpu, offset) = v; > + > + return 0; > +} > + > +static int bxt_dbuf_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + u32 v = *(u32 *)p_data; > + if (v & DBUF_POWER_REQUEST) > + v |= DBUF_POWER_STATE; > + else > + v &= ~DBUF_POWER_STATE; > + > + vgpu_vreg(vgpu, offset) = v; > + > + return 0; > +} > + > +static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + u32 v = *(u32 *)p_data; > + u32 data = v & COMMON_RESET_DIS ? 
BXT_PHY_LANE_ENABLED : 0; > + > + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; > + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; > + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; > + > + vgpu_vreg(vgpu, offset) = v; > + > + return 0; > +} > + > +static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + u32 v = vgpu_vreg(vgpu, offset); > + v &= ~UNIQUE_TRANGE_EN_METHOD; > + > + vgpu_vreg(vgpu, offset) = v; > + > + return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); > +} > + > +static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + u32 v = *(u32 *)p_data; > + > + if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) { > + vgpu_vreg(vgpu, offset - 0x600) = v; > + vgpu_vreg(vgpu, offset - 0x800) = v; > + } else { > + vgpu_vreg(vgpu, offset - 0x400) = v; > + vgpu_vreg(vgpu, offset - 0x600) = v; > + } > + > + vgpu_vreg(vgpu, offset) = v; > + > + return 0; > +} > + > +static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu, unsigned int offset, > + void *p_data, unsigned int bytes) > +{ > + u32 v = *(u32 *)p_data; > + > + if (v & BIT(0)) { > + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= ~PHY_RESERVED; > + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= PHY_POWER_GOOD; > + } > + > + if (v & BIT(1)) { > + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= ~PHY_RESERVED; > + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= PHY_POWER_GOOD; > + } > + > + > + vgpu_vreg(vgpu, offset) = v; > + > + return 0; > +} > + > static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu, > unsigned int offset, void *p_data, unsigned int bytes) > { > @@ -1302,7 +1603,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, > if (execlist->elsp_dwords.index == 3) { > ret = intel_vgpu_submit_execlist(vgpu, ring_id); > if(ret) > - gvt_err("fail submit workload on ring %d\n", ring_id); > + gvt_vgpu_err("fail submit workload on ring %d\n", > + ring_id); > } > > ++execlist->elsp_dwords.index; > @@ -1318,6 +1620,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, > bool enable_execlist; > > write_vreg(vgpu, offset, p_data, bytes); > + > + /* when PPGTT mode enabled, we will check if guest has called > + * pvinfo, if not, we will treat this guest as non-gvtg-aware > + * guest, and stop emulating its cfg space, mmio, gtt, etc. 
> + */ > + if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) || > + (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))) > + && !vgpu->pv_notified) { > + enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); > + return 0; > + } > if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)) > || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { > enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); > @@ -1400,6 +1713,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, > #define MMIO_GM(reg, d, r, w) \ > MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w) > > +#define MMIO_GM_RDR(reg, d, r, w) \ > + MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w) > + > #define MMIO_RO(reg, d, f, rm, r, w) \ > MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w) > > @@ -1419,81 +1735,146 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, > #define MMIO_RING_GM(prefix, d, r, w) \ > MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w) > > +#define MMIO_RING_GM_RDR(prefix, d, r, w) \ > + MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w) > + > #define MMIO_RING_RO(prefix, d, f, rm, r, w) \ > MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w) > > +#define MMIO_PIPES_SDH(prefix, plane, s, d, r, w) do { \ > + int pipe; \ > + for_each_pipe(dev_priv, pipe) \ > + MMIO_F(prefix(pipe, plane), s, 0, 0, 0, d, r, w); \ > +} while (0) > + > +#define MMIO_PLANES_SDH(prefix, s, d, r, w) do { \ > + int pipe, plane; \ > + for_each_pipe(dev_priv, pipe) \ > + for_each_universal_plane(dev_priv, pipe, plane) \ > + MMIO_F(prefix(pipe, plane), s, 0, 0, 0, d, r, w); \ > +} while (0) > + > +#define MMIO_PLANES_DH(prefix, d, r, w) \ > + MMIO_PLANES_SDH(prefix, 4, d, r, w) > + > +#define MMIO_PORT_CL_REF(phy) \ > + MMIO_D(BXT_PORT_CL1CM_DW0(phy), D_BXT); \ > + MMIO_D(BXT_PORT_CL1CM_DW9(phy), D_BXT); \ > + MMIO_D(BXT_PORT_CL1CM_DW10(phy), D_BXT); \ > + MMIO_D(BXT_PORT_CL1CM_DW28(phy), D_BXT); \ > + MMIO_D(BXT_PORT_CL1CM_DW30(phy), D_BXT); \ > + MMIO_D(BXT_PORT_CL2CM_DW6(phy), D_BXT); \ > + MMIO_D(BXT_PORT_REF_DW3(phy), D_BXT); \ > + MMIO_D(BXT_PORT_REF_DW6(phy), D_BXT); \ > + MMIO_D(BXT_PORT_REF_DW8(phy), D_BXT) > + > +#define MMIO_PORT_PCS_TX(phy, ch) \ > + MMIO_D(BXT_PORT_PLL_EBB_0(phy, ch), D_BXT); \ > + MMIO_D(BXT_PORT_PLL_EBB_4(phy, ch), D_BXT); \ > + MMIO_D(BXT_PORT_PCS_DW10_LN01(phy, ch), D_BXT); \ > + MMIO_D(BXT_PORT_PCS_DW10_GRP(phy, ch), D_BXT); \ > + MMIO_D(BXT_PORT_PCS_DW12_LN01(phy, ch), D_BXT); \ > + MMIO_D(BXT_PORT_PCS_DW12_LN23(phy, ch), D_BXT); \ > + MMIO_DH(BXT_PORT_PCS_DW12_GRP(phy, ch), D_BXT, NULL, bxt_pcs_dw12_grp_write); \ > + MMIO_D(BXT_PORT_TX_DW2_LN0(phy, ch), D_BXT); \ > + MMIO_D(BXT_PORT_TX_DW2_GRP(phy, ch), D_BXT); \ > + MMIO_DH(BXT_PORT_TX_DW3_LN0(phy, ch), D_BXT, bxt_port_tx_dw3_read, NULL); \ > + MMIO_D(BXT_PORT_TX_DW3_GRP(phy, ch), D_BXT); \ > + MMIO_D(BXT_PORT_TX_DW4_LN0(phy, ch), D_BXT); \ > + MMIO_D(BXT_PORT_TX_DW4_GRP(phy, ch), D_BXT); \ > + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 0), D_BXT); \ > + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 1), D_BXT); \ > + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 2), D_BXT); \ > + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 3), D_BXT); \ > + MMIO_D(BXT_PORT_PLL(phy, ch, 0), D_BXT); \ > + MMIO_D(BXT_PORT_PLL(phy, ch, 1), D_BXT); \ > + MMIO_D(BXT_PORT_PLL(phy, ch, 2), D_BXT); \ > + MMIO_D(BXT_PORT_PLL(phy, ch, 3), D_BXT); \ > + MMIO_D(BXT_PORT_PLL(phy, ch, 6), D_BXT); \ > + MMIO_D(BXT_PORT_PLL(phy, ch, 8), D_BXT); \ > + MMIO_D(BXT_PORT_PLL(phy, ch, 9), D_BXT); \ > + MMIO_D(BXT_PORT_PLL(phy, ch, 10), D_BXT) > + > static int init_generic_mmio_info(struct 
intel_gvt *gvt) > { > struct drm_i915_private *dev_priv = gvt->dev_priv; > int ret; > > - MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); > + MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL, > + intel_vgpu_reg_imr_handler); > > MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); > MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler); > MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); > MMIO_D(SDEISR, D_ALL); > > - MMIO_RING_D(RING_HWSTAM, D_ALL); > + MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL); > > - MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); > - MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); > - MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); > - MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); > + MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); > + MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); > + MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); > + MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); > > #define RING_REG(base) (base + 0x28) > - MMIO_RING_D(RING_REG, D_ALL); > + MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); > #undef RING_REG > > #define RING_REG(base) (base + 0x134) > - MMIO_RING_D(RING_REG, D_ALL); > + MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); > #undef RING_REG > > - MMIO_GM(0x2148, D_ALL, NULL, NULL); > - MMIO_GM(CCID, D_ALL, NULL, NULL); > - MMIO_GM(0x12198, D_ALL, NULL, NULL); > + MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL); > + MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); > + MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL); > MMIO_D(GEN7_CXT_SIZE, D_ALL); > > - MMIO_RING_D(RING_TAIL, D_ALL); > - MMIO_RING_D(RING_HEAD, D_ALL); > - MMIO_RING_D(RING_CTL, D_ALL); > - MMIO_RING_D(RING_ACTHD, D_ALL); > - MMIO_RING_GM(RING_START, D_ALL, NULL, NULL); > + MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); > > /* RING MODE */ > #define RING_REG(base) (base + 0x29c) > - MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write); > + MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, > + ring_mode_mmio_write); > #undef RING_REG > > - MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL); > - MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL); > + MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, > + NULL, NULL); > + MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS, > + NULL, NULL); > MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, > ring_timestamp_mmio_read, NULL); > MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, > ring_timestamp_mmio_read, NULL); > > - MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL); > - MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL); > + MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS, > + NULL, NULL); > MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > - > - MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL); > - MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL); > - MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL); > - MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL); > - MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL); > - MMIO_D(GAM_ECOCHK, D_ALL); > - MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, 
NULL); > + MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + > + MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, > + NULL, NULL); > MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > - MMIO_D(0x9030, D_ALL); > - MMIO_D(0x20a0, D_ALL); > - MMIO_D(0x2420, D_ALL); > - MMIO_D(0x2430, D_ALL); > - MMIO_D(0x2434, D_ALL); > - MMIO_D(0x2438, D_ALL); > - MMIO_D(0x243c, D_ALL); > - MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL); > + MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > - MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL); > + MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > > /* display */ > MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL); > @@ -1505,9 +1886,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) > MMIO_D(0xc4040, D_ALL); > MMIO_D(DERRMR, D_ALL); > > - MMIO_D(PIPEDSL(PIPE_A), D_ALL); > - MMIO_D(PIPEDSL(PIPE_B), D_ALL); > - MMIO_D(PIPEDSL(PIPE_C), D_ALL); > + MMIO_DH(PIPEDSL(PIPE_A), D_ALL, pipe_dsl_mmio_read, NULL); > + MMIO_DH(PIPEDSL(PIPE_B), D_ALL, pipe_dsl_mmio_read, NULL); > + MMIO_DH(PIPEDSL(PIPE_C), D_ALL, pipe_dsl_mmio_read, NULL); > MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL); > > MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); > @@ -1551,71 +1932,71 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) > MMIO_D(0x70098, D_ALL); > MMIO_D(0x7009c, D_ALL); > > - MMIO_D(DSPCNTR(PIPE_A), D_ALL); > - MMIO_D(DSPADDR(PIPE_A), D_ALL); > - MMIO_D(DSPSTRIDE(PIPE_A), D_ALL); > - MMIO_D(DSPPOS(PIPE_A), D_ALL); > - MMIO_D(DSPSIZE(PIPE_A), D_ALL); > - MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); > - MMIO_D(DSPOFFSET(PIPE_A), D_ALL); > - MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL); > - > - MMIO_D(DSPCNTR(PIPE_B), D_ALL); > - MMIO_D(DSPADDR(PIPE_B), D_ALL); > - MMIO_D(DSPSTRIDE(PIPE_B), D_ALL); > - MMIO_D(DSPPOS(PIPE_B), D_ALL); > - MMIO_D(DSPSIZE(PIPE_B), D_ALL); > - MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); > - MMIO_D(DSPOFFSET(PIPE_B), D_ALL); > - MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL); > - > - MMIO_D(DSPCNTR(PIPE_C), D_ALL); > - MMIO_D(DSPADDR(PIPE_C), D_ALL); > - MMIO_D(DSPSTRIDE(PIPE_C), D_ALL); > - MMIO_D(DSPPOS(PIPE_C), D_ALL); > - MMIO_D(DSPSIZE(PIPE_C), D_ALL); > - MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); > - MMIO_D(DSPOFFSET(PIPE_C), D_ALL); > - MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL); > - > - MMIO_D(SPRCTL(PIPE_A), D_ALL); > - MMIO_D(SPRLINOFF(PIPE_A), D_ALL); > - MMIO_D(SPRSTRIDE(PIPE_A), D_ALL); > - MMIO_D(SPRPOS(PIPE_A), D_ALL); > - MMIO_D(SPRSIZE(PIPE_A), 
D_ALL); > - MMIO_D(SPRKEYVAL(PIPE_A), D_ALL); > - MMIO_D(SPRKEYMSK(PIPE_A), D_ALL); > - MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write); > - MMIO_D(SPRKEYMAX(PIPE_A), D_ALL); > - MMIO_D(SPROFFSET(PIPE_A), D_ALL); > - MMIO_D(SPRSCALE(PIPE_A), D_ALL); > - MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL); > - > - MMIO_D(SPRCTL(PIPE_B), D_ALL); > - MMIO_D(SPRLINOFF(PIPE_B), D_ALL); > - MMIO_D(SPRSTRIDE(PIPE_B), D_ALL); > - MMIO_D(SPRPOS(PIPE_B), D_ALL); > - MMIO_D(SPRSIZE(PIPE_B), D_ALL); > - MMIO_D(SPRKEYVAL(PIPE_B), D_ALL); > - MMIO_D(SPRKEYMSK(PIPE_B), D_ALL); > - MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write); > - MMIO_D(SPRKEYMAX(PIPE_B), D_ALL); > - MMIO_D(SPROFFSET(PIPE_B), D_ALL); > - MMIO_D(SPRSCALE(PIPE_B), D_ALL); > - MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL); > - > - MMIO_D(SPRCTL(PIPE_C), D_ALL); > - MMIO_D(SPRLINOFF(PIPE_C), D_ALL); > - MMIO_D(SPRSTRIDE(PIPE_C), D_ALL); > - MMIO_D(SPRPOS(PIPE_C), D_ALL); > - MMIO_D(SPRSIZE(PIPE_C), D_ALL); > - MMIO_D(SPRKEYVAL(PIPE_C), D_ALL); > - MMIO_D(SPRKEYMSK(PIPE_C), D_ALL); > - MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write); > - MMIO_D(SPRKEYMAX(PIPE_C), D_ALL); > - MMIO_D(SPROFFSET(PIPE_C), D_ALL); > - MMIO_D(SPRSCALE(PIPE_C), D_ALL); > - MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); > + MMIO_D(DSPCNTR(PIPE_A), D_BDW); > + MMIO_D(DSPADDR(PIPE_A), D_BDW); > + MMIO_D(DSPSTRIDE(PIPE_A), D_BDW); > + MMIO_D(DSPPOS(PIPE_A), D_BDW); > + MMIO_D(DSPSIZE(PIPE_A), D_BDW); > + MMIO_DH(DSPSURF(PIPE_A), D_BDW, NULL, pri_surf_mmio_write); > + MMIO_D(DSPOFFSET(PIPE_A), D_BDW); > + MMIO_D(DSPSURFLIVE(PIPE_A), D_BDW); > + > + MMIO_D(DSPCNTR(PIPE_B), D_BDW); > + MMIO_D(DSPADDR(PIPE_B), D_BDW); > + MMIO_D(DSPSTRIDE(PIPE_B), D_BDW); > + MMIO_D(DSPPOS(PIPE_B), D_BDW); > + MMIO_D(DSPSIZE(PIPE_B), D_BDW); > + MMIO_DH(DSPSURF(PIPE_B), D_BDW, NULL, pri_surf_mmio_write); > + MMIO_D(DSPOFFSET(PIPE_B), D_BDW); > + MMIO_D(DSPSURFLIVE(PIPE_B), D_BDW); > + > + MMIO_D(DSPCNTR(PIPE_C), D_BDW); > + MMIO_D(DSPADDR(PIPE_C), D_BDW); > + MMIO_D(DSPSTRIDE(PIPE_C), D_BDW); > + MMIO_D(DSPPOS(PIPE_C), D_BDW); > + MMIO_D(DSPSIZE(PIPE_C), D_BDW); > + MMIO_DH(DSPSURF(PIPE_C), D_BDW, NULL, pri_surf_mmio_write); > + MMIO_D(DSPOFFSET(PIPE_C), D_BDW); > + MMIO_D(DSPSURFLIVE(PIPE_C), D_BDW); > + > + MMIO_D(SPRCTL(PIPE_A), D_BDW); > + MMIO_D(SPRLINOFF(PIPE_A), D_BDW); > + MMIO_D(SPRSTRIDE(PIPE_A), D_BDW); > + MMIO_D(SPRPOS(PIPE_A), D_BDW); > + MMIO_D(SPRSIZE(PIPE_A), D_BDW); > + MMIO_D(SPRKEYVAL(PIPE_A), D_BDW); > + MMIO_D(SPRKEYMSK(PIPE_A), D_BDW); > + MMIO_DH(SPRSURF(PIPE_A), D_BDW, NULL, spr_surf_mmio_write); > + MMIO_D(SPRKEYMAX(PIPE_A), D_BDW); > + MMIO_D(SPROFFSET(PIPE_A), D_BDW); > + MMIO_D(SPRSCALE(PIPE_A), D_BDW); > + MMIO_D(SPRSURFLIVE(PIPE_A), D_BDW); > + > + MMIO_D(SPRCTL(PIPE_B), D_BDW); > + MMIO_D(SPRLINOFF(PIPE_B), D_BDW); > + MMIO_D(SPRSTRIDE(PIPE_B), D_BDW); > + MMIO_D(SPRPOS(PIPE_B), D_BDW); > + MMIO_D(SPRSIZE(PIPE_B), D_BDW); > + MMIO_D(SPRKEYVAL(PIPE_B), D_BDW); > + MMIO_D(SPRKEYMSK(PIPE_B), D_BDW); > + MMIO_DH(SPRSURF(PIPE_B), D_BDW, NULL, spr_surf_mmio_write); > + MMIO_D(SPRKEYMAX(PIPE_B), D_BDW); > + MMIO_D(SPROFFSET(PIPE_B), D_BDW); > + MMIO_D(SPRSCALE(PIPE_B), D_BDW); > + MMIO_D(SPRSURFLIVE(PIPE_B), D_BDW); > + > + MMIO_D(SPRCTL(PIPE_C), D_BDW); > + MMIO_D(SPRLINOFF(PIPE_C), D_BDW); > + MMIO_D(SPRSTRIDE(PIPE_C), D_BDW); > + MMIO_D(SPRPOS(PIPE_C), D_BDW); > + MMIO_D(SPRSIZE(PIPE_C), D_BDW); > + MMIO_D(SPRKEYVAL(PIPE_C), D_BDW); > + MMIO_D(SPRKEYMSK(PIPE_C), D_BDW); > + MMIO_DH(SPRSURF(PIPE_C), D_BDW, NULL, spr_surf_mmio_write); > + MMIO_D(SPRKEYMAX(PIPE_C), 
D_BDW); > + MMIO_D(SPROFFSET(PIPE_C), D_BDW); > + MMIO_D(SPRSCALE(PIPE_C), D_BDW); > + MMIO_D(SPRSURFLIVE(PIPE_C), D_BDW); > > MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL); > MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL); > @@ -1726,8 +2107,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) > > MMIO_D(BLC_PWM_CPU_CTL2, D_ALL); > MMIO_D(BLC_PWM_CPU_CTL, D_ALL); > - MMIO_D(BLC_PWM_PCH_CTL1, D_ALL); > - MMIO_D(BLC_PWM_PCH_CTL2, D_ALL); > + MMIO_D(BLC_PWM_PCH_CTL1, D_ALL & ~D_BXT); > + MMIO_D(BLC_PWM_PCH_CTL2, D_ALL & ~D_BXT); > > MMIO_D(0x48268, D_ALL); > > @@ -2022,8 +2403,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) > MMIO_D(FORCEWAKE_ACK, D_ALL); > MMIO_D(GEN6_GT_CORE_STATUS, D_ALL); > MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL); > - MMIO_D(GTFIFODBG, D_ALL); > - MMIO_D(GTFIFOCTL, D_ALL); > + MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL); > MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); > MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL); > MMIO_D(ECOBUS, D_ALL); > @@ -2071,8 +2452,15 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) > MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write); > MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write); > > - MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL); > - > + MMIO_D(MAD_DIMM_C0, D_ALL); > + MMIO_D(MAD_DIMM_C1, D_ALL); > + MMIO_D(MAD_DIMM_C2, D_ALL); > + MMIO_D(MCH_SSKPD, D_ALL); > + MMIO_D(MCH_SECP_NRG_STTS, D_ALL); > + MMIO_D(GEN6_GT_PERF_STATUS, D_ALL); > + MMIO_D(BXT_GT_PERF_STATUS, D_ALL); > + MMIO_D(GEN6_RP_STATE_LIMITS, D_ALL); > + MMIO_D(D_COMP_HSW, D_ALL); > MMIO_D(TILECTL, D_ALL); > > MMIO_D(GEN6_UCGCTL1, D_ALL); > @@ -2080,7 +2468,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) > > MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL); > > - MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL); > + MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW); > MMIO_D(GEN6_PCODE_DATA, D_ALL); > MMIO_D(0x13812c, D_ALL); > MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); > @@ -2102,7 +2490,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) > MMIO_D(0x7180, D_ALL); > MMIO_D(0x7408, D_ALL); > MMIO_D(0x7c00, D_ALL); > - MMIO_D(GEN6_MBCTL, D_ALL); > + MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write); > MMIO_D(0x911c, D_ALL); > MMIO_D(0x9120, D_ALL); > MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); > @@ -2159,36 +2547,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) > MMIO_D(0x1a054, D_ALL); > > MMIO_D(0x44070, D_ALL); > - > - MMIO_D(0x215c, D_HSW_PLUS); > + MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL); > MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL); > MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL); > MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL); > MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL); > > - MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL); > - MMIO_D(GEN7_OACONTROL, D_HSW); > + MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL); > + MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL); > MMIO_D(0x2b00, D_BDW_PLUS); > MMIO_D(0x2360, D_BDW_PLUS); > - MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(0x5280, 16, 
F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > > MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > - MMIO_D(BCS_SWCTRL, D_ALL); > - > - MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > - MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); > + MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL); > + > + MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > + MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); > MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); > MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); > MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); > @@ -2196,6 +2583,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) > MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); > MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > > + MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL); > + MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > return 0; > } > > @@ -2204,7 +2602,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) > struct drm_i915_private *dev_priv = gvt->dev_priv; > int ret; > > - MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, > + MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL, > intel_vgpu_reg_imr_handler); > > MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); > @@ -2269,24 +2667,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) > MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, > intel_vgpu_reg_master_irq_handler); > > - MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS); > - MMIO_D(0x1c134, D_BDW_PLUS); > - > - 
MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS); > - MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS); > - MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); > - MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS); > - MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS); > - MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS); > - MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write); > - MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, > - NULL, NULL); > - MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, > - NULL, NULL); > + MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, > + F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + > + MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, > + NULL, NULL); > + MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS, > + F_CMD_ACCESS, NULL, NULL); > + MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); > + MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, > + NULL, NULL); > + MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS, > + F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS, > + F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, > + ring_mode_mmio_write); > + MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, > + F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, > + F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, > ring_timestamp_mmio_read, NULL); > > - MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS); > + MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > > #define RING_REG(base) (base + 0xd0) > MMIO_RING_F(RING_REG, 4, F_RO, 0, > @@ -2303,13 +2708,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) > #undef RING_REG > > #define RING_REG(base) (base + 0x234) > - MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); > - MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL); > + MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, > + NULL, NULL); > + MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0, > + ~0LL, D_BDW_PLUS, NULL, NULL); > #undef RING_REG > > #define RING_REG(base) (base + 0x244) > - MMIO_RING_D(RING_REG, D_BDW_PLUS); > - MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS); > + MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, > + NULL, NULL); > #undef RING_REG > > #define RING_REG(base) (base + 0x370) > @@ -2331,6 +2739,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) > MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS); > MMIO_D(0x1c054, D_BDW_PLUS); > > + MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); > + > MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); > MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); > > @@ -2341,14 +2751,14 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) > MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); > #undef RING_REG > > - MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); > - MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL); > + MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); > + MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); > > MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, 
NULL); > > - MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW); > - MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW); > - MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW); > + MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS); > + MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS); > + MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); > > MMIO_D(WM_MISC, D_BDW); > MMIO_D(BDW_EDP_PSR_BASE, D_BDW); > @@ -2362,27 +2772,30 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) > MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS); > MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS); > > - MMIO_D(0xfdc, D_BDW); > - MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > - MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS); > - MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS); > + MMIO_D(0xfdc, D_BDW_PLUS); > + MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, > + NULL, NULL); > + MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, > + NULL, NULL); > + MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > > - MMIO_D(0xb1f0, D_BDW); > - MMIO_D(0xb1c0, D_BDW); > + MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL); > MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > - MMIO_D(0xb100, D_BDW); > - MMIO_D(0xb10c, D_BDW); > + MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL); > MMIO_D(0xb110, D_BDW); > > - MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > - MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > - MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > - MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, > + NULL, force_nonpriv_write); > + > + MMIO_D(0x44484, D_BDW_PLUS); > + MMIO_D(0x4448c, D_BDW_PLUS); > > - MMIO_D(0x83a4, D_BDW); > + MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL); > MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS); > > - MMIO_D(0x8430, D_BDW); > + MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL); > > MMIO_D(0x110000, D_BDW_PLUS); > > @@ -2394,10 +2807,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) > MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > - MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); > - > - MMIO_D(0x2248, D_BDW); > - > + MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > + > + MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL); > + > + MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); > return 0; > } > > @@ -2420,7 +2842,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) > MMIO_D(HSW_PWR_WELL_BIOS, D_SKL); > MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write); > > - MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write); > MMIO_D(0xa210, D_SKL_PLUS); > MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); > MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); > @@ -2463,106 +2884,37 
@@ static int init_skl_mmio_info(struct intel_gvt *gvt) > MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write); > MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write); > > - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL); > - > - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL); > - > - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL); > - > MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL); > MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL); > MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL); > > - MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > - MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > - MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > - > - MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > - MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > - MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > - > - MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > - MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > - MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > - > MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); > > - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL); > - > - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL); > - > - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL); > - > MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL); > MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL); > MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL); > > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL); > - > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL); > - > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL); > - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL); > - > - MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C0(PIPE_A, 4), 
D_SKL, NULL, NULL); > - > - MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL); > - > - MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL); > - > - MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL); > - > - MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL); > - > - MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL); > - MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL); > - > - MMIO_D(0x70380, D_SKL); > - MMIO_D(0x71380, D_SKL); > - MMIO_D(0x72380, D_SKL); > - MMIO_D(0x7039c, D_SKL); > +// MMIO_PLANES_DH(PLANE_COLOR_CTL, D_SKL, NULL, NULL); > + MMIO_PLANES_DH(PLANE_CTL, D_SKL, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_STRIDE, D_SKL, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_POS, D_SKL, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_SIZE, D_SKL, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_KEYVAL, D_SKL, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_KEYMSK, D_SKL, NULL, skl_plane_mmio_write); > + > + MMIO_PLANES_DH(PLANE_SURF, D_SKL, NULL, skl_plane_surf_write); > + > + MMIO_PLANES_DH(PLANE_KEYMAX, D_SKL, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_OFFSET, D_SKL, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(_REG_701C0, D_SKL, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(_REG_701C4, D_SKL, NULL, skl_plane_mmio_write); > + > + MMIO_PLANES_SDH(_PLANE_WM_BASE, 4 * 8, D_SKL, NULL, NULL); > + MMIO_PLANES_DH(PLANE_WM_TRANS, D_SKL, NULL, NULL); > + MMIO_PLANES_DH(PLANE_NV12_BUF_CFG, D_SKL, NULL, NULL); > + MMIO_PLANES_DH(PLANE_BUF_CFG, D_SKL, NULL, NULL); > > MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL); > MMIO_D(0x8f074, D_SKL); > @@ -2578,16 +2930,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) > MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); > > MMIO_D(0xd08, D_SKL); > - MMIO_D(0x20e0, D_SKL); > - MMIO_D(0x20ec, D_SKL); > + MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL); > + MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); > > /* TRTT */ > - MMIO_D(0x4de0, D_SKL); > - MMIO_D(0x4de4, D_SKL); > - MMIO_D(0x4de8, D_SKL); > - MMIO_D(0x4dec, D_SKL); > - MMIO_D(0x4df0, D_SKL); > - MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write); > + MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write); > MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write); > > MMIO_D(0x45008, D_SKL); > @@ -2611,19 +2963,320 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) > MMIO_D(0x65f08, D_SKL); > MMIO_D(0x320f0, D_SKL); > > - MMIO_D(_REG_VCS2_EXCC, D_SKL); > + 
MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL); > MMIO_D(0x70034, D_SKL); > MMIO_D(0x71034, D_SKL); > MMIO_D(0x72034, D_SKL); > > - MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL); > - MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL); > - MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL); > - MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL); > - MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL); > - MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL); > - > MMIO_D(0x44500, D_SKL); > + MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS, > + NULL, NULL); > + return 0; > +} > + > +static int init_bxt_mmio_info(struct intel_gvt *gvt) > +{ > + struct drm_i915_private *dev_priv = gvt->dev_priv; > + int ret; > + > + MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); > + MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL); > + MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); > + MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL); > + MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); > + MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); > + > + MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); > + MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); > + MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); > + > + MMIO_D(HSW_PWR_WELL_BIOS, D_BXT); > + MMIO_DH(HSW_PWR_WELL_DRIVER, D_BXT, NULL, skl_power_well_ctl_write); > + > + MMIO_D(0xa210, D_SKL_PLUS); > + MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); > + MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); > + MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); > + MMIO_DH(0x4ddc, D_BXT, NULL, skl_misc_ctl_write); > + MMIO_DH(0x42080, D_BXT, NULL, skl_misc_ctl_write); > + MMIO_D(0x45504, D_BXT); > + MMIO_D(0x45520, D_BXT); > + MMIO_D(0x46000, D_BXT); > + MMIO_DH(0x46010, D_BXT, NULL, skl_lcpll_write); > + MMIO_DH(0x46014, D_BXT, NULL, skl_lcpll_write); > + MMIO_D(0x6C040, D_BXT); > + MMIO_D(0x6C048, D_BXT); > + MMIO_D(0x6C050, D_BXT); > + MMIO_D(0x6C044, D_BXT); > + MMIO_D(0x6C04C, D_BXT); > + MMIO_D(0x6C054, D_BXT); > + MMIO_D(0x6c058, D_BXT); > + MMIO_D(0x6c05c, D_BXT); > + MMIO_DH(0X6c060, D_BXT, dpll_status_read, NULL); > + > + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_BXT, NULL, pf_write); > + > + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_BXT, NULL, pf_write); > + > + MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_BXT, NULL, pf_write); > + MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_BXT, NULL, pf_write); > + > + MMIO_DH(CUR_BUF_CFG(PIPE_A), D_BXT, NULL, NULL); > + 
MMIO_DH(CUR_BUF_CFG(PIPE_B), D_BXT, NULL, NULL); > + MMIO_DH(CUR_BUF_CFG(PIPE_C), D_BXT, NULL, NULL); > + > + MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_BXT, NULL, NULL); > + MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_BXT, NULL, NULL); > + MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_BXT, NULL, NULL); > + > + MMIO_DH(CUR_WM_TRANS(PIPE_A), D_BXT, NULL, NULL); > + MMIO_DH(CUR_WM_TRANS(PIPE_B), D_BXT, NULL, NULL); > + MMIO_DH(CUR_WM_TRANS(PIPE_C), D_BXT, NULL, NULL); > + > + MMIO_PLANES_DH(PLANE_CTL, D_BXT, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_STRIDE, D_BXT, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_POS, D_BXT, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_SIZE, D_BXT, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_KEYVAL, D_BXT, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_KEYMSK, D_BXT, NULL, skl_plane_mmio_write); > + > + MMIO_PLANES_DH(PLANE_SURF, D_BXT, NULL, skl_plane_surf_write); > + > + MMIO_PLANES_DH(PLANE_KEYMAX, D_BXT, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_OFFSET, D_BXT, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(_REG_701C0, D_BXT, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(_REG_701C4, D_BXT, NULL, skl_plane_mmio_write); > + > + MMIO_PLANES_SDH(_PLANE_WM_BASE, 4 * 8, D_BXT, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_WM_TRANS, D_BXT, NULL, skl_plane_mmio_write); > + MMIO_PLANES_DH(PLANE_NV12_BUF_CFG, D_BXT, NULL, NULL); > + MMIO_PLANES_DH(PLANE_BUF_CFG, D_BXT, NULL, NULL); > + > + MMIO_F(0x80000, 0x3000, 0, 0, 0, D_BXT, NULL, NULL); > + MMIO_D(0x8f074, D_BXT); > + MMIO_D(0x8f004, D_BXT); > + MMIO_D(0x8f034, D_BXT); > + > + MMIO_D(0xb11c, D_BXT); > + > + MMIO_D(0x51000, D_BXT); > + MMIO_D(0x6c00c, D_BXT); > + > + MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); > + MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); > + > + MMIO_D(0xd08, D_BXT); > + MMIO_D(0x20e0, D_BXT); > + MMIO_D(0x20ec, D_BXT); > + > + /* TRTT */ > + MMIO_D(0x4de0, D_BXT); > + MMIO_D(0x4de4, D_BXT); > + MMIO_D(0x4de8, D_BXT); > + MMIO_D(0x4dec, D_BXT); > + MMIO_D(0x4df0, D_BXT); > + MMIO_DH(0x4df4, D_BXT, NULL, gen9_trtte_write); > + MMIO_DH(0x4dfc, D_BXT, NULL, gen9_trtt_chicken_write); > + > + MMIO_DH(0x45008, D_BXT, NULL, bxt_dbuf_ctl_write); > + > + MMIO_D(0x46430, D_BXT); > + > + MMIO_D(0x46520, D_BXT); > + > + MMIO_D(0xc403c, D_BXT); > + MMIO_D(0xb004, D_BXT); > + MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); > + > + MMIO_D(0x65900, D_BXT); > + MMIO_D(0x1082c0, D_BXT); > + MMIO_D(0x4068, D_BXT); > + MMIO_D(0x67054, D_BXT); > + MMIO_D(0x6e560, D_BXT); > + MMIO_D(0x6e554, D_BXT); > + MMIO_D(0x2b20, D_BXT); > + MMIO_D(0x65f00, D_BXT); > + MMIO_D(0x65f08, D_BXT); > + MMIO_D(0x320f0, D_BXT); > + > + MMIO_D(_REG_VCS2_EXCC, D_BXT); > + MMIO_D(0x70034, D_BXT); > + MMIO_D(0x71034, D_BXT); > + MMIO_D(0x72034, D_BXT); > + > + MMIO_D(0x44500, D_BXT); > + > + MMIO_D(GEN8_GTCR, D_SKL_PLUS); > + > + MMIO_D(GEN7_SC_INSTDONE, D_SKL_PLUS); > + MMIO_D(GEN7_SAMPLER_INSTDONE, D_SKL_PLUS); > + MMIO_D(GEN7_ROW_INSTDONE, D_SKL_PLUS); > + MMIO_D(GEN8_FAULT_TLB_DATA0, D_SKL_PLUS); > + MMIO_D(GEN8_FAULT_TLB_DATA1, D_SKL_PLUS); > + MMIO_D(ERROR_GEN6, D_SKL_PLUS); > + MMIO_D(DONE_REG, D_SKL_PLUS); > + MMIO_D(EIR, D_SKL_PLUS); > + MMIO_D(PGTBL_ER, D_SKL_PLUS); > + MMIO_D(0x4194, D_SKL_PLUS); > + MMIO_D(0x4294, D_SKL_PLUS); > + MMIO_D(0x4494, D_SKL_PLUS); > + > + MMIO_RING_D(RING_PSMI_CTL, D_SKL_PLUS); > + MMIO_RING_D(RING_DMA_FADD, D_SKL_PLUS); > + MMIO_RING_D(RING_DMA_FADD_UDW, D_SKL_PLUS); > + MMIO_RING_D(RING_IPEHR, 
D_SKL_PLUS); > + MMIO_RING_D(RING_INSTPS, D_SKL_PLUS); > + MMIO_RING_D(RING_BBADDR_UDW, D_SKL_PLUS); > + MMIO_RING_D(RING_BBSTATE, D_SKL_PLUS); > + MMIO_RING_D(RING_IPEIR, D_SKL_PLUS); > + MMIO_RING_D(RING_INSTDONE, D_SKL_PLUS); > + > + MMIO_D(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS); > + MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_SKL_PLUS, NULL, NULL); > + MMIO_D(0xc4c8, D_SKL_PLUS); > + MMIO_D(GUC_BCS_RCS_IER, D_SKL_PLUS); > + MMIO_D(GUC_VCS2_VCS1_IER, D_SKL_PLUS); > + MMIO_D(GUC_WD_VECS_IER, D_SKL_PLUS); > + MMIO_D(GUC_MAX_IDLE_COUNT, D_SKL_PLUS); > + > + MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write); > + MMIO_D(BXT_RP_STATE_CAP, D_BXT); > + MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT, NULL, bxt_phy_ctl_family_write); > + MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT, NULL, bxt_phy_ctl_family_write); > + MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT); > + MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT); > + MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT); > + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT, NULL, bxt_port_pll_enable_write); > + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT, NULL, bxt_port_pll_enable_write); > + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL, bxt_port_pll_enable_write); > + > + MMIO_PORT_CL_REF(DPIO_PHY0); > + MMIO_PORT_PCS_TX(DPIO_PHY0, DPIO_CH0); > + MMIO_PORT_PCS_TX(DPIO_PHY0, DPIO_CH1); > + MMIO_PORT_CL_REF(DPIO_PHY1); > + MMIO_PORT_PCS_TX(DPIO_PHY1, DPIO_CH0); > + > + MMIO_D(BXT_DE_PLL_CTL, D_BXT); > + MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write); > + MMIO_D(BXT_DSI_PLL_CTL, D_BXT); > + MMIO_DH(BXT_DSI_PLL_ENABLE, D_BXT, NULL, bxt_dsi_pll_enable_write); > + > + MMIO_D(BXT_BLC_PWM_CTL(0), D_BXT); > + MMIO_D(BXT_BLC_PWM_FREQ(0), D_BXT); > + MMIO_D(BXT_BLC_PWM_DUTY(0), D_BXT); > + MMIO_D(BXT_BLC_PWM_CTL(1), D_BXT); > + MMIO_D(BXT_BLC_PWM_FREQ(1), D_BXT); > + MMIO_D(BXT_BLC_PWM_DUTY(1), D_BXT); > + > + MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); > + > + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); > + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); > + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); > + > + MMIO_D(RC6_LOCATION, D_BXT); > + MMIO_D(RC6_CTX_BASE, D_BXT); > + > + MMIO_D(0xA248, D_SKL_PLUS); > + MMIO_D(0xA250, D_SKL_PLUS); > + MMIO_D(0xA25C, D_SKL_PLUS); > + MMIO_D(0xA000, D_SKL_PLUS); > + MMIO_D(0xB100, D_SKL_PLUS); > + MMIO_D(0xD00, D_SKL_PLUS); > + > + return 0; > +} > + > +static int init_bxt_dsi_mmio_info(struct intel_gvt *gvt) > +{ > + int ret; > + > + MMIO_D(BXT_DSI_CFG, D_BXT); > + MMIO_D(BXT_DSI_TXCNTRL, D_BXT); > + > + MMIO_D(BXT_MIPI_CLOCK_CTL, D_BXT); > + MMIO_D(_BXT_MIPIA_TRANS_HACTIVE, D_BXT); > + MMIO_D(_BXT_MIPIC_TRANS_HACTIVE, D_BXT); > + MMIO_D(_BXT_MIPIA_TRANS_VACTIVE, D_BXT); > + MMIO_D(_BXT_MIPIC_TRANS_VACTIVE, D_BXT); > + MMIO_D(_BXT_MIPIA_TRANS_VTOTAL, D_BXT); > + MMIO_D(_BXT_MIPIC_TRANS_VTOTAL, D_BXT); > + MMIO_D(_BXT_MIPIA_PORT_CTRL, D_BXT); > + MMIO_D(_BXT_MIPIC_PORT_CTRL, D_BXT); > + > + MMIO_D(0x6b000, D_BXT); > + MMIO_D(0x6b80c, D_BXT); > + MMIO_D(0x6b104, D_BXT); > + MMIO_D(0x6b030, D_BXT); > + MMIO_D(0x6b028, D_BXT); > + MMIO_D(0x6b02c, D_BXT); > + MMIO_D(0x6b040, D_BXT); > + MMIO_D(0x6b038, D_BXT); > + MMIO_D(0x6b03c, D_BXT); > + MMIO_D(0x6b004, D_BXT); > + MMIO_D(0x6b048, D_BXT); > + MMIO_D(0x6b804, D_BXT); > + MMIO_D(0x6b808, D_BXT); > + MMIO_D(0x6b848, D_BXT); > + MMIO_D(0x6b074, D_BXT); > + MMIO_D(0x6b874, D_BXT); > + MMIO_D(0x6b05c, D_BXT); > + MMIO_D(0x6b00c, D_BXT); > + MMIO_D(0x6b800, D_BXT); > + MMIO_D(0x6b85c, D_BXT); > + MMIO_D(0x6b06c, D_BXT); > + MMIO_D(0x6b008, D_BXT); > + MMIO_D(0x6b080, D_BXT); > + 
MMIO_D(0x6b020, D_BXT); > + MMIO_D(0x6b904, D_BXT); > + MMIO_D(0x6b880, D_BXT); > + MMIO_D(0x6b820, D_BXT); > + MMIO_D(0x6b034, D_BXT); > + MMIO_D(0x6b834, D_BXT); > + MMIO_D(0x6b830, D_BXT); > + MMIO_D(0x6b828, D_BXT); > + MMIO_D(0x6b82c, D_BXT); > + MMIO_D(0x6b840, D_BXT); > + MMIO_D(0x6b838, D_BXT); > + MMIO_D(0x6b83c, D_BXT); > + MMIO_D(0x6b010, D_BXT); > + MMIO_D(0x6b014, D_BXT); > + MMIO_D(0x6b018, D_BXT); > + MMIO_D(0x6b01c, D_BXT); > + MMIO_D(0x6b050, D_BXT); > + MMIO_D(0x6b044, D_BXT); > + MMIO_D(0x6b060, D_BXT); > + MMIO_D(0x6b084, D_BXT); > + MMIO_D(0x6b088, D_BXT); > + MMIO_D(0x6b058, D_BXT); > + MMIO_D(0x6b810, D_BXT); > + MMIO_D(0x6b814, D_BXT); > + MMIO_D(0x6b818, D_BXT); > + MMIO_D(0x6b81c, D_BXT); > + MMIO_D(0x6b850, D_BXT); > + MMIO_D(0x6b844, D_BXT); > + MMIO_D(0x6b860, D_BXT); > + MMIO_D(0x6b884, D_BXT); > + MMIO_D(0x6b888, D_BXT); > + MMIO_D(0x6b858, D_BXT); > + MMIO_D(0x6b064, D_BXT); > + MMIO_D(0x1f040c, D_BXT); > + > return 0; > } > > @@ -2707,6 +3360,19 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) > ret = init_skl_mmio_info(gvt); > if (ret) > goto err; > + } else if (IS_BROXTON(dev_priv)) { > + ret = init_broadwell_mmio_info(gvt); > + if (ret) > + goto err; > + ret = init_bxt_mmio_info(gvt); > + if (ret) > + goto err; > + > + if (intel_bios_is_dsi_present(dev_priv, NULL)) { > + ret = init_bxt_dsi_mmio_info(gvt); > + if (ret) > + goto err; > + } > } > return 0; > err: > @@ -2813,3 +3479,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, > write_vreg(vgpu, offset, p_data, bytes); > return 0; > } > + > +/** > + * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be > + * force-nopriv register > + * > + * @gvt: a GVT device > + * @offset: register offset > + * > + * Returns: > + * True if the register is in force-nonpriv whitelist; > + * False if outside; > + */ > +bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, > + unsigned int offset) > +{ > + return in_whitelist(offset); > +} > diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c > index 1933735..b44c04e 100644 > --- a/drivers/gpu/drm/i915/i915_drv.c > +++ b/drivers/gpu/drm/i915/i915_drv.c > @@ -578,10 +578,6 @@ static int i915_load_modeset_init(struct drm_device *dev) > if (i915_inject_load_failure()) > return -ENODEV; > > - ret = intel_bios_init(dev_priv); > - if (ret) > - DRM_INFO("failed to find VBIOS tables\n"); > - > /* If we have > 1 VGA cards, then we need to arbitrate access > * to the common VGA resources. 
> * > @@ -1089,6 +1085,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) > intel_uncore_sanitize(dev_priv); > > intel_opregion_setup(dev_priv); > + ret = intel_bios_init(dev_priv); > + if (ret) > + DRM_INFO("failed to find VBIOS tables\n"); > + > > i915_gem_load_init_fences(dev_priv); > > diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c > index 6912f51..13cf859 100644 > --- a/drivers/gpu/drm/i915/i915_irq.c > +++ b/drivers/gpu/drm/i915/i915_irq.c > @@ -807,13 +807,16 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) > if (HAS_DDI(dev_priv) && !position) { > int i, temp; > > - for (i = 0; i < 100; i++) { > - udelay(1); > - temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & > - DSL_LINEMASK_GEN3; > - if (temp != position) { > - position = temp; > - break; > + if (!(intel_bios_is_dsi_present(dev_priv, NULL) && > + intel_vgpu_active(dev_priv))) { > + for (i = 0; i < 100; i++) { > + udelay(1); > + temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & > + DSL_LINEMASK_GEN3; > + if (temp != position) { > + position = temp; > + break; > + } > } > } > } -- Jani Nikula, Intel Open Source Technology Center