This little helper only exists to safely discard the upper unused 32 bits
of the general 64-bit VMA address: since we know that all Global GTTs are
currently less than 4GiB in size, the upper bits must be zero. In many
places we use a u32 for the global GTT offset, and we want to document
where we are discarding the full VMA offset.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  6 +++---
 drivers/gpu/drm/i915/i915_drv.h            |  2 +-
 drivers/gpu/drm/i915/i915_gem.c            |  6 +++---
 drivers/gpu/drm/i915/i915_gem_context.c    |  6 ++++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.h        |  7 +++++++
 drivers/gpu/drm/i915/i915_guc_submission.c | 15 ++++++++-------
 drivers/gpu/drm/i915/intel_display.c       | 11 +++++------
 drivers/gpu/drm/i915/intel_fbdev.c         |  6 +++---
 drivers/gpu/drm/i915/intel_guc_loader.c    |  6 +++---
 drivers/gpu/drm/i915/intel_lrc.c           | 20 +++++++++++---------
 drivers/gpu/drm/i915/intel_overlay.c       |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 28 ++++++++++++++--------------
 13 files changed, 64 insertions(+), 53 deletions(-)
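Not part of the patch, just a quick reference for reviewers: the new helper
from the i915_gem_gtt.h hunk below, plus a hypothetical caller (example_emit()
is illustrative only, not in this series) showing the intended pattern. The
GEM_BUG_ON()s document the assumption that a GGTT VMA can never be bound
above 4GiB:

  static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
  {
          /* Only GGTT VMAs have a meaningful global GTT offset */
          GEM_BUG_ON(!i915_vma_is_ggtt(vma));
          /* The global GTT is sub-4GiB, so the top 32 bits must be zero */
          GEM_BUG_ON(upper_32_bits(vma->node.start));
          return lower_32_bits(vma->node.start);
  }

  /* Hypothetical caller: the u32 documents that the value is a global GTT
   * offset, not a full 64-bit VMA address.
   */
  static void example_emit(struct intel_ring *ring, struct i915_vma *vma)
  {
          u32 offset = i915_ggtt_offset(vma); /* was: vma->node.start */

          intel_ring_emit(ring, offset);
  }
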
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 28f14192fce5..f67c53baaa75 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2008,7 +2008,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 
 	if (vma->flags & I915_VMA_GLOBAL_BIND)
 		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
-			   lower_32_bits(vma->node.start));
+			   i915_ggtt_offset(vma));
 
 	if (i915_gem_object_get_pages(vma->obj)) {
 		seq_puts(m, "\tFailed to get pages for context object\n\n");
@@ -2020,8 +2020,8 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 		u32 *reg_state = kmap_atomic(page);
 
 		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-			seq_printf(m, "\t[0x%08llx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				   vma->node.start + 4096 + (j * 4),
+			seq_printf(m, "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   j * 4,
 				   reg_state[j], reg_state[j + 1],
 				   reg_state[j + 2], reg_state[j + 3]);
 		}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bfa2bdae0f7d..0a58d36923f1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3323,7 +3323,7 @@ static inline unsigned long
 i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
 			    const struct i915_ggtt_view *view)
 {
-	return i915_gem_object_to_ggtt(o, view)->node.start;
+	return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
 }
 
 /* i915_gem_fence.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19d81f28cbbe..54d277eac631 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -758,7 +758,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 
 		i915_gem_object_pin_pages(obj);
 	} else {
-		node.start = vma->node.start;
+		node.start = i915_ggtt_offset(vma);
 		node.allocated = false;
 		ret = i915_gem_object_put_fence(obj);
 		if (ret)
@@ -1062,7 +1062,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 
 		i915_gem_object_pin_pages(obj);
 	} else {
-		node.start = vma->node.start;
+		node.start = i915_ggtt_offset(vma);
 		node.allocated = false;
 		ret = i915_gem_object_put_fence(obj);
 		if (ret)
@@ -1703,7 +1703,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 		goto err_unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	pfn = ggtt->mappable_base + vma->node.start;
+	pfn = ggtt->mappable_base + i915_ggtt_offset(vma);
 	pfn >>= PAGE_SHIFT;
 
 	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e566167d9441..98d2956f91f4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -631,7 +631,8 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, req->ctx->engine[RCS].state->node.start | flags);
+	intel_ring_emit(ring,
+			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -660,7 +661,8 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 					MI_STORE_REGISTER_MEM |
 					MI_SRM_LRM_GLOBAL_GTT);
 			intel_ring_emit_reg(ring, last_reg);
-			intel_ring_emit(ring, engine->scratch->node.start);
+			intel_ring_emit(ring,
+					i915_ggtt_offset(engine->scratch));
 			intel_ring_emit(ring, MI_NOOP);
 		}
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7e963033863f..46c9a1ae701d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -370,7 +370,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		goto unpin;
 
 	/* Map the page containing the relocation we're going to perform. */
-	offset = vma->node.start;
+	offset = i915_ggtt_offset(vma);
 	offset += reloc->offset;
 	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
 					      offset & PAGE_MASK);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 93422b0373af..9aefa3cbd126 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -275,6 +275,13 @@ static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
 	return vma->active & BIT(engine);
 }
 
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+	GEM_BUG_ON(upper_32_bits(vma->node.start));
+	return lower_32_bits(vma->node.start);
+}
+
 struct i915_page_dma {
 	struct page *page;
 	union {
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 991051ff8a1d..83c6476d5583 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -358,11 +358,11 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 
 		/* The state page is after PPHWSP */
 		lrc->ring_lcra =
-			ce->state->node.start + LRC_STATE_PN * PAGE_SIZE;
+			i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
 				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
 
-		lrc->ring_begin = ce->ring->vma->node.start;
+		lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
 		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
 		lrc->ring_next_free_location = lrc->ring_begin;
 		lrc->ring_current_tail_pointer_value = 0;
@@ -378,7 +378,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	 * The doorbell, process descriptor, and workqueue are all parts
 	 * of the client object, which the GuC will reference via the GGTT
 	 */
-	gfx_addr = client->client->node.start;
+	gfx_addr = i915_ggtt_offset(client->client);
 	desc.db_trigger_phy = sg_dma_address(client->client->pages->sgl) +
 				client->doorbell_offset;
 	desc.db_trigger_cpu = (uintptr_t)client->client_base +
@@ -862,7 +862,7 @@ static void guc_create_log(struct intel_guc *guc)
 		   (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
 		   (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
 
-	offset = vma->node.start >> PAGE_SHIFT; /* in pages */
+	offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
 	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
@@ -933,7 +933,8 @@ static void guc_create_ads(struct intel_guc *guc)
 	policies = (void *)ads + sizeof(struct guc_ads);
 	init_guc_policies(policies);
 
-	ads->scheduler_policies = vma->node.start + sizeof(struct guc_ads);
+	ads->scheduler_policies =
+		i915_ggtt_offset(vma) + sizeof(struct guc_ads);
 
 	/* MMIO reg state */
 	reg_state = (void *)policies + sizeof(struct guc_policies);
@@ -1061,7 +1062,7 @@ int intel_guc_suspend(struct drm_device *dev)
 	/* any value greater than GUC_POWER_D0 */
 	data[1] = GUC_POWER_D1;
 	/* first page is shared data with GuC */
-	data[2] = ctx->engine[RCS].state->node.start;
+	data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
 
 	return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
@@ -1086,7 +1087,7 @@ int intel_guc_resume(struct drm_device *dev)
 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
 	/* first page is shared data with GuC */
-	data[2] = ctx->engine[RCS].state->node.start;
+	data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
 
 	return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b0fdb873eda2..2b3197ce7557 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2844,7 +2844,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
 {
 	struct i915_ggtt_view view;
 	struct i915_vma *vma;
-	u64 offset;
+	u32 offset;
 
 	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
 				intel_plane->base.state->rotation);
@@ -2854,16 +2854,14 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
 		  view.type))
 		return -1;
 
-	offset = vma->node.start;
+	offset = i915_ggtt_offset(vma);
 
 	if (plane == 1) {
 		offset += vma->ggtt_view.params.rotated.uv_start_page *
 			  PAGE_SIZE;
 	}
 
-	WARN_ON(upper_32_bits(offset));
-
-	return lower_32_bits(offset);
+	return offset;
 }
 
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -11410,7 +11408,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
 					MI_SRM_LRM_GLOBAL_GTT);
 		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, req->engine->scratch->node.start + 256);
+		intel_ring_emit(ring,
+				i915_ggtt_offset(req->engine->scratch) + 256);
 		if (IS_GEN8(dev)) {
 			intel_ring_emit(ring, 0);
 			intel_ring_emit(ring, MI_NOOP);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 863e7a91ece0..cfe28d19ad68 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -249,7 +249,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = ggtt->mappable_end;
 
-	info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+	info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
 	info->fix.smem_len = vma->node.size;
 
 	vaddr = i915_vma_pin_iomap(vma);
@@ -276,8 +276,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx\n",
-		      fb->width, fb->height, vma->node.start);
+	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
+		      fb->width, fb->height, i915_ggtt_offset(vma));
 
 	ifbdev->vma = vma;
 
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 0d57a961a33a..9d528315d065 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -194,14 +194,14 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
 	}
 
 	if (guc->ads) {
-		u32 ads = (u32)guc->ads->node.start >> PAGE_SHIFT;
+		u32 ads = i915_ggtt_offset(guc->ads) >> PAGE_SHIFT;
 		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
 		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
 	}
 
 	/* If GuC submission is enabled, set up additional parameters here */
 	if (i915.enable_guc_submission) {
-		u32 pgs = dev_priv->guc.ctx_pool->node.start;
+		u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool);
 		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
 
 		pgs >>= PAGE_SHIFT;
@@ -271,7 +271,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
 	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 
 	/* Set the source address for the new blob */
-	offset = vma->node.start + guc_fw->header_offset;
+	offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
 	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
 	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0e53704f3a19..00620b44d9b3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -315,7 +315,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 
 	desc = ctx->desc_template;			/* bits  3-4  */
 	desc |= engine->ctx_desc_template;		/* bits  0-11 */
-	desc |= ce->state->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+	desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
 							/* bits 12-31 */
 	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
 
@@ -792,7 +792,8 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 
 	intel_lr_context_descriptor_update(ctx, engine);
 
-	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
+	lrc_reg_state[CTX_RING_BUFFER_START+1] =
+		i915_ggtt_offset(ce->ring->vma);
 	ce->lrc_reg_state = lrc_reg_state;
 
 	ce->state->obj->dirty = true;
@@ -914,7 +915,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-	wa_ctx_emit(batch, index, engine->scratch->node.start + 256);
+	wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
 	wa_ctx_emit(batch, index, 0);
 
 	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -932,7 +933,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
 	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-	wa_ctx_emit(batch, index, engine->scratch->node.start + 256);
+	wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
 	wa_ctx_emit(batch, index, 0);
 
 	return index;
@@ -993,7 +994,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
 
 	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
 	/* Actual scratch location is at 128 bytes offset */
-	scratch_addr = engine->scratch->node.start + 2*CACHELINE_BYTES;
+	scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 
 	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
 	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1073,7 +1074,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 	/* Actual scratch location is at 128 bytes offset */
 	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
 		uint32_t scratch_addr =
-			engine->scratch->node.start + 2*CACHELINE_BYTES;
+			i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 
 		wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
 		wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1480,7 +1481,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 {
 	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
-	u32 scratch_addr = engine->scratch->node.start + 2 * CACHELINE_BYTES;
+	u32 scratch_addr =
+		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 flags = 0;
 	int ret;
@@ -1750,7 +1752,7 @@ lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
 		return PTR_ERR(hws);
 
 	engine->status_page.page_addr = hws + hws_offset;
-	engine->status_page.ggtt_offset = vma->node.start + hws_offset;
+	engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
 	engine->status_page.vma = vma;
 
 	return 0;
@@ -2018,7 +2020,7 @@ populate_lr_context(struct i915_gem_context *ctx,
 		       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
 	if (engine->wa_ctx.vma) {
 		struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
-		u32 ggtt_offset = wa_ctx->vma->node.start;
+		u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
 
 		reg_state[CTX_RCS_INDIRECT_CTX+1] =
 			(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index e97e16cc906f..3d3f47c39fb3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1411,7 +1411,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 			ret = PTR_ERR(vma);
 			goto out_free_bo;
 		}
-		overlay->flip_addr = vma->node.start;
+		overlay->flip_addr = i915_ggtt_offset(vma);
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ff327084857b..2e761e73d0ec 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -176,7 +176,7 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
-		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
+		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -212,7 +212,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
-		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
+		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
 	int ret;
 
@@ -286,7 +286,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
-		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
+		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
 	int ret;
 
@@ -371,7 +371,7 @@ static int
 gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	u32 scratch_addr =
-		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
+		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
 	int ret;
 
@@ -571,7 +571,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
-	I915_WRITE_START(engine, ring->vma->node.start);
+	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
 
 	/* WaClearRingBufHeadRegAtInit:ctg,elk */
 	if (I915_READ_HEAD(engine))
@@ -586,16 +586,16 @@ static int init_ring_common(struct intel_engine_cs *engine)
 
 	/* If the head is still not zero, the ring is dead */
 	if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
-		     I915_READ_START(engine) == ring->vma->node.start &&
+		     I915_READ_START(engine) == i915_ggtt_offset(ring->vma) &&
 		     (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
-			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08llx]\n",
+			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08x]\n",
 			  engine->name,
 			  I915_READ_CTL(engine),
 			  I915_READ_CTL(engine) & RING_VALID,
 			  I915_READ_HEAD(engine), I915_READ_TAIL(engine),
 			  I915_READ_START(engine),
-			  ring->vma->node.start);
+			  i915_ggtt_offset(ring->vma));
 		ret = -EIO;
 		goto out;
 	}
@@ -1716,7 +1716,7 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
 		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
-	u32 cs_offset = req->engine->scratch->node.start;
+	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -1857,11 +1857,11 @@ static int init_status_page(struct intel_engine_cs *engine)
 		goto err_unref;
 
 	engine->status_page.vma = vma;
-	engine->status_page.ggtt_offset = vma->node.start;
+	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
 	engine->status_page.page_addr = i915_gem_object_pin_map(obj);
 
-	DRM_DEBUG_DRIVER("%s hws offset: 0x%08llx\n",
-			 engine->name, vma->node.start);
+	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+			 engine->name, i915_ggtt_offset(vma));
 	return 0;
 
 err_unref:
@@ -2545,13 +2545,13 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 	}
 
 	if (INTEL_GEN(dev_priv) >= 8) {
-		u64 offset = dev_priv->semaphore->node.start;
+		u32 offset = i915_ggtt_offset(dev_priv->semaphore);
 
 		engine->semaphore.sync_to = gen8_ring_sync_to;
 		engine->semaphore.signal = gen8_xcs_signal;
 
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			u64 ring_offset;
+			u32 ring_offset;
 
 			if (i != engine->id)
 				ring_offset = offset +
 					GEN8_SEMAPHORE_OFFSET(engine->id, i);
-- 
2.8.1