From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

Things like reliable GGTT mappings and mirrored 3d display will need to be
able to map the same object twice into the GGTT.

Add a ggtt_view field per VMA and select the page view based on the type of
the view.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
Cc: Daniel Vetter <daniel.vetter@xxxxxxxx>
---
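A note on how the new hooks are intended to be used, since this patch adds
no non-normal view type yet. The sketch below is purely illustrative and
not part of the patch; the mirror_view_* names are hypothetical. A view
that maps the object's pages twice back to back could implement
get_pages/put_pages along these lines, using the standard
<linux/scatterlist.h> helpers:

static struct sg_table *
mirror_view_get_pages(struct i915_ggtt_view *ggtt_view,
		      struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int pass, i, ret;

	/* Reuse the remapped layout if a previous bind built it. */
	if (ggtt_view->pages)
		return ggtt_view->pages;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	/* Twice the entries of the backing storage: the same pages
	 * appear in this view two times in a row. */
	ret = sg_alloc_table(st, 2 * obj->pages->nents, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	dst = st->sgl;
	for (pass = 0; pass < 2; pass++) {
		for_each_sg(obj->pages->sgl, src, obj->pages->nents, i) {
			sg_set_page(dst, sg_page(src), src->length,
				    src->offset);
			dst = sg_next(dst);
		}
	}

	ggtt_view->pages = st;
	return st;
}

static void mirror_view_put_pages(struct i915_ggtt_view *ggtt_view)
{
	/* Called after the PTEs have been written (and again on VMA
	 * destruction), so only the remapped table is freed here,
	 * never the backing pages themselves. */
	if (ggtt_view->pages) {
		sg_free_table(ggtt_view->pages);
		kfree(ggtt_view->pages);
		ggtt_view->pages = NULL;
	}
}

i915_vma_bind() below then simply prefers the table returned by get_pages
over obj->pages, so the individual bind_vma backends do not need to know
which view they are writing PTEs for.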
 drivers/gpu/drm/i915/i915_drv.h            |  3 +++
 drivers/gpu/drm/i915/i915_gem.c            |  9 ++++---
 drivers/gpu/drm/i915/i915_gem_context.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 38 +++++++++++++++++++++++-------
 drivers/gpu/drm/i915/i915_gem_gtt.h        | 24 ++++++++++++++++++-
 6 files changed, 63 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a830b85..92dec19 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2518,6 +2518,9 @@ int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm,
 				     uint32_t alignment,
 				     uint64_t flags);
+
+void i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+		   u32 flags);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7f283d8..d7027f9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3487,7 +3487,7 @@ search_free:
 	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
 	trace_i915_vma_bind(vma, flags);
-	vma->bind_vma(vma, obj->cache_level,
+	i915_vma_bind(vma, obj->cache_level,
 		      flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
 
 	return vma;
@@ -3695,7 +3695,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 
 		list_for_each_entry(vma, &obj->vma_list, vma_link)
 			if (drm_mm_node_allocated(&vma->node))
-				vma->bind_vma(vma, cache_level,
+				i915_vma_bind(vma, cache_level,
 					      vma->bound & GLOBAL_BIND);
 	}
 
@@ -4093,7 +4093,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
-		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+		i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
 
 	vma->pin_count++;
 	if (flags & PIN_MAPPABLE)
@@ -4511,6 +4511,9 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 
 	list_del(&vma->vma_link);
 
+	if (vma->ggtt_view.put_pages)
+		vma->ggtt_view.put_pages(&vma->ggtt_view);
+
 	kfree(vma);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5ff6e94..d365348 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -577,7 +577,7 @@ static int do_switch(struct intel_engine_cs *ring,
 
 	vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
 	if (!(vma->bound & GLOBAL_BIND))
-		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
+		i915_vma_bind(vma, to->legacy_hw_ctx.rcs_state->cache_level,
 			      GLOBAL_BIND);
 
 	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8d56d5b..5ba9440 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -358,7 +358,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	if (unlikely(IS_GEN6(dev) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
 	    !(target_vma->bound & GLOBAL_BIND)))
-		target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
+		i915_vma_bind(target_vma, target_i915_obj->cache_level,
 				GLOBAL_BIND);
 
 	/* Validate that the target is in a valid r/w GPU domain */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e4ee2ac..c0e7cd9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,8 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+const struct i915_ggtt_view i915_ggtt_view_normal;
+
 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
@@ -71,7 +73,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 }
 
-static void ppgtt_bind_vma(struct i915_vma *vma,
+static void ppgtt_bind_vma(struct i915_vma *vma, struct sg_table *pages,
 			   enum i915_cache_level cache_level,
 			   u32 flags);
 static void ppgtt_unbind_vma(struct i915_vma *vma);
 
@@ -1194,7 +1196,7 @@ void i915_ppgtt_release(struct kref *kref)
 }
 
 static void
-ppgtt_bind_vma(struct i915_vma *vma,
+ppgtt_bind_vma(struct i915_vma *vma, struct sg_table *pages,
 	       enum i915_cache_level cache_level,
 	       u32 flags)
 {
@@ -1202,7 +1204,7 @@ ppgtt_bind_vma(struct i915_vma *vma,
 	if (vma->obj->gt_ro)
 		flags |= PTE_READ_ONLY;
 
-	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+	vma->vm->insert_entries(vma->vm, pages, vma->node.start,
 				cache_level, flags);
 }
 
@@ -1325,7 +1327,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 		 * without telling our object about it. So we need to fake it.
 		 */
 		vma->bound &= ~GLOBAL_BIND;
-		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+		i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
 	}
 
@@ -1511,7 +1513,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 }
 
-static void i915_ggtt_bind_vma(struct i915_vma *vma,
+static void i915_ggtt_bind_vma(struct i915_vma *vma, struct sg_table *pages,
 			       enum i915_cache_level cache_level,
 			       u32 unused)
 {
@@ -1520,7 +1522,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
 
 	BUG_ON(!i915_is_ggtt(vma->vm));
-	intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+	intel_gtt_insert_sg_entries(pages, entry, flags);
 	vma->bound = GLOBAL_BIND;
 }
 
@@ -1544,7 +1546,7 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
 	intel_gtt_clear_range(first, size);
 }
 
-static void ggtt_bind_vma(struct i915_vma *vma,
+static void ggtt_bind_vma(struct i915_vma *vma, struct sg_table *pages,
 			  enum i915_cache_level cache_level,
 			  u32 flags)
 {
@@ -1570,7 +1572,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
 		if (!(vma->bound & GLOBAL_BIND) ||
 		    (cache_level != obj->cache_level)) {
-			vma->vm->insert_entries(vma->vm, obj->pages,
+			vma->vm->insert_entries(vma->vm, pages,
 						vma->node.start,
 						cache_level, flags);
 			vma->bound |= GLOBAL_BIND;
@@ -1582,7 +1584,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 	     (cache_level != obj->cache_level))) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
-					    vma->obj->pages,
+					    pages,
 					    vma->node.start,
 					    cache_level, flags);
 		vma->bound |= LOCAL_BIND;
@@ -2189,3 +2191,21 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 
 	return vma;
 }
+
+void i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+		   u32 flags)
+{
+	struct sg_table *pages;
+
+	if (vma->ggtt_view.get_pages)
+		pages = vma->ggtt_view.get_pages(&vma->ggtt_view, vma->obj);
+	else
+		pages = vma->obj->pages;
+
+	if (pages && !IS_ERR(pages)) {
+		vma->bind_vma(vma, pages, cache_level, flags);
+
+		if (vma->ggtt_view.put_pages)
+			vma->ggtt_view.put_pages(&vma->ggtt_view);
+	}
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 66bc44b..cbaddda 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -109,6 +109,23 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #define GEN8_PPAT_ELLC_OVERRIDE	(0<<2)
 #define GEN8_PPAT(i, x)		((uint64_t) (x) << ((i) * 8))
 
+enum i915_ggtt_view_type {
+	I915_GGTT_VIEW_NORMAL = 0,
+};
+
+struct i915_ggtt_view {
+	enum i915_ggtt_view_type type;
+	unsigned int vma_id;
+	struct sg_table *pages;
+
+	struct sg_table *(*get_pages)(struct i915_ggtt_view *ggtt_view,
+				      struct drm_i915_gem_object *obj);
+
+	void (*put_pages)(struct i915_ggtt_view *ggtt_view);
+};
+
+extern const struct i915_ggtt_view i915_ggtt_view_normal;
+
 enum i915_cache_level;
 /**
  * A VMA represents a GEM BO that is bound into an address space. Therefore, a
@@ -136,6 +153,11 @@ struct i915_vma {
 #define VMA_ID_DEFAULT	(0)
 	unsigned int id;
 
+	/**
+	 * Support different GGTT views into the same object.
+	 */
+	struct i915_ggtt_view ggtt_view;
+
 	/** This object's place on the active/inactive lists */
 	struct list_head mm_list;
 
@@ -168,7 +190,7 @@ struct i915_vma {
 	 * setting the valid PTE entries to a reserved scratch page. */
 	void (*unbind_vma)(struct i915_vma *vma);
 	/* Map an object into an address space with the given cache flags. */
-	void (*bind_vma)(struct i915_vma *vma,
+	void (*bind_vma)(struct i915_vma *vma, struct sg_table *pages,
 			 enum i915_cache_level cache_level,
 			 u32 flags);
 };
-- 
2.1.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx