When allocating objects from stolen, memset() the backing store to
POISON_INUSE (0x5a) to help identify any uninitialised use of a stolen
object.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 33 ++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 0be5e8683337..4c2869c0a802 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -572,6 +572,38 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
 	.release = i915_gem_object_release_stolen,
 };
 
+static void dbg_poison(struct drm_i915_gem_object *obj)
+{
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct sgt_iter iter;
+	dma_addr_t addr;
+
+	if (!drm_mm_node_allocated(&ggtt->error_capture))
+		return;
+
+	mutex_lock(&ggtt->error_mutex);
+	for_each_sgt_daddr(addr, iter, obj->mm.pages) {
+		void __iomem *s;
+
+		ggtt->vm.insert_page(&ggtt->vm, addr,
+				     ggtt->error_capture.start,
+				     I915_CACHE_NONE, 0);
+		mb();
+
+		s = io_mapping_map_wc(&ggtt->iomap,
+				      ggtt->error_capture.start,
+				      PAGE_SIZE);
+		memset(s, POISON_INUSE, PAGE_SIZE);
+		io_mapping_unmap(s);
+	}
+	mb();
+	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
+	mutex_unlock(&ggtt->error_mutex);
+#endif
+}
+
 static struct drm_i915_gem_object *
 __i915_gem_object_create_stolen(struct intel_memory_region *mem,
 				struct drm_mm_node *stolen)
@@ -598,6 +630,7 @@ __i915_gem_object_create_stolen(struct intel_memory_region *mem,
 		goto cleanup;
 
 	i915_gem_object_init_memory_region(obj, mem, 0);
+	dbg_poison(obj);
 
 	return obj;
 
-- 
2.20.1
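
For readers unfamiliar with the poison constants: POISON_INUSE is the 0x5a
fill byte from include/linux/poison.h. The sketch below is not part of the
patch and uses a hypothetical helper name; it only illustrates how the fill
makes uninitialised use detectable, since any consumer that reads back a run
of 0x5a bytes from a freshly allocated stolen object is using data it never
wrote.

/*
 * Hypothetical debug helper (illustration only, not in the patch):
 * report whether a CPU-visible copy of a stolen object still carries
 * the POISON_INUSE fill, i.e. nothing ever initialised it.
 */
#include <linux/poison.h>
#include <linux/types.h>

static bool stolen_buf_looks_uninitialised(const u8 *buf, size_t len)
{
	size_t i;

	if (!len)
		return false;

	for (i = 0; i < len; i++)
		if (buf[i] != POISON_INUSE)
			return false;

	return true; /* every byte is still the 0x5a poison */
}

Note that the poison pass in the patch itself is guarded by
CONFIG_DRM_I915_DEBUG_GEM, so the extra write traffic over the GGTT
error-capture window is confined to debug builds.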