Error state capture depends on i915_gem_find_active_request() and
i915_gem_obj_is_pinned(). Since there is no synchronization between error
state capture and the driver state, we at least need to use safe list
iterators in these functions to alleviate the problem of the request/vma
lists changing during error state capture.

Signed-off-by: Tomas Elf <tomas.elf@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8210ae7..1666499 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2704,9 +2704,9 @@ void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
-	struct drm_i915_gem_request *request;
+	struct drm_i915_gem_request *request, *tmpreq;
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	list_for_each_entry_safe(request, tmpreq, &ring->request_list, list) {
 		if (i915_gem_request_completed(request, false))
 			continue;
 
@@ -5121,8 +5121,9 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	struct i915_vma *vma, *tmpvma;
+
+	list_for_each_entry_safe(vma, tmpvma, &obj->vma_list, vma_link) {
 		if (!vma)
 			continue;
 
-- 
1.9.1
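
Note (illustration only, not part of the patch): the sketch below shows, in
plain user-space C with a hand-rolled intrusive list standing in for
<linux/list.h>, what the *_safe iterator variant buys you: the successor is
cached before the loop body runs, so the traversal survives the current
entry being unlinked or freed by the body itself. All names in the sketch
(struct node, for_each_node_safe, ...) are invented for the example. The
safe variant by itself does not protect against another thread modifying
the list concurrently, which is why the commit message only claims to
alleviate the problem.

/*
 * Illustrative, user-space sketch of the "safe" list iteration pattern
 * that list_for_each_entry_safe() implements in <linux/list.h>. The
 * names below are invented for this example and are not part of the
 * i915 driver or the kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Plain iteration: breaks if the body frees or unlinks 'pos', because
 * the increment expression dereferences 'pos' after the body ran. */
#define for_each_node(pos, head) \
	for ((pos) = (head); (pos) != NULL; (pos) = (pos)->next)

/* "Safe" iteration: cache the next pointer in 'tmp' before running the
 * body, so removing or freeing 'pos' inside the loop does not derail
 * the traversal. */
#define for_each_node_safe(pos, tmp, head) \
	for ((pos) = (head), (tmp) = (pos) ? (pos)->next : NULL; \
	     (pos) != NULL; \
	     (pos) = (tmp), (tmp) = (pos) ? (pos)->next : NULL)

int main(void)
{
	struct node *head = NULL, *pos, *tmp;
	int i;

	/* Build a small list: 4 -> 3 -> 2 -> 1 -> 0 */
	for (i = 0; i < 5; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	/* Free every node while iterating; only the _safe form tolerates
	 * this, since 'tmp' already holds the successor when 'pos' is
	 * freed. */
	for_each_node_safe(pos, tmp, head) {
		printf("freeing %d\n", pos->val);
		free(pos);
	}

	return 0;
}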