On Wed, Nov 11, 2015 at 04:06:13PM +0530, ankitprasad.r.sharma@xxxxxxxxx wrote:
> +swap_pages:
> +        stolen_pages = obj->pages;
> +        obj->pages = NULL;
> +
> +        obj->base.filp = file;
> +        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
> +        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
> +
> +        /* Recreate any pinned binding with pointers to the new storage */
> +        if (!list_empty(&obj->vma_list)) {
> +                ret = i915_gem_object_get_pages_gtt(obj);
> +                if (ret) {
> +                        obj->pages = stolen_pages;
> +                        goto err_file;
> +                }
> +
> +                ret = i915_gem_gtt_prepare_object(obj);
> +                if (ret) {
> +                        i915_gem_object_put_pages_gtt(obj);
> +                        obj->pages = stolen_pages;
> +                        goto err_file;
> +                }

i915_gem_gtt_prepare_object() has been moved into
i915_gem_object_get_pages_gtt(), so we can skip the call here.

> +
> +                ret = i915_gem_object_set_to_gtt_domain(obj, true);
> +                if (ret) {
> +                        i915_gem_gtt_finish_object(obj);

Ditto. (The simplified path is sketched at the end of this mail.)

> +int
> +i915_gem_freeze(struct drm_device *dev)
> +{
> +        /* Called before i915_gem_suspend() when hibernating */
> +        struct drm_i915_private *i915 = to_i915(dev);
> +        struct drm_i915_gem_object *obj, *tmp;
> +        struct list_head *phase[] = {
> +                &i915->mm.unbound_list, &i915->mm.bound_list, NULL
> +        }, **p;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

> +
> +        /* Across hibernation, the stolen area is not preserved.
> +         * Anything inside stolen must be copied back to normal
> +         * memory if we wish to preserve it.
> +         */
> +        for (p = phase; *p; p++) {
> +                struct list_head migrate;
> +                int ret;
> +
> +                INIT_LIST_HEAD(&migrate);
> +                list_for_each_entry_safe(obj, tmp, *p, global_list) {
> +                        if (obj->stolen == NULL)
> +                                continue;
> +
> +                        if (obj->internal_volatile)
> +                                continue;
> +
> +                        /* In the general case, this object may only be alive
> +                         * due to an active reference, and that may disappear
> +                         * when we unbind any of the objects (and so wait upon
> +                         * the GPU and retire requests). To prevent one of the
> +                         * objects from disappearing beneath us, we need to
> +                         * take a reference to each as we build the migration
> +                         * list.
> +                         *
> +                         * This is similar to the strategy required whilst
> +                         * shrinking or evicting objects (for the same reason).
> +                         */
> +                        drm_gem_object_reference(&obj->base);
> +                        list_move(&obj->global_list, &migrate);
> +                }
> +
> +                ret = 0;
> +                list_for_each_entry_safe(obj, tmp, &migrate, global_list) {
> +                        if (ret == 0)
> +                                ret = i915_gem_object_migrate_stolen_to_shmemfs(obj);
> +                        drm_gem_object_unreference(&obj->base);
> +                }
> +                list_splice(&migrate, *p);
> +                if (ret)
> +                        break;
> +        }

        mutex_unlock(&dev->struct_mutex);

> +
> +        return 0;

        return ret;

(The whole function with these fixes folded in is also sketched below.)

-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
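
For reference, a minimal sketch of the simplified recreate-binding path
with both comments applied. It assumes, per the comments above, that the
GTT prepare step now happens inside i915_gem_object_get_pages_gtt() and
that the matching finish step likewise happens inside
i915_gem_object_put_pages_gtt(); the tail of the second error path is
inferred, since the quoted hunk was truncated at that point.

        /* Recreate any pinned binding with pointers to the new storage */
        if (!list_empty(&obj->vma_list)) {
                /* get_pages_gtt() now also prepares the pages for the
                 * GTT, so a single call (and a single error path)
                 * suffices here.
                 */
                ret = i915_gem_object_get_pages_gtt(obj);
                if (ret) {
                        obj->pages = stolen_pages;
                        goto err_file;
                }

                ret = i915_gem_object_set_to_gtt_domain(obj, true);
                if (ret) {
                        /* put_pages_gtt() is assumed to undo the GTT
                         * prepare step, so no explicit finish call.
                         */
                        i915_gem_object_put_pages_gtt(obj);
                        obj->pages = stolen_pages;
                        goto err_file;
                }
        }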
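
And a sketch of i915_gem_freeze() with the locking fixes folded in. Note
that returning ret at the end implies hoisting the ret declaration out of
the per-phase loop to function scope; everything else follows the quoted
patch.

int
i915_gem_freeze(struct drm_device *dev)
{
        /* Called before i915_gem_suspend() when hibernating */
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_object *obj, *tmp;
        struct list_head *phase[] = {
                &i915->mm.unbound_list, &i915->mm.bound_list, NULL
        }, **p;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        /* Across hibernation, the stolen area is not preserved.
         * Anything inside stolen must be copied back to normal
         * memory if we wish to preserve it.
         */
        for (p = phase; *p; p++) {
                struct list_head migrate;

                INIT_LIST_HEAD(&migrate);

                /* Take a reference to each object as we build the
                 * migration list, so that none disappear beneath us
                 * whilst we unbind and wait upon the GPU (see the
                 * comment in the patch above for the full rationale).
                 */
                list_for_each_entry_safe(obj, tmp, *p, global_list) {
                        if (obj->stolen == NULL)
                                continue;

                        if (obj->internal_volatile)
                                continue;

                        drm_gem_object_reference(&obj->base);
                        list_move(&obj->global_list, &migrate);
                }

                ret = 0;
                list_for_each_entry_safe(obj, tmp, &migrate, global_list) {
                        if (ret == 0)
                                ret = i915_gem_object_migrate_stolen_to_shmemfs(obj);
                        drm_gem_object_unreference(&obj->base);
                }
                list_splice(&migrate, *p);
                if (ret)
                        break;
        }

        mutex_unlock(&dev->struct_mutex);

        return ret;
}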