[PATCH 70/70] drm/i915: Use vma as the primary token for managing binding

This is a nasty patch that does multiple things:

1. Fixes the obj->pin_display confusion (separated out by Tvrtko).
2. Simplifies the view API.
3. Introduces a vma hashtable for lookups (optimising for OglDrvCtx,
igt/gem_ctx_thrash and friends); see the lookup sketch after the
diffstat.
4. Introduces the VMA as the binding token: when you bind your object,
you're given a VMA cookie which you then use for all queries (such as
"how much and where in the VM am I?") and eventually to unbind, as
sketched below. This is an attempt to kill all the repeated
i915_gem_obj_to_vma() lookups where we already have the vma in hand.
It is less successful than hoped (~90% is a trivial conversion that
naturally operates on the i915_vma rather than the object; the biggest
sticking point is atomic modesetting, where we do not have the ability
to track per-instance data).
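
A rough sketch of the resulting calling convention (error handling
trimmed; the names match this patch):

	struct i915_vma *vma;

	vma = i915_gem_obj_ggtt_pin(obj, alignment, flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	offset = vma->node.start;	/* where in the VM am I? */
	size = vma->node.size;		/* and how much? */

	i915_vma_unpin(vma);		/* the cookie releases the binding */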

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_debugfs.c          |   8 +-
 drivers/gpu/drm/i915/i915_drv.h              |  56 +++---
 drivers/gpu/drm/i915/i915_gem.c              | 282 +++++++++------------------
 drivers/gpu/drm/i915/i915_gem_context.c      |  52 +++--
 drivers/gpu/drm/i915/i915_gem_evict.c        |   2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  46 +++--
 drivers/gpu/drm/i915/i915_gem_gtt.c          | 103 ++++++----
 drivers/gpu/drm/i915/i915_gem_gtt.h          |   3 +-
 drivers/gpu/drm/i915/i915_gem_render_state.c |  22 +--
 drivers/gpu/drm/i915/i915_gem_render_state.h |   1 +
 drivers/gpu/drm/i915/i915_gem_shrinker.c     |   4 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c       |   6 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c      |   2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c        |   6 +-
 drivers/gpu/drm/i915/intel_display.c         |  59 +++---
 drivers/gpu/drm/i915/intel_drv.h             |  10 +-
 drivers/gpu/drm/i915/intel_fbdev.c           |   9 +-
 drivers/gpu/drm/i915/intel_lrc.c             |  34 ++--
 drivers/gpu/drm/i915/intel_overlay.c         |  39 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 127 +++++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.h      |   2 +
 21 files changed, 433 insertions(+), 440 deletions(-)
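
Note (illustration, not part of the patch proper): the new per-object
vma lookup boils down to the sketch below. lookup_vma() is a stand-in
name; the real code is i915_gem_obj_to_vma() in i915_gem_gtt.c, and
hash_min() comes from <linux/hash.h>.

	/* vma are hashed on the (vm, ggtt view type) pair into a fixed
	 * table of 2^4 buckets; while an object only has a couple of
	 * vma, a plain list walk is used instead.
	 */
	static inline unsigned int __vma_hash(struct i915_address_space *vm,
					      unsigned int type)
	{
		return hash_min((unsigned long)vm | type, 4);
	}

	static struct i915_vma *
	lookup_vma(struct drm_i915_gem_object *obj,
		   struct i915_address_space *vm,
		   const struct i915_ggtt_view *view)
	{
		unsigned int type = view ? view->type : 0;
		struct i915_vma *vma;

		hlist_for_each_entry(vma, &obj->vma_ht[__vma_hash(vm, type)],
				     obj_node)
			if (vma->vm == vm && vma->ggtt_view.type == type)
				return vma;

		return NULL;
	}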

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7c84420b374f..e62fa2236ece 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -114,7 +114,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
+	return i915_gem_obj_to_ggtt(obj, NULL) ? "g" : " ";
 }
 
 static void
@@ -146,7 +146,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (vma->pin_count > 0)
 			pin_count++;
 	}
@@ -155,7 +155,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!vma->is_ggtt)
 			seq_puts(m, " (pp");
 		else
@@ -329,7 +329,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 		stats->shared += obj->base.size;
 
 	if (USES_FULL_PPGTT(obj->base.dev)) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			struct i915_hw_ppgtt *ppgtt;
 
 			if (!drm_mm_node_allocated(&vma->node))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ba593ee78863..b9830a48436b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -802,6 +802,7 @@ struct intel_context {
 	/* Legacy ring buffer submission */
 	struct {
 		struct drm_i915_gem_object *rcs_state;
+		struct i915_vma *rcs_vma;
 		bool initialized;
 	} legacy_hw_ctx;
 
@@ -809,6 +810,7 @@ struct intel_context {
 	bool rcs_initialized;
 	struct {
 		struct drm_i915_gem_object *state;
+		struct i915_vma *vma;
 		struct intel_ringbuffer *ringbuf;
 		int pin_count;
 	} engine[I915_NUM_RINGS];
@@ -1919,6 +1921,7 @@ struct drm_i915_gem_object {
 
 	/** List of VMAs backed by this object */
 	struct list_head vma_list;
+	struct hlist_head *vma_ht;
 
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
@@ -1980,7 +1983,6 @@ struct drm_i915_gem_object {
 	 * accurate mappable working set.
 	 */
 	unsigned int fault_mappable:1;
-	unsigned int pin_display:1;
 
 	/*
 	 * Is the object to be mapped as read-only to the GPU
@@ -1994,6 +1996,8 @@ struct drm_i915_gem_object {
 
 	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
+	unsigned int pin_display;
+
 	struct sg_table *pages;
 	int pages_pin_count;
 	struct get_page {
@@ -2656,13 +2660,13 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
 #define PIN_OFFSET_BIAS 0x8
 #define PIN_OFFSET_FIXED 0x10
 #define PIN_OFFSET_MASK (~4095)
-int __must_check
+struct i915_vma * __must_check
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t size,
 		    uint32_t alignment,
 		    uint64_t flags);
-int __must_check
+struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
 			 uint32_t size,
@@ -2840,13 +2844,12 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
 int __must_check
 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
+struct i915_vma * __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_engine_cs *pipelined,
 				     const struct i915_ggtt_view *view);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
-					      const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 				int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
@@ -2878,7 +2881,7 @@ i915_gem_obj_offset(struct drm_i915_gem_object *o,
 static inline unsigned long
 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
 {
-	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
+	return i915_gem_obj_ggtt_offset_view(o, NULL);
 }
 
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
@@ -2891,29 +2894,16 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 				struct i915_address_space *vm);
 struct i915_vma *
 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
-			  const struct i915_ggtt_view *view);
+		     struct i915_address_space *vm,
+		     const struct i915_ggtt_view *view);
 
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
-				       const struct i915_ggtt_view *view);
+				  struct i915_address_space *vm,
+				  const struct i915_ggtt_view *view);
 
-static inline struct i915_vma *
-i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
-}
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 
-/* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
-	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
-
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
@@ -2921,10 +2911,20 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
 	return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
+/* Some GGTT VM helpers */
+#define i915_obj_to_ggtt(obj) \
+	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
+
+static inline struct i915_vma *
+i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj,
+		     const struct i915_ggtt_view *view)
+{
+	return i915_gem_obj_to_vma(obj, i915_obj_to_ggtt(obj), view);
+}
 
 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
+	return i915_gem_obj_ggtt_bound_view(obj, NULL);
 }
 
 static inline unsigned long
@@ -2933,7 +2933,7 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
 }
 
-static inline int __must_check
+static inline struct i915_vma * __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 		      uint32_t alignment,
 		      unsigned flags)
@@ -2945,7 +2945,7 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 static inline int
 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
 {
-	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
+	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj, NULL));
 }
 
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
@@ -2953,7 +2953,7 @@ void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 static inline void
 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
 {
-	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+	i915_gem_object_ggtt_unpin_view(obj, NULL);
 }
 
 /* i915_gem_context.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7b27236f2c29..42410571440d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -295,7 +295,7 @@ drop_pages(struct drm_i915_gem_object *obj)
 	int ret;
 
 	drm_gem_object_reference(&obj->base);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
 		if (i915_vma_unbind(vma))
 			break;
 
@@ -789,14 +789,17 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 			 struct drm_file *file)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_vma *vma;
 	ssize_t remain;
 	loff_t offset, page_base;
 	char __user *user_data;
 	int page_offset, page_length, ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
-	if (ret)
+	vma = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto out;
+	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
@@ -809,7 +812,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 
-	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
+	offset = vma->node.start + args->offset;
 
 	intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
 
@@ -844,7 +847,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 out_flush:
 	intel_fb_obj_flush(obj, false);
 out_unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	i915_vma_unpin(vma);
 out:
 	return ret;
 }
@@ -1720,6 +1723,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_vma *ggtt;
 	pgoff_t page_offset;
 	unsigned long pfn;
 	int ret = 0;
@@ -1753,8 +1757,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* Now bind it into the GTT if needed */
-	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
-	if (ret)
+	ggtt = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+	ret = PTR_ERR_OR_ZERO(ggtt);
+	if (ret)
 		goto unlock;
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
@@ -1766,7 +1770,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+	pfn = dev_priv->gtt.mappable_base + ggtt->node.start;
 	pfn >>= PAGE_SHIFT;
 
 	if (!obj->fault_mappable) {
@@ -1789,7 +1793,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 				    (unsigned long)vmf->virtual_address,
 				    pfn + page_offset);
 unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	ggtt->pin_count--;
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 out:
@@ -2379,7 +2383,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	if (obj->active)
 		return;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!list_empty(&vma->mm_list))
 			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
 	}
@@ -3184,7 +3188,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
-	if (list_empty(&vma->vma_link))
+	if (list_empty(&vma->obj_link))
 		return 0;
 
 	if (!drm_mm_node_allocated(&vma->node)) {
@@ -3929,7 +3933,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 					    old_write_domain);
 
 	/* And bump the LRU for this access */
-	vma = i915_gem_obj_to_ggtt(obj);
+	vma = i915_gem_obj_to_ggtt(obj, NULL);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
 		list_move_tail(&vma->mm_list,
 			       &to_i915(obj->base.dev)->gtt.base.inactive_list);
@@ -3947,13 +3951,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	if (obj->cache_level == cache_level)
 		return 0;
 
-	if (i915_gem_obj_is_pinned(obj)) {
-		DRM_DEBUG("can not change the cache level of pinned objects\n");
-		return -EBUSY;
-	}
-
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
+			if (vma->pin_count) {
+				DRM_DEBUG("can not change the cache level of pinned objects\n");
+				return -EBUSY;
+			}
 			ret = i915_vma_unbind(vma);
 			if (ret)
 				return ret;
@@ -3977,7 +3980,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				return ret;
 		}
 
-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (drm_mm_node_allocated(&vma->node)) {
 				ret = i915_vma_bind(vma, cache_level,
 						    vma->bound & GLOBAL_BIND);
@@ -3986,7 +3989,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			}
 	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		vma->node.color = cache_level;
 	obj->cache_level = cache_level;
 
@@ -4078,48 +4081,29 @@ unlock:
 	return ret;
 }
 
-static bool is_pin_display(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-
-	vma = i915_gem_obj_to_ggtt(obj);
-	if (!vma)
-		return false;
-
-	/* There are 2 sources that pin objects:
-	 *   1. The display engine (scanouts, sprites, cursors);
-	 *   2. Reservations for execbuffer;
-	 *
-	 * We can ignore reservations as we hold the struct_mutex and
-	 * are only called outside of the reservation path.
-	 */
-	return vma->pin_count;
-}
-
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
  * any flushes to be pipelined (for pageflips).
  */
-int
+struct i915_vma *
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_engine_cs *pipelined,
 				     const struct i915_ggtt_view *view)
 {
 	u32 old_read_domains, old_write_domain;
-	bool was_pin_display;
+	struct i915_vma *vma;
 	int ret;
 
 	ret = i915_gem_object_sync(obj, pipelined);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
 	/* Mark the pin_display early so that we account for the
 	 * display coherency whilst setting up the cache domains.
 	 */
-	was_pin_display = obj->pin_display;
-	obj->pin_display = true;
+	obj->pin_display++;
 
 	/* The display engine is not coherent with the LLC cache on gen6.  As
 	 * a result, we make sure that the pinning that is about to occur is
@@ -4132,8 +4116,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 */
 	ret = i915_gem_object_set_cache_level(obj,
 					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
-	if (ret)
+	if (ret) {
+		vma = ERR_PTR(ret);
 		goto err_unpin_display;
+	}
 
 	/* As the user may map the buffer once pinned in the display plane
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
@@ -4142,14 +4128,15 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * put it anyway and hope that userspace can cope (but always first
 	 * try to preserve the existing ABI).
 	 */
-	ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
-				       view->type == I915_GGTT_VIEW_NORMAL ?
-				       PIN_MAPPABLE : 0);
-	if (ret)
-		ret = i915_gem_obj_ggtt_pin(obj, alignment, 0);
-	if (ret)
+	vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
+				       view ? PIN_MAPPABLE : 0);
+	if (IS_ERR(vma))
+		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
+	if (IS_ERR(vma))
 		goto err_unpin_display;
 
+	WARN_ON(obj->pin_display > vma->pin_count);
+
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -4165,21 +4152,22 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 					    old_read_domains,
 					    old_write_domain);
 
-	return 0;
+	return vma;
 
 err_unpin_display:
-	WARN_ON(was_pin_display != is_pin_display(obj));
-	obj->pin_display = was_pin_display;
-	return ret;
+	obj->pin_display--;
+	return vma;
 }
 
 void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
-					 const struct i915_ggtt_view *view)
+i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
-	i915_gem_object_ggtt_unpin_view(obj, view);
+	WARN_ON(vma->obj->pin_display == 0);
+	vma->obj->pin_display--;
+
+	i915_vma_unpin(vma);
 
-	obj->pin_display = is_pin_display(obj);
+	WARN_ON(vma->obj->pin_display > vma->pin_count);
 }
 
 int
@@ -4392,99 +4380,89 @@ i915_vma_pin(struct i915_vma *vma,
 	return 0;
 }
 
-static int
-i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
-		       struct i915_address_space *vm,
-		       const struct i915_ggtt_view *ggtt_view,
-		       uint32_t size,
-		       uint32_t alignment,
-		       uint64_t flags)
+static struct i915_vma *
+__i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		      struct i915_address_space *vm,
+		      const struct i915_ggtt_view *ggtt_view,
+		      uint32_t size,
+		      uint32_t alignment,
+		      uint64_t flags)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct i915_vma *vma;
 	int ret;
 
 	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
-		return -ENODEV;
+		return ERR_PTR(-ENODEV);
 
 	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !vm->is_ggtt))
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
-		return -EINVAL;
-
-	if (WARN_ON(vm->is_ggtt != !!ggtt_view))
-		return -EINVAL;
-
-	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
-			  i915_gem_obj_to_vma(obj, vm);
+		return ERR_PTR(-EINVAL);
 
+	vma = i915_gem_obj_to_vma(obj, vm, ggtt_view);
 	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+		return vma;
 
 	if (vma) {
 		if (i915_vma_misplaced(vma, size, alignment, flags)) {
-			unsigned long offset;
-			offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
-					     i915_gem_obj_offset(obj, vm);
 			WARN(vma->pin_count,
 			     "bo is already pinned in %s with incorrect alignment:"
 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
-			     ggtt_view ? "ggtt" : "ppgtt",
-			     offset,
-			     alignment,
+			     vma->is_ggtt ? "ggtt" : "ppgtt",
+			     (long)vma->node.start, alignment,
 			     !!(flags & PIN_MAPPABLE),
 			     obj->map_and_fenceable);
 			ret = i915_vma_unbind(vma);
 			if (ret)
-				return ret;
+				return ERR_PTR(ret);
 
 			vma = NULL;
 		}
 	}
 
 	if (vma == NULL) {
-		vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
-			i915_gem_obj_lookup_or_create_vma(obj, vm);
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm, ggtt_view);
 		if (IS_ERR(vma))
-			return PTR_ERR(vma);
+			return vma;
 	}
 
-	return i915_vma_pin(vma, size, alignment, flags);
+	ret = i915_vma_pin(vma, size, alignment, flags);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return vma;
 }
 
-int
+struct i915_vma *
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t size,
 		    uint32_t alignment,
 		    uint64_t flags)
 {
-	return i915_gem_object_do_pin(obj, vm,
-				      vm->is_ggtt ? &i915_ggtt_view_normal : NULL,
-				      size, alignment, flags);
+	return __i915_gem_object_pin(obj, vm, NULL,
+				     size, alignment, flags);
 }
 
-int
+struct i915_vma *
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
 			 uint32_t size,
 			 uint32_t alignment,
 			 uint64_t flags)
 {
-	if (WARN_ONCE(!view, "no view specified"))
-		return -EINVAL;
-
-	return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
-				      size, alignment, flags | PIN_GLOBAL);
+	return __i915_gem_object_pin(obj, i915_obj_to_ggtt(obj), view,
+				     size, alignment, flags | PIN_GLOBAL);
 }
 
 void
 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 				const struct i915_ggtt_view *view)
 {
-	i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
+	i915_vma_unpin(i915_gem_obj_to_ggtt(obj, view));
 }
 
 bool
@@ -4492,11 +4470,6 @@ i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
-		WARN_ON(!ggtt_vma ||
-			dev_priv->fence_regs[obj->fence_reg].pin_count >
-			ggtt_vma->pin_count);
 		dev_priv->fence_regs[obj->fence_reg].pin_count++;
 		return true;
 	} else
@@ -4726,7 +4699,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		int ret;
 
 		vma->pin_count = 0;
@@ -4773,42 +4746,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	drm_gem_object_release(&obj->base);
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
+	kfree(obj->vma_ht);
 	kfree(obj->bit_17);
 	i915_gem_object_free(obj);
 
 	intel_runtime_pm_put(dev_priv);
 }
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
-			return vma;
-	}
-	return NULL;
-}
-
-struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
-					   const struct i915_ggtt_view *view)
-{
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
-	struct i915_vma *vma;
-
-	if (WARN_ONCE(!view, "no view specified"))
-		return ERR_PTR(-EINVAL);
-
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == ggtt &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view))
-			return vma;
-	return NULL;
-}
-
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
 	struct i915_address_space *vm = NULL;
@@ -4823,7 +4767,8 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 	if (!vm->is_ggtt)
 		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
 
-	list_del(&vma->vma_link);
+	list_del(&vma->obj_link);
+	hash_del(&vma->obj_node);
 
 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
 }
@@ -5348,13 +5293,9 @@ i915_gem_obj_offset(struct drm_i915_gem_object *o,
 
 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
-			return vma->node.start;
-	}
+	vma = i915_gem_obj_to_vma(o, vm, NULL);
+	if (vma)
+		return vma->node.start;
 
 	WARN(1, "%s vma for this object not found.\n",
 	     vm->is_ggtt ? "global" : "ppgtt");
@@ -5365,13 +5306,9 @@ unsigned long
 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 			      const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, vma_link)
-		if (vma->vm == ggtt &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view))
-			return vma->node.start;
+	struct i915_vma *vma = i915_gem_obj_to_ggtt(o, view);
+	if (vma)
+		return vma->node.start;
 
 	WARN(1, "global vma for this object not found.\n");
 	return -1;
@@ -5380,39 +5317,22 @@ i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 			struct i915_address_space *vm)
 {
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
-			return true;
-	}
-
-	return false;
+	struct i915_vma *vma = i915_gem_obj_to_vma(o, vm, NULL);
+	return vma && drm_mm_node_allocated(&vma->node);
 }
 
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 				  const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, vma_link)
-		if (vma->vm == ggtt &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
-		    drm_mm_node_allocated(&vma->node))
-			return true;
-
-	return false;
+	struct i915_vma *vma = i915_gem_obj_to_ggtt(o, view);
+	return vma && drm_mm_node_allocated(&vma->node);
 }
 
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (drm_mm_node_allocated(&vma->node))
 			return true;
 
@@ -5429,26 +5349,16 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 
 	BUG_ON(list_empty(&o->vma_list));
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
-			return vma->node.size;
-	}
+	vma = i915_gem_obj_to_vma(o, vm, NULL);
+	if (vma)
+		return vma->node.size;
+
 	return 0;
 }
 
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->pin_count > 0)
-			return true;
-	}
-	return false;
+	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj, NULL);
+	return vma && vma->pin_count;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 43e58249235b..e8b3c56256c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -243,6 +243,7 @@ i915_gem_create_context(struct drm_device *dev,
 			struct drm_i915_file_private *file_priv)
 {
 	const bool is_global_default_ctx = file_priv == NULL;
+	struct i915_vma *vma = NULL;
 	struct intel_context *ctx;
 	int ret = 0;
 
@@ -260,12 +261,11 @@ i915_gem_create_context(struct drm_device *dev,
 		 * be available. To avoid this we always pin the default
 		 * context.
 		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
+		vma = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(dev), 0);
-		if (ret) {
-			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
+		ret = PTR_ERR_OR_ZERO(vma);
+		if (ret)
 			goto err_destroy;
-		}
 	}
 
 	if (USES_FULL_PPGTT(dev)) {
@@ -286,8 +285,8 @@ i915_gem_create_context(struct drm_device *dev,
 	return ctx;
 
 err_unpin:
-	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
-		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+	if (vma)
+		i915_vma_unpin(vma);
 err_destroy:
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
@@ -481,7 +480,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 
 static inline int
 mi_set_context(struct intel_engine_cs *ring,
-	       struct intel_context *new_context,
+	       struct intel_context *to,
 	       u32 hw_flags)
 {
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
@@ -535,8 +534,7 @@ mi_set_context(struct intel_engine_cs *ring,
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
-			flags);
+	intel_ring_emit(ring, to->legacy_hw_ctx.rcs_vma->node.start | flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -631,7 +629,6 @@ static int do_switch(struct intel_engine_cs *ring,
 	struct intel_context *from = ring->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
-	struct i915_vma *vma;
 	int ret, i;
 
 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -644,10 +641,18 @@ static int do_switch(struct intel_engine_cs *ring,
 
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
-		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+		struct i915_vma *vma;
+
+		vma = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(ring->dev), 0);
-		if (ret)
-			return ret;
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
+		to->legacy_hw_ctx.rcs_vma = vma;
+		if (WARN_ON(!(vma->bound & GLOBAL_BIND))) {
+			ret = -ENODEV;
+			goto unpin_out;
+		}
 	}
 
 	/*
@@ -689,16 +694,6 @@ static int do_switch(struct intel_engine_cs *ring,
 	if (ret)
 		goto unpin_out;
 
-	vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
-	if (!(vma->bound & GLOBAL_BIND)) {
-		ret = i915_vma_bind(vma,
-				    to->legacy_hw_ctx.rcs_state->cache_level,
-				    GLOBAL_BIND);
-		/* This shouldn't ever fail. */
-		if (WARN_ONCE(ret, "GGTT context bind failed!"))
-			goto unpin_out;
-	}
-
 	if (!to->legacy_hw_ctx.initialized) {
 		hw_flags |= MI_RESTORE_INHIBIT;
 		/* NB: If we inhibit the restore, the context is not allowed to
@@ -754,7 +749,7 @@ static int do_switch(struct intel_engine_cs *ring,
 	 */
 	if (from != NULL) {
 		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+		i915_vma_move_to_active(from->legacy_hw_ctx.rcs_vma, ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -765,7 +760,8 @@ static int do_switch(struct intel_engine_cs *ring,
 		from->legacy_hw_ctx.rcs_state->dirty = 1;
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+		i915_vma_unpin(from->legacy_hw_ctx.rcs_vma);
+		from->legacy_hw_ctx.rcs_vma = NULL;
 		i915_gem_context_unreference(from);
 	}
 
@@ -787,8 +783,10 @@ done:
 	return 0;
 
 unpin_out:
-	if (ring->id == RCS)
-		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+	if (ring->id == RCS) {
+		i915_vma_unpin(to->legacy_hw_ctx.rcs_vma);
+		to->legacy_hw_ctx.rcs_vma = NULL;
+	}
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index cf33f982da8e..9f14b4e87842 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -331,7 +331,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 		list_move_tail(&obj->global_list, &still_in_list);
 
 		drm_gem_object_reference(&obj->base);
-		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+		list_for_each_entry_safe(vma, v, &obj->vma_list, obj_link)
 			if (WARN_ON(i915_vma_unbind(vma)))
 				break;
 		drm_gem_object_unreference(&obj->base);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1b673c55934e..eac86d97f935 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -162,7 +162,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		 * from the (obj, vm) we don't run the risk of creating
 		 * duplicated vmas for the same vm.
 		 */
-		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
 		if (IS_ERR(vma)) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
@@ -244,7 +244,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 		i915_gem_object_unpin_fence(obj);
 
 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		vma->pin_count--;
+		i915_vma_unpin(vma);
 
 	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
@@ -1215,7 +1215,7 @@ i915_emit_box(struct intel_engine_cs *ring,
 	return 0;
 }
 
-static struct i915_vma*
+static struct i915_vma *
 i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
 			  struct eb_vmas *eb,
@@ -1224,7 +1224,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 			  bool is_master)
 {
 	struct drm_i915_gem_object *shadow_batch_obj;
-	struct i915_vma *vma;
+	struct i915_vma *vma = NULL;
 	int ret;
 
 	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
@@ -1238,31 +1238,28 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 			      batch_start_offset,
 			      batch_len,
 			      is_master);
-	if (ret)
+	if (ret) {
+		if (ret != -EACCES) /* unhandled chained batch */
+			vma = ERR_PTR(ret);
 		goto err;
+	}
 
-	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
-	if (ret)
+	vma = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto err;
-
-	i915_gem_object_unpin_pages(shadow_batch_obj);
+	}
 
 	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
-	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
 	vma->exec_entry = shadow_exec_entry;
 	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
 	drm_gem_object_reference(&shadow_batch_obj->base);
 	list_add_tail(&vma->exec_list, &eb->vmas);
 
-	return vma;
-
 err:
 	i915_gem_object_unpin_pages(shadow_batch_obj);
-	if (ret == -EACCES) /* unhandled chained batch */
-		return NULL;
-	else
-		return ERR_PTR(ret);
+	return vma;
 }
 
 int
@@ -1642,6 +1639,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (dispatch_flags & I915_DISPATCH_SECURE) {
+		struct i915_vma *vma;
 		/*
 		 * So on first glance it looks freaky that we pin the batch here
 		 * outside of the reservation loop. But:
@@ -1652,17 +1650,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		 *   fitting due to fragmentation.
 		 * So this is actually safe.
 		 */
-		ret = i915_gem_obj_ggtt_pin(eb->batch_vma->obj, 0, 0);
-		if (ret)
+		vma = i915_gem_obj_ggtt_pin(eb->batch_vma->obj, 0, 0);
+		if (IS_ERR(vma))
 			goto err;
-
-		exec_start += i915_gem_obj_ggtt_offset(eb->batch_vma->obj);
-	} else
-		exec_start += eb->batch_vma->node.start;
+		eb->batch_vma = vma;
+	}
 
 	ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
-					  &eb->vmas, eb->batch_vma->obj,
-					  exec_start, dispatch_flags);
+					  &eb->vmas,
+					  eb->batch_vma->obj,
+					  exec_start + eb->batch_vma->node.start,
+					  dispatch_flags);
 
 	/*
 	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a80573105a61..3cf5fb62aff5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1668,8 +1668,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 				       true);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-							   &dev_priv->gtt.base);
+		struct i915_vma *vma =
+			i915_gem_obj_to_vma(obj, &dev_priv->gtt.base, NULL);
 		if (!vma)
 			continue;
 
@@ -2051,12 +2051,12 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 
 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm, NULL);
 
-		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
-			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
+		DRM_DEBUG_KMS("reserving preallocated space: %lx + %lx\n",
+			      (long)vma->node.start, (long)vma->node.size);
 
-		WARN_ON(i915_gem_obj_ggtt_bound(obj));
+		WARN_ON(vma->bound & GLOBAL_BIND);
 		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
 		if (ret) {
 			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
@@ -2534,6 +2534,17 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	return 0;
 }
 
+static inline unsigned __vma_hash(struct i915_address_space *vm,
+				  unsigned int type)
+{
+	return hash_min((unsigned long)vm | type, 4);
+}
+
+static inline unsigned vma_hash(const struct i915_vma *vma)
+{
+	return __vma_hash(vma->vm, vma->ggtt_view.type);
+}
+
 static struct i915_vma *
 __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 		      struct i915_address_space *vm,
@@ -2541,14 +2552,11 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 {
 	struct i915_vma *vma;
 
-	if (WARN_ON(vm->is_ggtt != !!ggtt_view))
-		return ERR_PTR(-EINVAL);
-
 	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->vma_link);
+	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->mm_list);
 	INIT_LIST_HEAD(&vma->exec_list);
 	vma->vm = vm;
@@ -2557,8 +2565,6 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 
 	if (INTEL_INFO(vm->dev)->gen >= 6) {
 		if (vm->is_ggtt) {
-			vma->ggtt_view = *ggtt_view;
-
 			vma->unbind_vma = ggtt_unbind_vma;
 			vma->bind_vma = ggtt_bind_vma;
 		} else {
@@ -2567,52 +2573,86 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 		}
 	} else {
 		BUG_ON(!vm->is_ggtt);
-		vma->ggtt_view = *ggtt_view;
 		vma->unbind_vma = i915_ggtt_unbind_vma;
 		vma->bind_vma = i915_ggtt_bind_vma;
 	}
 
-	list_add_tail(&vma->vma_link, &obj->vma_list);
+	if (ggtt_view)
+		vma->ggtt_view = *ggtt_view;
+
 	if (!vm->is_ggtt)
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
 
+	INIT_HLIST_NODE(&vma->obj_node);
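+	/* Lazily create the lookup hashtable: a list walk only becomes a
+	 * burden once the object acquires its third vma, at which point
+	 * the existing vma are rehashed into the new table as well.
+	 */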
+	if (obj->vma_ht == NULL &&
+	    obj->vma_list.next->next != obj->vma_list.prev->prev) {
+		obj->vma_ht = kmalloc(sizeof(struct hlist_head)*16, GFP_KERNEL);
+		if (obj->vma_ht) {
+			struct i915_vma *old;
+
+			__hash_init(obj->vma_ht, 16);
+			list_for_each_entry(old, &obj->vma_list, obj_link)
+				hlist_add_head(&old->obj_node,
+					       &obj->vma_ht[vma_hash(old)]);
+		}
+	}
+	if (obj->vma_ht)
+		hlist_add_head(&vma->obj_node, &obj->vma_ht[vma_hash(vma)]);
+	list_add_tail(&vma->obj_link, &obj->vma_list);
 	return vma;
 }
 
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
+static inline bool vma_matches(struct i915_vma *vma,
+			       struct i915_address_space *vm,
+			       const struct i915_ggtt_view *view)
 {
-	struct i915_vma *vma;
+	if (vma->vm != vm)
+		return false;
 
-	vma = i915_gem_obj_to_vma(obj, vm);
-	if (!vma)
-		vma = __i915_gem_vma_create(obj, vm,
-					    vm->is_ggtt ? &i915_ggtt_view_normal : NULL);
+	if (vma->ggtt_view.type != (view ? view->type : 0))
+		return false;
 
-	return vma;
+	return true;
 }
 
 struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
-				       const struct i915_ggtt_view *view)
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
 	struct i915_vma *vma;
 
-	if (WARN_ON(!view))
-		return ERR_PTR(-EINVAL);
+	if (obj->vma_ht == NULL) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
+			if (vma_matches(vma, vm, view))
+				return vma;
+		}
+	} else {
+		int bkt = __vma_hash(vm, view ? view->type : 0);
+		hlist_for_each_entry(vma, &obj->vma_ht[bkt], obj_node)
+			if (vma_matches(vma, vm, view))
+				return vma;
+	}
 
-	vma = i915_gem_obj_to_ggtt_view(obj, view);
+	return NULL;
+}
 
-	if (IS_ERR(vma))
-		return vma;
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm,
+				  const struct i915_ggtt_view *view)
+{
+	struct i915_vma *vma;
 
+	vma = i915_gem_obj_to_vma(obj, vm, view);
 	if (!vma)
-		vma = __i915_gem_vma_create(obj, ggtt, view);
+		vma = __i915_gem_vma_create(obj, vm, view);
 
 	return vma;
-
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 4e6cdaba2569..bdae99da71c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -174,7 +174,8 @@ struct i915_vma {
 	/** This object's place on the active/inactive lists */
 	struct list_head mm_list;
 
-	struct list_head vma_link; /* Link in the object's VMA list */
+	struct list_head obj_link; /* Link in the object's VMA list */
+	struct hlist_node obj_node;
 
 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 4bb91cdadec9..140581b66481 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -47,7 +47,7 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
 
 static int render_state_init(struct render_state *so, struct drm_device *dev)
 {
-	int ret;
+	struct i915_vma *vma;
 
 	so->gen = INTEL_INFO(dev)->gen;
 	so->rodata = render_state_get_rodata(dev, so->gen);
@@ -61,16 +61,16 @@ static int render_state_init(struct render_state *so, struct drm_device *dev)
 	if (so->obj == NULL)
 		return -ENOMEM;
 
-	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
-	if (ret)
-		goto free_gem;
+	vma = i915_gem_obj_ggtt_pin(so->obj, 0, 0);
+	if (IS_ERR(vma)) {
+		drm_gem_object_unreference(&so->obj->base);
+		return PTR_ERR(vma);
+	}
 
-	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-	return 0;
+	so->vma = vma;
+	so->ggtt_offset = vma->node.start;
 
-free_gem:
-	drm_gem_object_unreference(&so->obj->base);
-	return ret;
+	return 0;
 }
 
 static int render_state_setup(struct render_state *so)
@@ -124,7 +124,7 @@ static int render_state_setup(struct render_state *so)
 
 void i915_gem_render_state_fini(struct render_state *so)
 {
-	i915_gem_object_ggtt_unpin(so->obj);
+	i915_vma_unpin(so->vma);
 	drm_gem_object_unreference(&so->obj->base);
 }
 
@@ -171,7 +171,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+	i915_vma_move_to_active(so.vma, ring);
 
 	ret = __i915_add_request(ring, NULL, so.obj);
 	/* __i915_add_request moves object to inactive if it fails */
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index c44961ed3fad..09eb56fafdc0 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -35,6 +35,7 @@ struct intel_renderstate_rodata {
 struct render_state {
 	const struct intel_renderstate_rodata *rodata;
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	u64 ggtt_offset;
 	int gen;
 };
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index d64c54b329b2..bd1cf921aead 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -127,7 +127,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
 			/* For the unbound phase, this should be a no-op! */
 			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, vma_link)
+						 &obj->vma_list, obj_link)
 				if (i915_vma_unbind(vma))
 					break;
 
@@ -190,7 +190,7 @@ static int num_vma_bound(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 	int count = 0;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (drm_mm_node_allocated(&vma->node))
 			count++;
 		if (vma->pin_count)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 348ed5abcdbf..51e0f11aed90 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -514,7 +514,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
-	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err_out;
@@ -533,9 +533,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
 			goto err_vma;
 		}
-	}
 
-	vma->bound |= GLOBAL_BIND;
+		vma->bound |= GLOBAL_BIND;
+	}
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1719078c763a..d96276caab49 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -79,7 +79,7 @@ static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
 			int ret = i915_vma_unbind(vma);
 			WARN_ON(ret && ret != -EIO);
 		}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b7a00e464ba4..fc69f53059ef 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -607,7 +607,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 
 	reloc_offset = dst->gtt_offset;
 	if (vm->is_ggtt)
-		vma = i915_gem_obj_to_ggtt(src);
+		vma = i915_gem_obj_to_ggtt(src, NULL);
 	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
 		   vma && (vma->bound & GLOBAL_BIND) &&
 		   reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
@@ -737,7 +737,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
 		if (err == last)
 			break;
 
-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (vma->vm == vm && vma->pin_count > 0)
 				capture_bo(err++, vma);
 	}
@@ -1096,7 +1096,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 	error->active_bo_count[ndx] = i;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (vma->vm == vm && vma->pin_count > 0)
 				i++;
 	}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 69db1c3b26a8..0cfa852983c6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2348,7 +2348,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
 	return 0;
 }
 
-int
+struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			   struct drm_framebuffer *fb,
 			   const struct drm_plane_state *plane_state,
@@ -2358,6 +2358,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct i915_ggtt_view view;
+	struct i915_vma *vma;
 	u32 alignment;
 	int ret;
 
@@ -2386,17 +2387,17 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	case I915_FORMAT_MOD_Yf_TILED:
 		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
 			  "Y tiling bo slipped through, driver bug!\n"))
-			return -EINVAL;
+			return ERR_PTR(-ENODEV);
 		alignment = 1 * 1024 * 1024;
 		break;
 	default:
 		MISSING_CASE(fb->modifier[0]);
-		return -EINVAL;
+		return ERR_PTR(-ENODEV);
 	}
 
 	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
 	/* Note that the w/a also requires 64 PTE of padding following the
 	 * bo. We currently fill all unused PTE with the shadow page and so
@@ -2416,10 +2417,12 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	intel_runtime_pm_get(dev_priv);
 
 	dev_priv->mm.interruptible = false;
-	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
+	vma = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
 						   &view);
-	if (ret)
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto err_interruptible;
+	}
 
 	if (obj->map_and_fenceable) {
 		/* Install a fence for tiled scan-out. Pre-i965 always needs a
@@ -2447,31 +2450,33 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 
 	dev_priv->mm.interruptible = true;
 	intel_runtime_pm_put(dev_priv);
-	return 0;
+	return vma;
 
 err_unpin:
-	i915_gem_object_unpin_from_display_plane(obj, &view);
+	i915_gem_object_unpin_from_display_plane(vma);
 err_interruptible:
 	dev_priv->mm.interruptible = true;
 	intel_runtime_pm_put(dev_priv);
-	return ret;
+	return ERR_PTR(ret);
 }
 
 static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
-			       const struct drm_plane_state *plane_state)
+			       const struct drm_plane_state *state)
 {
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct i915_ggtt_view view;
-	int ret;
+	struct i915_vma *vma;
 
 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
-	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
-	WARN_ONCE(ret, "Couldn't get view from plane state!");
+	WARN_ONCE(intel_fill_fb_ggtt_view(&view, fb, state),
+		  "Couldn't get view from plane state!");
 
 	if (obj->map_and_fenceable)
 		i915_gem_object_unpin_fence(obj);
-	i915_gem_object_unpin_from_display_plane(obj, &view);
+
+	vma = i915_gem_obj_to_ggtt(obj, &view);
+	i915_gem_object_unpin_from_display_plane(vma);
 }
 
 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -10229,6 +10234,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_unpin_work *work;
 	struct intel_engine_cs *ring;
 	bool mmio_flip;
+	struct i915_vma *vma;
 	int ret;
 
 	/*
@@ -10333,11 +10339,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 * synchronisation, so all we want here is to pin the framebuffer
 	 * into the display plane and skip any waits.
 	 */
-	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
+	vma = intel_pin_and_fence_fb_obj(crtc->primary, fb,
 					 crtc->primary->state,
 					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring);
-	if (ret)
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto cleanup_pending;
+	}
 
 	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
 						  + intel_crtc->dspaddr_offset;
@@ -12544,7 +12552,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		if (ret)
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 	} else {
-		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
+		struct i915_vma *vma;
+
+		vma = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
+		if (IS_ERR(vma))
+			ret = PTR_ERR(vma);
 	}
 
 	if (ret == 0)
@@ -12565,18 +12577,19 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 void
 intel_cleanup_plane_fb(struct drm_plane *plane,
 		       struct drm_framebuffer *fb,
-		       const struct drm_plane_state *old_state)
+		       const struct drm_plane_state *state)
 {
 	struct drm_device *dev = plane->dev;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-
-	if (WARN_ON(!obj))
-		return;
 
 	if (plane->type != DRM_PLANE_TYPE_CURSOR ||
 	    !INTEL_INFO(dev)->cursor_needs_physical) {
+		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+
+		if (WARN_ON(!obj))
+			return;
+
 		mutex_lock(&dev->struct_mutex);
-		intel_unpin_fb_obj(fb, old_state);
+		intel_unpin_fb_obj(fb, state);
 		mutex_unlock(&dev->struct_mutex);
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 160f6a28e9a1..ba4c872e2fa1 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -960,10 +960,10 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct intel_load_detect_pipe *old,
 				    struct drm_modeset_acquire_ctx *ctx);
-int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
-			       struct drm_framebuffer *fb,
-			       const struct drm_plane_state *plane_state,
-			       struct intel_engine_cs *pipelined);
+struct i915_vma *intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+					    struct drm_framebuffer *fb,
+					    const struct drm_plane_state *plane_state,
+					    struct intel_engine_cs *pipelined);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
@@ -977,7 +977,7 @@ int intel_prepare_plane_fb(struct drm_plane *plane,
 			   const struct drm_plane_state *new_state);
 void intel_cleanup_plane_fb(struct drm_plane *plane,
 			    struct drm_framebuffer *fb,
-			    const struct drm_plane_state *old_state);
+			    const struct drm_plane_state *state);
 int intel_plane_atomic_get_property(struct drm_plane *plane,
 				    const struct drm_plane_state *state,
 				    struct drm_property *property,
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 4e7e7da2e03b..033ad90201f9 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -119,6 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	struct drm_device *dev = helper->dev;
 	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	int size, ret;
 
 	/* we don't do packed 24bpp */
@@ -151,13 +152,15 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	}
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
-	if (ret) {
+	vma = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		DRM_ERROR("failed to pin obj: %d\n", ret);
 		goto out_fb;
 	}
 
 	ifbdev->fb = to_intel_framebuffer(fb);
+	/* ifbdev->vma = vma; */
 
 	return 0;
 
@@ -279,7 +282,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	return 0;
 
 out_unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	/* intel_unpin_fb_obj(&ifbdev->fb->base, NULL); */
 	drm_gem_object_unreference(&obj->base);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index db93eed9eacd..45f3d487944e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -475,17 +475,20 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 	u32 ggtt_offset;
+	struct i915_vma *vma;
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 	if (ctx->engine[ring->id].pin_count++)
 		return 0;
 
-	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
-	if (ret)
+	vma = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto reset_pin_count;
+	}
 
-	ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+	ggtt_offset = vma->node.start;
 	if (WARN_ON(ggtt_offset & 0xFFFFFFFF00000FFFULL)) {
 		ret = -ENODEV;
 		goto unpin_ctx_obj;
@@ -500,17 +503,17 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 	ringbuf->regs[CTX_RING_BUFFER_START+1] =
 		i915_gem_obj_ggtt_offset(ringbuf->obj);
 
+	ctx->engine[ring->id].vma = vma;
 	return 0;
 
 unpin_ctx_obj:
-	i915_gem_object_ggtt_unpin(ctx_obj);
+	i915_vma_unpin(vma);
 reset_pin_count:
 	ctx->engine[ring->id].pin_count = 0;
 
 	return ret;
 }
 
-
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
 					    struct intel_context *ctx)
 {
@@ -778,17 +781,18 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 static void intel_lr_context_unpin(struct intel_engine_cs *ring,
 				   struct intel_context *ctx)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 
 	if (--ctx->engine[ring->id].pin_count)
 		return;
 
-	kunmap(i915_gem_object_get_page(ctx_obj, 1));
+	kunmap(i915_gem_object_get_page(ctx->engine[ring->id].state, 1));
 	ringbuf->regs = NULL;
 
 	intel_unpin_ringbuffer_obj(ringbuf);
-	i915_gem_object_ggtt_unpin(ctx_obj);
+
+	i915_vma_unpin(ctx->engine[ring->id].vma);
+	ctx->engine[ring->id].vma = NULL;
 }
 
 void intel_execlists_retire_requests(struct intel_engine_cs *ring)
@@ -1147,7 +1151,7 @@ static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+	i915_vma_move_to_active(so.vma, ring);
 
 	ret = __i915_add_request(ring, file, so.obj);
 	/* intel_logical_ring_add_request moves object to inactive if it
@@ -1735,12 +1739,14 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	}
 
 	if (is_global_default_ctx) {
-		ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
-		if (ret) {
-			DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
-					ret);
+		struct i915_vma *vma;
+
+		vma = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
+		if (IS_ERR(vma)) {
+			DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %ld\n",
+					 PTR_ERR(vma));
 			drm_gem_object_unreference(&ctx_obj->base);
-			return ret;
+			return PTR_ERR(vma);
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 5fd2d5ac02e2..936cf160bb7d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -170,8 +170,8 @@ struct overlay_registers {
 struct intel_overlay {
 	struct drm_device *dev;
 	struct intel_crtc *crtc;
-	struct drm_i915_gem_object *vid_bo;
-	struct drm_i915_gem_object *old_vid_bo;
+	struct drm_i915_gem_object *vid_bo, *old_vid_bo;
+	struct i915_vma *vid_vma, *old_vid_vma;
 	bool active;
 	bool pfit_active;
 	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
@@ -197,7 +197,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
-					 i915_gem_obj_ggtt_offset(overlay->reg_bo));
+					 overlay->flip_addr);
 
 	return regs;
 }
@@ -299,7 +299,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
 	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
-	i915_gem_object_ggtt_unpin(obj);
+	i915_gem_object_unpin_from_display_plane(overlay->old_vid_vma);
 	drm_gem_object_unreference(&obj->base);
 
 	overlay->old_vid_bo = NULL;
@@ -718,6 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	struct drm_device *dev = overlay->dev;
 	u32 swidth, swidthsw, sheight, ostride;
 	enum pipe pipe = overlay->crtc->pipe;
+	struct i915_vma *vma;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -726,10 +727,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
-						   &i915_ggtt_view_normal);
-	if (ret != 0)
-		return ret;
+	vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
 
 	ret = i915_gem_object_put_fence(new_bo);
 	if (ret)
@@ -772,7 +772,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	swidth = params->src_w;
 	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
 	sheight = params->src_h;
-	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
+	iowrite32(vma->node.start + params->offset_Y, &regs->OBUF_0Y);
 	ostride = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -786,8 +786,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 				      params->src_w/uv_hscale);
 		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
 		sheight |= (params->src_h/uv_vscale) << 16;
-		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
-		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
+		iowrite32(vma->node.start + params->offset_U, &regs->OBUF_0U);
+		iowrite32(vma->node.start + params->offset_V, &regs->OBUF_0V);
 		ostride |= params->stride_UV << 16;
 	}
 
@@ -812,7 +812,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 			  INTEL_FRONTBUFFER_OVERLAY(pipe));
 
 	overlay->old_vid_bo = overlay->vid_bo;
+	overlay->old_vid_vma = overlay->vid_vma;
 	overlay->vid_bo = new_bo;
+	overlay->vid_vma = vma;
 
 	intel_frontbuffer_flip(dev,
 			       INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -820,7 +822,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	return 0;
 
 out_unpin:
-	i915_gem_object_ggtt_unpin(new_bo);
+	i915_gem_object_unpin_from_display_plane(vma);
 	return ret;
 }
 
@@ -1383,12 +1385,15 @@ void intel_setup_overlay(struct drm_device *dev)
 		}
 		overlay->flip_addr = reg_bo->phys_handle->busaddr;
 	} else {
-		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
-		if (ret) {
+		struct i915_vma *vma;
+
+		vma = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
+		if (IS_ERR(vma)) {
 			DRM_ERROR("failed to pin overlay register bo\n");
+			ret = PTR_ERR(vma);
 			goto out_free_bo;
 		}
-		overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
+		overlay->flip_addr = vma->node.start;
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
@@ -1466,7 +1471,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 			overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-						i915_gem_obj_ggtt_offset(overlay->reg_bo));
+						overlay->flip_addr);
 
 	return regs;
 }
@@ -1499,7 +1504,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
 		error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
 	else
-		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
+		error->base = overlay->flip_addr;
 
 	regs = intel_overlay_map_regs_atomic(overlay);
 	if (!regs)
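
Note what the overlay conversion buys: the GGTT address of the register bo is captured once, at pin time, in overlay->flip_addr, and every subsequent map (plain, atomic, and error capture) reuses that cached address instead of re-walking the object's vma list. Sketch of the non-physical path:

	vma = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
	if (IS_ERR(vma))
		goto out_free_bo;
	overlay->flip_addr = vma->node.start;	/* cached once at pin time */
	...
	/* later maps need no lookup: */
	regs = io_mapping_map_wc(dev_priv->gtt.mappable, overlay->flip_addr);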
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 913efe47054d..dfde7fd7b45e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -636,10 +636,11 @@ intel_fini_pipe_control(struct intel_engine_cs *ring)
 	if (ring->scratch.obj == NULL)
 		return;
 
-	if (INTEL_INFO(dev)->gen >= 5) {
+	if (INTEL_INFO(dev)->gen >= 5)
 		kunmap(sg_page(ring->scratch.obj->pages->sgl));
-		i915_gem_object_ggtt_unpin(ring->scratch.obj);
-	}
+
+	if (ring->scratch.vma)
+		i915_vma_unpin(ring->scratch.vma);
 
 	drm_gem_object_unreference(&ring->scratch.obj->base);
 	ring->scratch.obj = NULL;
@@ -648,6 +649,7 @@ intel_fini_pipe_control(struct intel_engine_cs *ring)
 int
 intel_init_pipe_control(struct intel_engine_cs *ring)
 {
+	struct i915_vma *vma;
 	int ret;
 
 	WARN_ON(ring->scratch.obj);
@@ -663,11 +665,13 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
 	if (ret)
 		goto err_unref;
 
-	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
-	if (ret)
+	vma = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto err_unref;
+	}
 
-	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+	ring->scratch.gtt_offset = vma->node.start;
 	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
 	if (ring->scratch.cpu_page == NULL) {
 		ret = -ENOMEM;
@@ -676,10 +680,11 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
 
 	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
 			 ring->name, ring->scratch.gtt_offset);
+	ring->scratch.vma = vma;
 	return 0;
 
 err_unpin:
-	i915_gem_object_ggtt_unpin(ring->scratch.obj);
+	i915_vma_unpin(vma);
 err_unref:
 	drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
@@ -1823,45 +1828,45 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
 static int init_status_page(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	unsigned flags;
+	int ret;
 
-	if ((obj = ring->status_page.obj) == NULL) {
-		unsigned flags;
-		int ret;
+	if (ring->status_page.obj)
+		return 0;
 
-		obj = i915_gem_object_create_internal(ring->dev, 4096);
-		if (obj == NULL) {
-			DRM_ERROR("Failed to allocate status page\n");
-			return -ENOMEM;
-		}
+	obj = i915_gem_object_create_internal(ring->dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate status page\n");
+		return -ENOMEM;
+	}
 
-		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-		if (ret)
-			goto err_unref;
-
-		flags = 0;
-		if (!HAS_LLC(ring->dev))
-			/* On g33, we cannot place HWS above 256MiB, so
-			 * restrict its pinning to the low mappable arena.
-			 * Though this restriction is not documented for
-			 * gen4, gen5, or byt, they also behave similarly
-			 * and hang if the HWS is placed at the top of the
-			 * GTT. To generalise, it appears that all !llc
-			 * platforms have issues with us placing the HWS
-			 * above the mappable region (even though we never
-			 * actualy map it).
-			 */
-			flags |= PIN_MAPPABLE;
-		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
-		if (ret) {
-err_unref:
-			drm_gem_object_unreference(&obj->base);
-			return ret;
-		}
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	if (ret)
+		goto err_unref;
 
-		ring->status_page.obj = obj;
+	flags = 0;
+	if (!HAS_LLC(ring->dev))
+		/* On g33, we cannot place HWS above 256MiB, so
+		 * restrict its pinning to the low mappable arena.
+		 * Though this restriction is not documented for
+		 * gen4, gen5, or byt, they also behave similarly
+		 * and hang if the HWS is placed at the top of the
+		 * GTT. To generalise, it appears that all !llc
+		 * platforms have issues with us placing the HWS
+		 * above the mappable region (even though we never
+			 * actually map it).
+		 */
+		flags |= PIN_MAPPABLE;
+	vma = i915_gem_obj_ggtt_pin(obj, 4096, flags);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
+	ring->status_page.obj = obj;
+	ring->status_page.gfx_addr = vma->node.start;
+
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
@@ -1869,6 +1874,10 @@ err_unref:
 			ring->name, ring->status_page.gfx_addr);
 
 	return 0;
+
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+	return ret;
 }
 
 static int init_phys_status_page(struct intel_engine_cs *ring)
@@ -1894,7 +1903,8 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 		i915_gem_object_unpin_vmap(ringbuf->obj);
 	else
 		iounmap(ringbuf->virtual_start);
-	i915_gem_object_ggtt_unpin(ringbuf->obj);
+	i915_vma_unpin(ringbuf->vma);
+	ringbuf->vma = NULL;
 }
 
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
@@ -1902,11 +1912,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = ringbuf->obj;
+	struct i915_vma *vma;
 	int ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-	if (ret)
-		return ret;
+	vma = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
 
 	if (HAS_LLC(dev_priv) && !obj->stolen) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@ -1932,10 +1943,11 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 		}
 	}
 
+	ringbuf->vma = vma;
 	return 0;
 
 unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	i915_vma_unpin(vma);
 	return ret;
 }
 
@@ -2448,14 +2460,18 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
 				i915.semaphores = 0;
 			} else {
+				struct i915_vma *vma;
+
 				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-				ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
-				if (ret != 0) {
+				vma = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+				if (IS_ERR(vma)) {
 					drm_gem_object_unreference(&obj->base);
 					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
 					i915.semaphores = 0;
-				} else
-					dev_priv->semaphore_obj = obj;
+					obj = NULL;
+				}
+
+				dev_priv->semaphore_obj = obj;
 			}
 		}
 
@@ -2549,21 +2565,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
+		struct i915_vma *vma;
+
 		obj = i915_gem_object_create_internal(dev, I830_WA_SIZE);
 		if (obj == NULL) {
 			DRM_ERROR("Failed to allocate batch bo\n");
 			return -ENOMEM;
 		}
 
-		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
-		if (ret != 0) {
+		vma = i915_gem_obj_ggtt_pin(obj, 0, 0);
+		if (IS_ERR(vma)) {
 			drm_gem_object_unreference(&obj->base);
-			DRM_ERROR("Failed to ping batch bo\n");
-			return ret;
+			DRM_ERROR("Failed to pin batch bo\n");
+			return PTR_ERR(vma);
 		}
 
 		ring->scratch.obj = obj;
-		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
+		ring->scratch.vma = vma;
+		ring->scratch.gtt_offset = vma->node.start;
 	}
 
 	ret = intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 75268b7d2d41..58931f902ccf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -101,6 +101,7 @@ struct intel_ringbuffer {
 	uint32_t descriptor;
 
 	struct intel_engine_cs *ring;
+	struct i915_vma *vma;
 
 	u32 head;
 	u32 tail;
@@ -289,6 +290,7 @@ struct  intel_engine_cs {
 
 	struct {
 		struct drm_i915_gem_object *obj;
+		struct i915_vma *vma;
 		u32 gtt_offset;
 		volatile u32 *cpu_page;
 	} scratch;
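
With the vma stored next to the object in both intel_ringbuffer and the scratch slot, teardown can release exactly the binding that was taken, without an object-to-vma lookup. The pairing, condensed (sketch):

	vma = i915_gem_obj_ggtt_pin(ringbuf->obj, PAGE_SIZE, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);
	ringbuf->vma = vma;	/* cookie kept for the matching unpin */
	...
	i915_vma_unpin(ringbuf->vma);
	ringbuf->vma = NULL;	/* one cookie per pin */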
-- 
2.1.4
