Re: [PATCH 37/38] drm/i915: Track pinned VMA

On Fri, Jun 03, 2016 at 05:55:52PM +0100, Chris Wilson wrote:
> Treat the VMA as the primary struct responsible for tracking bindings
> into the GPU's VM. That is we want to treat the VMA returned after we
> pin an object into the VM as the cookie we hold and eventually release
> when unpinning. Doing so eliminates the ambiguity in pinning the object
> and then searching for the relevant pin later.
> 
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>

Imo it would be good to split this up a bit more, like you already
extracted the overlay parts. I know there are probably a few chicken&egg
dependencies in here, but imo it's still needed.
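
For anyone following along, the calling-convention change at the core of
the series, distilled from the hunks below (just a sketch, minus the
flags and error handling that vary per call site):

	struct i915_vma *vma;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* offsets now come from the cookie, not an object lookup */
	offset = vma->node.start;

	i915_vma_unpin(vma);

Every pin site gets back the exact vma it is expected to release,
instead of pinning the object and redoing the (obj, vm, view) lookup at
unpin time.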
-Daniel

> ---
>  drivers/gpu/drm/i915/i915_debugfs.c          |  75 +++++-----
>  drivers/gpu/drm/i915/i915_drv.h              |  64 +++------
>  drivers/gpu/drm/i915/i915_gem.c              | 200 ++++++---------------------
>  drivers/gpu/drm/i915/i915_gem_context.c      |  43 +++---
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  60 ++++----
>  drivers/gpu/drm/i915/i915_gem_fence.c        |  64 ++++-----
>  drivers/gpu/drm/i915/i915_gem_gtt.c          |  58 +++++---
>  drivers/gpu/drm/i915/i915_gem_gtt.h          |  14 --
>  drivers/gpu/drm/i915/i915_gem_render_state.c |  31 ++---
>  drivers/gpu/drm/i915/i915_gem_render_state.h |   2 +-
>  drivers/gpu/drm/i915/i915_gem_request.c      |  10 +-
>  drivers/gpu/drm/i915/i915_gem_request.h      |   2 +-
>  drivers/gpu/drm/i915/i915_gem_stolen.c       |   2 +-
>  drivers/gpu/drm/i915/i915_gem_tiling.c       |  42 +++---
>  drivers/gpu/drm/i915/i915_gpu_error.c        |  55 +++-----
>  drivers/gpu/drm/i915/i915_guc_submission.c   |  28 ++--
>  drivers/gpu/drm/i915/intel_display.c         |  57 +++++---
>  drivers/gpu/drm/i915/intel_drv.h             |   5 +-
>  drivers/gpu/drm/i915/intel_fbc.c             |   2 +-
>  drivers/gpu/drm/i915/intel_fbdev.c           |  19 ++-
>  drivers/gpu/drm/i915/intel_guc_loader.c      |  29 ++--
>  drivers/gpu/drm/i915/intel_lrc.c             | 113 ++++++++-------
>  drivers/gpu/drm/i915/intel_overlay.c         |  44 +++---
>  drivers/gpu/drm/i915/intel_ringbuffer.c      | 194 ++++++++++++++------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h      |  20 +--
>  drivers/gpu/drm/i915/intel_sprite.c          |   8 +-
>  26 files changed, 549 insertions(+), 692 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 485fc23893d6..938a95df8a11 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -111,7 +111,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
>  
>  static char get_global_flag(struct drm_i915_gem_object *obj)
>  {
> -	return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
> +	return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
>  }
>  
>  static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
> @@ -278,7 +278,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
>  	struct drm_device *dev = node->minor->dev;
>  	struct drm_i915_private *dev_priv = dev->dev_private;
>  	struct drm_i915_gem_object *obj;
> -	u64 total_obj_size, total_gtt_size;
> +	u64 total_obj_size;
>  	LIST_HEAD(stolen);
>  	int count, ret;
>  
> @@ -286,7 +286,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
>  	if (ret)
>  		return ret;
>  
> -	total_obj_size = total_gtt_size = count = 0;
> +	total_obj_size = count = 0;
>  	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
>  		if (obj->stolen == NULL)
>  			continue;
> @@ -294,7 +294,6 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
>  		list_add(&obj->obj_exec_link, &stolen);
>  
>  		total_obj_size += obj->base.size;
> -		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
>  		count++;
>  	}
>  	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
> @@ -317,8 +316,8 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
>  	}
>  	mutex_unlock(&dev->struct_mutex);
>  
> -	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
> -		   count, total_obj_size, total_gtt_size);
> +	seq_printf(m, "Total %d objects, %llu bytes\n",
> +		   count, total_obj_size);
>  	return 0;
>  }
>  
> @@ -327,7 +326,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
>  		size += i915_gem_obj_total_ggtt_size(obj); \
>  		++count; \
>  		if (obj->map_and_fenceable) { \
> -			mappable_size += i915_gem_obj_ggtt_size(obj); \
> +			mappable_size += obj->base.size; \
>  			++mappable_count; \
>  		} \
>  	} \
> @@ -451,10 +450,10 @@ static void print_context_stats(struct seq_file *m,
>  
>  #define count_vmas(list, member) do { \
>  	list_for_each_entry(vma, list, member) { \
> -		size += i915_gem_obj_total_ggtt_size(vma->obj); \
> +		size += vma->size; \
>  		++count; \
>  		if (vma->obj->map_and_fenceable) { \
> -			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
> +			mappable_size += vma->size; \
>  			++mappable_count; \
>  		} \
>  	} \
> @@ -517,11 +516,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
>  	size = count = mappable_size = mappable_count = 0;
>  	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
>  		if (obj->fault_mappable) {
> -			size += i915_gem_obj_ggtt_size(obj);
> +			size += obj->base.size;
>  			++count;
>  		}
>  		if (obj->pin_display) {
> -			mappable_size += i915_gem_obj_ggtt_size(obj);
> +			mappable_size += obj->base.size;
>  			++mappable_count;
>  		}
>  		if (obj->madv == I915_MADV_DONTNEED) {
> @@ -589,30 +588,29 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
>  	uintptr_t list = (uintptr_t) node->info_ent->data;
>  	struct drm_i915_private *dev_priv = dev->dev_private;
>  	struct drm_i915_gem_object *obj;
> -	u64 total_obj_size, total_gtt_size;
> +	u64 total_obj_size;
>  	int count, ret;
>  
>  	ret = mutex_lock_interruptible(&dev->struct_mutex);
>  	if (ret)
>  		return ret;
>  
> -	total_obj_size = total_gtt_size = count = 0;
> +	total_obj_size = count = 0;
>  	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> -		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
> +		if (list == PINNED_LIST && !obj->pin_display)
>  			continue;
>  
>  		seq_puts(m, "   ");
>  		describe_obj(m, obj);
>  		seq_putc(m, '\n');
>  		total_obj_size += obj->base.size;
> -		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
>  		count++;
>  	}
>  
>  	mutex_unlock(&dev->struct_mutex);
>  
> -	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
> -		   count, total_obj_size, total_gtt_size);
> +	seq_printf(m, "Total %d objects, %llu bytes\n",
> +		   count, total_obj_size);
>  
>  	return 0;
>  }
> @@ -2075,38 +2073,35 @@ static void i915_dump_lrc_obj(struct seq_file *m,
>  			      struct i915_gem_context *ctx,
>  			      struct intel_engine_cs *engine)
>  {
> -	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
> +	struct drm_i915_gem_object *obj = ctx->engine[engine->id].state;
> +	struct i915_vma *vma = ctx->engine[engine->id].vma;
>  	struct page *page;
> -	uint32_t *reg_state;
>  	int j;
> -	unsigned long ggtt_offset = 0;
>  
>  	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
> -
> -	if (ctx_obj == NULL) {
> -		seq_puts(m, "\tNot allocated\n");
> -		return;
> -	}
> -
> -	if (!i915_gem_obj_ggtt_bound(ctx_obj))
> +	if (vma == NULL) {
>  		seq_puts(m, "\tNot bound in GGTT\n");
> -	else
> -		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
> +	} else {
> +		seq_printf(m, "\tBound in GGTT at %x\n",
> +			   lower_32_bits(vma->node.start));
> +	}
>  
> -	if (i915_gem_object_get_pages(ctx_obj)) {
> -		seq_puts(m, "\tFailed to get pages for context object\n");
> +	if (i915_gem_object_get_pages(obj)) {
> +		seq_puts(m, "\tFailed to get pages for context object\n\n");
>  		return;
>  	}
>  
> -	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
> -	if (!WARN_ON(page == NULL)) {
> -		reg_state = kmap_atomic(page);
> -
> +	page = i915_gem_object_get_page(obj, LRC_STATE_PN);
> +	if (page != NULL) {
> +		uint32_t *reg_state = kmap_atomic(page);
>  		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
> -			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
> -				   ggtt_offset + 4096 + (j * 4),
> -				   reg_state[j], reg_state[j + 1],
> -				   reg_state[j + 2], reg_state[j + 3]);
> +			seq_printf(m,
> +				   "\t[0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
> +				   j * 4,
> +				   reg_state[j],
> +				   reg_state[j + 1],
> +				   reg_state[j + 2],
> +				   reg_state[j + 3]);
>  		}
>  		kunmap_atomic(reg_state);
>  	}
> @@ -3210,7 +3205,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
>  		struct page *page;
>  		uint64_t *seqno;
>  
> -		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
> +		page = i915_gem_object_get_page(dev_priv->semaphore_vma->obj, 0);
>  
>  		seqno = (uint64_t *)kmap_atomic(page);
>  		for_each_engine_id(engine, dev_priv, id) {
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 77564f378771..ed968deb36aa 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -880,8 +880,8 @@ struct i915_gem_context {
>  
>  	struct intel_context {
>  		struct drm_i915_gem_object *state;
> +		struct i915_vma *vma;
>  		struct intel_ring *ring;
> -		struct i915_vma *lrc_vma;
>  		uint32_t *lrc_reg_state;
>  		u64 lrc_desc;
>  		int pin_count;
> @@ -1736,7 +1736,7 @@ struct drm_i915_private {
>  	struct pci_dev *bridge_dev;
>  	struct i915_gem_context *kernel_context;
>  	struct intel_engine_cs engine[I915_NUM_ENGINES];
> -	struct drm_i915_gem_object *semaphore_obj;
> +	struct i915_vma *semaphore_vma;
>  	uint32_t next_seqno;
>  
>  	struct drm_dma_handle *status_page_dmah;
> @@ -2996,7 +2996,7 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
>  	__i915_vma_unpin(vma);
>  }
>  
> -int __must_check
> +struct i915_vma * __must_check
>  i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
>  			 const struct i915_ggtt_view *view,
>  			 uint64_t size,
> @@ -3174,12 +3174,11 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
>  				  bool write);
>  int __must_check
>  i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
> -int __must_check
> +struct i915_vma * __must_check
>  i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
>  				     u32 alignment,
>  				     const struct i915_ggtt_view *view);
> -void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
> -					      const struct i915_ggtt_view *view);
> +void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
>  int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
>  				int align);
>  int i915_gem_open(struct drm_device *dev, struct drm_file *file);
> @@ -3200,63 +3199,34 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
>  struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
>  				struct drm_gem_object *gem_obj, int flags);
>  
> -u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
> -				  const struct i915_ggtt_view *view);
> -u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
> -			struct i915_address_space *vm);
> -static inline u64
> -i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
> -{
> -	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
> -}
> -
> -bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
> -				  const struct i915_ggtt_view *view);
> -bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
> -			struct i915_address_space *vm);
> -
>  struct i915_vma *
>  i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
> -		    struct i915_address_space *vm);
> -struct i915_vma *
> -i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
> -			  const struct i915_ggtt_view *view);
> +		     struct i915_address_space *vm,
> +		     const struct i915_ggtt_view *view);
>  
>  struct i915_vma *
>  i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
> -				  struct i915_address_space *vm);
> -struct i915_vma *
> -i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
> -				       const struct i915_ggtt_view *view);
> -
> -static inline struct i915_vma *
> -i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
> -{
> -	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
> -}
> -bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
> +				  struct i915_address_space *vm,
> +				  const struct i915_ggtt_view *view);
>  
> -/* Some GGTT VM helpers */
>  static inline struct i915_hw_ppgtt *
>  i915_vm_to_ppgtt(struct i915_address_space *vm)
>  {
>  	return container_of(vm, struct i915_hw_ppgtt, base);
>  }
>  
> -static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
> +static inline struct i915_vma *
> +i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
> +			const struct i915_ggtt_view *view)
>  {
> -	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
> +	return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
>  }
>  
> -unsigned long
> -i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
> -
> -void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
> -				     const struct i915_ggtt_view *view);
> -static inline void
> -i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
> +static inline unsigned long
> +i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
> +			    const struct i915_ggtt_view *view)
>  {
> -	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
> +	return i915_gem_object_to_ggtt(o, view)->node.start;
>  }
>  
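One thing that jumps out in the header: the new offset helper chases the
lookup result unconditionally, so an unbound object now oopses where the
old i915_gem_obj_ggtt_offset_view() WARNed and returned -1. Something
along these lines (hypothetical variant, reusing the GEM_BUG_ON pattern
the patch already uses elsewhere) would keep the precondition visible:

	static inline unsigned long
	i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
				    const struct i915_ggtt_view *view)
	{
		struct i915_vma *vma = i915_gem_object_to_ggtt(o, view);

		GEM_BUG_ON(vma == NULL); /* caller must hold the pin */
		return vma->node.start;
	}
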
>  /* i915_gem_fence.c */
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index dad00800aeef..e0db9b02ee04 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -848,16 +848,18 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
>  {
>  	struct drm_i915_private *dev_priv = to_i915(dev);
>  	struct i915_ggtt *ggtt = &dev_priv->ggtt;
> +	struct i915_vma *vma;
>  	ssize_t remain;
>  	loff_t offset, page_base;
>  	char __user *user_data;
>  	int page_offset, page_length, ret;
>  
> -	ret = i915_gem_object_ggtt_pin(obj, NULL,
> -				       0, 0,
> +	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
>  				       PIN_MAPPABLE | PIN_NONBLOCK);
> -	if (ret)
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
>  		goto out;
> +	}
>  
>  	ret = i915_gem_object_set_to_gtt_domain(obj, true);
>  	if (ret)
> @@ -870,7 +872,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
>  	user_data = u64_to_user_ptr(args->data_ptr);
>  	remain = args->size;
>  
> -	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
> +	offset = vma->node.start + args->offset;
>  
>  	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
>  
> @@ -905,7 +907,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
>  out_flush:
>  	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
>  out_unpin:
> -	i915_gem_object_ggtt_unpin(obj);
> +	i915_vma_unpin(vma);
>  out:
>  	return ret;
>  }
> @@ -1382,8 +1384,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
>  	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
>  	struct drm_device *dev = obj->base.dev;
>  	struct drm_i915_private *dev_priv = to_i915(dev);
> -	struct i915_ggtt *ggtt = &dev_priv->ggtt;
>  	struct i915_ggtt_view view = i915_ggtt_view_normal;
> +	struct i915_vma *ggtt;
>  	pgoff_t page_offset;
>  	unsigned long pfn;
>  	int ret = 0;
> @@ -1417,7 +1419,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
>  	}
>  
>  	/* Use a partial view if the object is bigger than the aperture. */
> -	if (obj->base.size >= ggtt->mappable_end &&
> +	if (obj->base.size >= dev_priv->ggtt.mappable_end &&
>  	    obj->tiling_mode == I915_TILING_NONE) {
>  		static const unsigned int chunk_size = 256; // 1 MiB
>  
> @@ -1432,9 +1434,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
>  	}
>  
>  	/* Now pin it into the GTT if needed */
> -	ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
> -	if (ret)
> +	ggtt = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
> +	if (IS_ERR(ggtt)) {
> +		ret = PTR_ERR(ggtt);
>  		goto err_unlock;
> +	}
>  
>  	ret = i915_gem_object_set_to_gtt_domain(obj, write);
>  	if (ret)
> @@ -1445,8 +1449,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
>  		goto err_unpin;
>  
>  	/* Finally, remap it using the new GTT offset */
> -	pfn = ggtt->mappable_base +
> -		i915_gem_obj_ggtt_offset_view(obj, &view);
> +	pfn = dev_priv->ggtt.mappable_base + ggtt->node.start;
>  	pfn >>= PAGE_SHIFT;
>  
>  	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
> @@ -1488,7 +1491,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
>  					    pfn + page_offset);
>  	}
>  err_unpin:
> -	i915_gem_object_ggtt_unpin_view(obj, &view);
> +	__i915_vma_unpin(ggtt);
>  err_unlock:
>  	mutex_unlock(&dev->struct_mutex);
>  err_rpm:
> @@ -2925,7 +2928,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>  					    old_write_domain);
>  
>  	/* And bump the LRU for this access */
> -	vma = i915_gem_obj_to_ggtt(obj);
> +	vma = i915_gem_object_to_ggtt(obj, NULL);
>  	if (vma &&
>  	    drm_mm_node_allocated(&vma->node) &&
>  	    !i915_vma_is_active(vma))
> @@ -3149,11 +3152,12 @@ rpm_put:
>   * Can be called from an uninterruptible phase (modesetting) and allows
>   * any flushes to be pipelined (for pageflips).
>   */
> -int
> +struct i915_vma *
>  i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
>  				     u32 alignment,
>  				     const struct i915_ggtt_view *view)
>  {
> +	struct i915_vma *vma;
>  	u32 old_read_domains, old_write_domain;
>  	int ret;
>  
> @@ -3173,19 +3177,23 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
>  	 */
>  	ret = i915_gem_object_set_cache_level(obj,
>  					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
> -	if (ret)
> +	if (ret) {
> +		vma = ERR_PTR(ret);
>  		goto err_unpin_display;
> +	}
>  
>  	/* As the user may map the buffer once pinned in the display plane
>  	 * (e.g. libkms for the bootup splash), we have to ensure that we
>  	 * always use map_and_fenceable for all scanout buffers.
>  	 */
> -	ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
> +	vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
>  				       view->type == I915_GGTT_VIEW_NORMAL ?
>  				       PIN_MAPPABLE : 0);
> -	if (ret)
> +	if (IS_ERR(vma))
>  		goto err_unpin_display;
>  
> +	WARN_ON(obj->pin_display > vma->pin_count);
> +
>  	i915_gem_object_flush_cpu_write_domain(obj);
>  
>  	old_write_domain = obj->base.write_domain;
> @@ -3204,24 +3212,24 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
>  	/* Increment the pages_pin_count to guard against the shrinker */
>  	obj->pages_pin_count++;
>  
> -	return 0;
> +	return vma;
>  
>  err_unpin_display:
>  	obj->pin_display--;
> -	return ret;
> +	return vma;
>  }
>  
>  void
> -i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
> -					 const struct i915_ggtt_view *view)
> +i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
>  {
> -	if (WARN_ON(obj->pin_display == 0))
> +	if (WARN_ON(vma->obj->pin_display == 0))
>  		return;
>  
> -	i915_gem_object_ggtt_unpin_view(obj, view);
> +	vma->obj->pin_display--;
> +	vma->obj->pages_pin_count--;
>  
> -	obj->pages_pin_count--;
> -	obj->pin_display--;
> +	i915_vma_unpin(vma);
> +	WARN_ON(vma->obj->pin_display > vma->pin_count);
>  }
>  
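So the display path now pins and unpins the same cookie, and callers end
up looking like (sketch, mirroring the intel_display.c hunk further
down):

	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
	if (IS_ERR(vma))
		return PTR_ERR(vma);
	...
	i915_gem_object_unpin_from_display_plane(vma);

This pin_to_display_plane conversion looks self-contained enough to be
one of the split-out patches.
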
>  /**
> @@ -3421,26 +3429,24 @@ err:
>  	return ret;
>  }
>  
> -int
> +struct i915_vma *
>  i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
> -			 const struct i915_ggtt_view *view,
> +			 const struct i915_ggtt_view *ggtt_view,
>  			 uint64_t size,
>  			 uint64_t alignment,
>  			 uint64_t flags)
>  {
> +	struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
>  	struct i915_vma *vma;
>  	int ret;
>  
> -	if (view == NULL)
> -		view = &i915_ggtt_view_normal;
> -
> -	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
> +	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, ggtt_view);
>  	if (IS_ERR(vma))
> -		return PTR_ERR(vma);
> +		return vma;
>  
>  	if (i915_vma_misplaced(vma, size, alignment, flags)) {
>  		if (flags & PIN_NONBLOCK && (vma->pin_count | vma->active))
> -			return -ENOSPC;
> +			return ERR_PTR(-ENOSPC);
>  
>  		WARN(vma->pin_count,
>  		     "bo is already pinned in ggtt with incorrect alignment:"
> @@ -3453,17 +3459,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
>  		     obj->map_and_fenceable);
>  		ret = i915_vma_unbind(vma);
>  		if (ret)
> -			return ret;
> +			return ERR_PTR(ret);
>  	}
>  
> -	return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
> -}
> +	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
> +	if (ret)
> +		return ERR_PTR(ret);
>  
> -void
> -i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
> -				const struct i915_ggtt_view *view)
> -{
> -	i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
> +	return vma;
>  }
>  
>  static __always_inline unsigned
> @@ -3799,31 +3802,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
>  	intel_runtime_pm_put(dev_priv);
>  }
>  
> -struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
> -				     struct i915_address_space *vm)
> -{
> -	struct i915_vma *vma;
> -	list_for_each_entry(vma, &obj->vma_list, obj_link) {
> -		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
> -		    vma->vm == vm)
> -			return vma;
> -	}
> -	return NULL;
> -}
> -
> -struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
> -					   const struct i915_ggtt_view *view)
> -{
> -	struct i915_vma *vma;
> -
> -	GEM_BUG_ON(!view);
> -
> -	list_for_each_entry(vma, &obj->vma_list, obj_link)
> -		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
> -			return vma;
> -	return NULL;
> -}
> -
>  int
>  i915_gem_suspend(struct drm_device *dev)
>  {
> @@ -4321,96 +4299,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
>  	}
>  }
>  
> -/* All the new VM stuff */
> -u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
> -			struct i915_address_space *vm)
> -{
> -	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
> -	struct i915_vma *vma;
> -
> -	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
> -
> -	list_for_each_entry(vma, &o->vma_list, obj_link) {
> -		if (vma->is_ggtt &&
> -		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
> -			continue;
> -		if (vma->vm == vm)
> -			return vma->node.start;
> -	}
> -
> -	WARN(1, "%s vma for this object not found.\n",
> -	     i915_is_ggtt(vm) ? "global" : "ppgtt");
> -	return -1;
> -}
> -
> -u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
> -				  const struct i915_ggtt_view *view)
> -{
> -	struct i915_vma *vma;
> -
> -	list_for_each_entry(vma, &o->vma_list, obj_link)
> -		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
> -			return vma->node.start;
> -
> -	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
> -	return -1;
> -}
> -
> -bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
> -			struct i915_address_space *vm)
> -{
> -	struct i915_vma *vma;
> -
> -	list_for_each_entry(vma, &o->vma_list, obj_link) {
> -		if (vma->is_ggtt &&
> -		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
> -			continue;
> -		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
> -			return true;
> -	}
> -
> -	return false;
> -}
> -
> -bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
> -				  const struct i915_ggtt_view *view)
> -{
> -	struct i915_vma *vma;
> -
> -	list_for_each_entry(vma, &o->vma_list, obj_link)
> -		if (vma->is_ggtt &&
> -		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
> -		    drm_mm_node_allocated(&vma->node))
> -			return true;
> -
> -	return false;
> -}
> -
> -unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
> -{
> -	struct i915_vma *vma;
> -
> -	GEM_BUG_ON(list_empty(&o->vma_list));
> -
> -	list_for_each_entry(vma, &o->vma_list, obj_link) {
> -		if (vma->is_ggtt &&
> -		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
> -			return vma->node.size;
> -	}
> -
> -	return 0;
> -}
> -
> -bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
> -{
> -	struct i915_vma *vma;
> -	list_for_each_entry(vma, &obj->vma_list, obj_link)
> -		if (i915_vma_is_pinned(vma))
> -			return true;
> -
> -	return false;
> -}
> -
>  /* Like i915_gem_object_get_page(), but mark the returned page dirty */
>  struct page *
>  i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index c9b8c2c62828..0ed8a4a7321a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -360,8 +360,8 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
>  	} else {
>  		struct intel_context *ce = &ctx->engine[engine->id];
>  
> -		if (ce->state)
> -			i915_gem_object_ggtt_unpin(ce->state);
> +		if (ce->vma)
> +			i915_vma_unpin(ce->vma);
>  
>  		i915_gem_context_put(ctx);
>  	}
> @@ -580,9 +580,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  
>  	intel_ring_emit(ring, MI_NOOP);
>  	intel_ring_emit(ring, MI_SET_CONTEXT);
> -	intel_ring_emit(ring,
> -			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
> -			flags);
> +	intel_ring_emit(ring, req->ctx->engine[RCS].vma->node.start | flags);
>  	/*
>  	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
>  	 * WaMiSetContext_Hang:snb,ivb,vlv
> @@ -610,7 +608,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  					MI_STORE_REGISTER_MEM |
>  					MI_SRM_LRM_GLOBAL_GTT);
>  			intel_ring_emit_reg(ring, last_reg);
> -			intel_ring_emit(ring, req->engine->scratch.gtt_offset);
> +			intel_ring_emit(ring, req->engine->scratch->node.start);
>  			intel_ring_emit(ring, MI_NOOP);
>  		}
>  		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
> @@ -715,6 +713,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>  	struct intel_engine_cs *engine = req->engine;
>  	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
>  	struct i915_gem_context *from;
> +	struct i915_vma *vma;
>  	u32 hw_flags;
>  	int ret, i;
>  
> @@ -722,10 +721,17 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>  		return 0;
>  
>  	/* Trying to pin first makes error handling easier. */
> -	ret = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0,
> +	vma = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0,
>  				       to->ggtt_alignment, 0);
> -	if (ret)
> -		return ret;
> +	if (IS_ERR(vma))
> +		return PTR_ERR(vma);
> +
> +	to->engine[RCS].vma = vma;
> +
> +	if (WARN_ON(!(vma->bound & GLOBAL_BIND))) {
> +		ret = -ENODEV;
> +		goto unpin_vma;
> +	}
>  
>  	/*
>  	 * Pin can switch back to the default context if we end up calling into
> @@ -746,7 +752,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>  	 */
>  	ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
>  	if (ret)
> -		goto unpin_out;
> +		goto unpin_vma;
>  
>  	if (needs_pd_load_pre(ppgtt, engine, to)) {
>  		/* Older GENs and non render rings still want the load first,
> @@ -756,7 +762,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>  		trace_switch_mm(engine, to);
>  		ret = ppgtt->switch_mm(ppgtt, req);
>  		if (ret)
> -			goto unpin_out;
> +			goto unpin_vma;
>  	}
>  
>  	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
> @@ -773,7 +779,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>  	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
>  		ret = mi_set_context(req, hw_flags);
>  		if (ret)
> -			goto unpin_out;
> +			goto unpin_vma;
>  	}
>  
>  	/* The backing object for the context is done after switching to the
> @@ -783,8 +789,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>  	 * MI_SET_CONTEXT instead of when the next seqno has completed.
>  	 */
>  	if (from != NULL) {
> -		struct drm_i915_gem_object *obj = from->engine[RCS].state;
> -
>  		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
>  		 * whole damn pipeline, we don't need to explicitly mark the
>  		 * object dirty. The only exception is that the context must be
> @@ -792,11 +796,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>  		 * able to defer doing this until we know the object would be
>  		 * swapped, but there is no way to do that yet.
>  		 */
> -		obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
> -		i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
> -
> +		i915_vma_move_to_active(from->engine[RCS].vma, req, 0);
>  		/* obj is kept alive until the next request by its active ref */
> -		i915_gem_object_ggtt_unpin(obj);
> +		i915_vma_unpin(from->engine[RCS].vma);
> +
>  		i915_gem_context_put(from);
>  	}
>  	engine->last_context = i915_gem_context_get(to);
> @@ -841,8 +844,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>  
>  	return 0;
>  
> -unpin_out:
> -	i915_gem_object_ggtt_unpin(to->engine[RCS].state);
> +unpin_vma:
> +	i915_vma_unpin(vma);
>  	return ret;
>  }
>  
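One question on the context-switch hunk: the old code set
obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION before moving the
retiring context to the active list, and that write just disappears
here. If i915_vma_move_to_active() now takes care of the domain
tracking, that deserves a mention in the commit message; if not, it
looks like a behaviour change hiding in the rename.
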
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index b89e9d2b33c4..a29c4b6fea28 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -174,8 +174,8 @@ eb_lookup_vmas(struct eb_vmas *eb,
>  		 * from the (obj, vm) we don't run the risk of creating
>  		 * duplicated vmas for the same vm.
>  		 */
> -		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
> -		if (IS_ERR(vma)) {
> +		vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
> +		if (unlikely(IS_ERR(vma))) {
>  			DRM_DEBUG("Failed to lookup VMA\n");
>  			ret = PTR_ERR(vma);
>  			goto err;
> @@ -343,30 +343,34 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>  		   struct drm_i915_gem_relocation_entry *reloc,
>  		   uint64_t target_offset)
>  {
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = to_i915(dev);
> +	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>  	struct i915_ggtt *ggtt = &dev_priv->ggtt;
> +	struct i915_vma *vma;
>  	uint64_t delta = relocation_target(reloc, target_offset);
>  	uint64_t offset;
>  	void __iomem *reloc_page;
>  	int ret;
>  
> +	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
> +	if (IS_ERR(vma))
> +		return PTR_ERR(vma);
> +
>  	ret = i915_gem_object_set_to_gtt_domain(obj, true);
>  	if (ret)
> -		return ret;
> +		goto unpin;
>  
>  	ret = i915_gem_object_put_fence(obj);
>  	if (ret)
> -		return ret;
> +		goto unpin;
>  
>  	/* Map the page containing the relocation we're going to perform.  */
> -	offset = i915_gem_obj_ggtt_offset(obj);
> +	offset = vma->node.start;
>  	offset += reloc->offset;
>  	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
>  					      offset & PAGE_MASK);
>  	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
>  
> -	if (INTEL_INFO(dev)->gen >= 8) {
> +	if (INTEL_GEN(dev_priv) >= 8) {
>  		offset += sizeof(uint32_t);
>  
>  		if (offset_in_page(offset) == 0) {
> @@ -382,7 +386,9 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>  
>  	io_mapping_unmap_atomic(reloc_page);
>  
> -	return 0;
> +unpin:
> +	i915_vma_unpin(vma);
> +	return ret;
>  }
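
relocate_entry_gtt() taking its own pin/unpin around the aperture write
would also stand alone nicely as a patch; it makes the PIN_MAPPABLE
requirement explicit instead of assuming the execbuf reservation already
left the object somewhere CPU-reachable.
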
>  
>  static void
> @@ -1236,7 +1242,7 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
>  	return 0;
>  }
>  
> -static struct i915_vma*
> +static struct i915_vma *
>  i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
>  			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
>  			  struct drm_i915_gem_object *batch_obj,
> @@ -1260,31 +1266,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
>  			      batch_start_offset,
>  			      batch_len,
>  			      is_master);
> -	if (ret)
> +	if (ret) {
> +		if (ret == -EACCES) /* unhandled chained batch */
> +			vma = NULL;
> +		else
> +			vma = ERR_PTR(ret);
>  		goto err;
> +	}
>  
> -	ret = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
> -	if (ret)
> +	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
>  		goto err;
> -
> -	i915_gem_object_unpin_pages(shadow_batch_obj);
> +	}
>  
>  	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
>  
> -	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
>  	vma->exec_entry = shadow_exec_entry;
>  	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
>  	i915_gem_object_get(shadow_batch_obj);
>  	list_add_tail(&vma->exec_list, &eb->vmas);
>  
> -	return vma;
> -
>  err:
>  	i915_gem_object_unpin_pages(shadow_batch_obj);
> -	if (ret == -EACCES) /* unhandled chained batch */
> -		return NULL;
> -	else
> -		return ERR_PTR(ret);
> +	return vma;
>  }
>  
>  static int
> @@ -1631,6 +1636,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  	 * hsw should have this fixed, but bdw mucks it up again. */
>  	if (dispatch_flags & I915_DISPATCH_SECURE) {
>  		struct drm_i915_gem_object *obj = params->batch_vma->obj;
> +		struct i915_vma *vma;
>  
>  		/*
>  		 * So on first glance it looks freaky that we pin the batch here
> @@ -1642,11 +1648,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  		 *   fitting due to fragmentation.
>  		 * So this is actually safe.
>  		 */
> -		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
> -		if (ret)
> +		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
> +		if (IS_ERR(vma)) {
> +			ret = PTR_ERR(vma);
>  			goto err;
> +		}
>  
> -		params->batch_vma = i915_gem_obj_to_ggtt(obj);
> +		params->batch_vma = vma;
>  	}
>  
>  	/* Allocate a request for this batch buffer nice and early. */
> @@ -1662,7 +1670,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  	 * inactive_list and lose its active reference. Hence we do not need
>  	 * to explicitly hold another reference here.
>  	 */
> -	params->request->batch_obj = params->batch_vma->obj;
> +	params->request->batch = params->batch_vma;
>  
>  	ret = i915_gem_request_add_to_client(params->request, file);
>  	if (ret)
> diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
> index ee91705734bc..187611eafa99 100644
> --- a/drivers/gpu/drm/i915/i915_gem_fence.c
> +++ b/drivers/gpu/drm/i915/i915_gem_fence.c
> @@ -85,20 +85,14 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
>  	POSTING_READ(fence_reg_lo);
>  
>  	if (obj) {
> -		u32 size = i915_gem_obj_ggtt_size(obj);
> -		uint64_t val;
> -
> -		/* Adjust fence size to match tiled area */
> -		if (obj->tiling_mode != I915_TILING_NONE) {
> -			uint32_t row_size = obj->stride *
> -				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
> -			size = (size / row_size) * row_size;
> -		}
> -
> -		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
> -				 0xfffff000) << 32;
> -		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
> -		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
> +		struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
> +		u32 row_size = obj->stride * (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
> +		u32 size = (u32)vma->node.size / row_size * row_size;
> +		u64 val;
> +
> +		val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
> +		val |= vma->node.start & 0xfffff000;
> +		val |= (u64)((obj->stride / 128) - 1) << fence_pitch_shift;
>  		if (obj->tiling_mode == I915_TILING_Y)
>  			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
>  		val |= I965_FENCE_REG_VALID;
> @@ -121,15 +115,17 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
>  	u32 val;
>  
>  	if (obj) {
> -		u32 size = i915_gem_obj_ggtt_size(obj);
> +		struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
>  		int pitch_val;
>  		int tile_width;
>  
> -		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
> -		     (size & -size) != size ||
> -		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
> -		     "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
> -		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
> +		WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
> +		     !is_power_of_2(vma->node.size) ||
> +		     (vma->node.start & (vma->node.size - 1)),
> +		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08lx) aligned\n",
> +		     (long)vma->node.start,
> +		     obj->map_and_fenceable,
> +		     (long)vma->node.size);
>  
>  		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
>  			tile_width = 128;
> @@ -140,10 +136,10 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
>  		pitch_val = obj->stride / tile_width;
>  		pitch_val = ffs(pitch_val) - 1;
>  
> -		val = i915_gem_obj_ggtt_offset(obj);
> +		val = vma->node.start;
>  		if (obj->tiling_mode == I915_TILING_Y)
>  			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
> -		val |= I915_FENCE_SIZE_BITS(size);
> +		val |= I915_FENCE_SIZE_BITS(vma->node.size);
>  		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
>  		val |= I830_FENCE_REG_VALID;
>  	} else
> @@ -160,22 +156,22 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
>  	uint32_t val;
>  
>  	if (obj) {
> -		u32 size = i915_gem_obj_ggtt_size(obj);
> +		struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
>  		uint32_t pitch_val;
>  
> -		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
> -		     (size & -size) != size ||
> -		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
> -		     "object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
> -		     i915_gem_obj_ggtt_offset(obj), size);
> +		WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
> +		     !is_power_of_2(vma->node.size) ||
> +		     (vma->node.start & (vma->node.size - 1)),
> +		     "object 0x%08lx not 512K or pot-size 0x%08lx aligned\n",
> +		     (long)vma->node.start, (long)vma->node.size);
>  
>  		pitch_val = obj->stride / 128;
>  		pitch_val = ffs(pitch_val) - 1;
>  
> -		val = i915_gem_obj_ggtt_offset(obj);
> +		val = vma->node.start;
>  		if (obj->tiling_mode == I915_TILING_Y)
>  			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
> -		val |= I830_FENCE_SIZE_BITS(size);
> +		val |= I830_FENCE_SIZE_BITS(vma->node.size);
>  		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
>  		val |= I830_FENCE_REG_VALID;
>  	} else
> @@ -426,13 +422,7 @@ bool
>  i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
>  {
>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
> -
> -		WARN_ON(!ggtt_vma ||
> -			dev_priv->fence_regs[obj->fence_reg].pin_count >
> -			ggtt_vma->pin_count);
> -		dev_priv->fence_regs[obj->fence_reg].pin_count++;
> +		to_i915(obj->base.dev)->fence_regs[obj->fence_reg].pin_count++;
>  		return true;
>  	} else
>  		return false;
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index c7a77e0f18c2..775b5a4e8a5b 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -3325,14 +3325,10 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
>  
>  	GEM_BUG_ON(vm->closed);
>  
> -	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
> -		return ERR_PTR(-EINVAL);
> -
>  	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
>  	if (vma == NULL)
>  		return ERR_PTR(-ENOMEM);
>  
> -	INIT_LIST_HEAD(&vma->obj_link);
>  	INIT_LIST_HEAD(&vma->exec_list);
>  	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
>  		init_request_active(&vma->last_read[i], i915_vma_retire);
> @@ -3342,49 +3338,69 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
>  	vma->size = obj->base.size;
>  	vma->is_ggtt = i915_is_ggtt(vm);
>  
> -	if (i915_is_ggtt(vm)) {
> +	if (ggtt_view) {
>  		vma->ggtt_view = *ggtt_view;
>  		if (ggtt_view->type == I915_GGTT_VIEW_PARTIAL)
>  			vma->size = ggtt_view->params.partial.size << PAGE_SHIFT;
>  		else if (ggtt_view->type == I915_GGTT_VIEW_ROTATED)
>  			vma->size = intel_rotation_info_size(&ggtt_view->params.rotated) << PAGE_SHIFT;
>  	} else
> +
> +	if (!vma->is_ggtt)
>  		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
>  
>  	list_add_tail(&vma->obj_link, &obj->vma_list);
> -
>  	return vma;
>  }
>  
> +static inline bool vma_matches(struct i915_vma *vma,
> +			       struct i915_address_space *vm,
> +			       const struct i915_ggtt_view *view)
> +{
> +	if (vma->vm != vm)
> +		return false;
> +
> +	if (!vma->is_ggtt)
> +		return true;
> +
> +	if (view == NULL)
> +		return vma->ggtt_view.type == 0;
> +
> +	if (vma->ggtt_view.type != view->type)
> +		return false;
> +
> +	return memcmp(&vma->ggtt_view.params,
> +		      &view->params,
> +		      sizeof(view->params)) == 0;
> +}
> +
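Nit on vma_matches(): the view == NULL shorthand relies on
I915_GGTT_VIEW_NORMAL being the zero enumerator. That holds today, but
spelling it out is free (assuming the enum stays anchored at 0, the
generated code is identical):

	if (view == NULL)
		return vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL;
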
>  struct i915_vma *
> -i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
> -				  struct i915_address_space *vm)
> +i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
> +		    struct i915_address_space *vm,
> +		    const struct i915_ggtt_view *view)
>  {
>  	struct i915_vma *vma;
>  
> -	vma = i915_gem_obj_to_vma(obj, vm);
> -	if (!vma)
> -		vma = __i915_gem_vma_create(obj, vm,
> -					    i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
> +	list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
> +		if (vma_matches(vma, vm, view))
> +			return vma;
>  
> -	return vma;
> +	return NULL;
>  }
>  
>  struct i915_vma *
> -i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
> -				       const struct i915_ggtt_view *view)
> +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
> +				  struct i915_address_space *vm,
> +				  const struct i915_ggtt_view *view)
>  {
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = to_i915(dev);
> -	struct i915_ggtt *ggtt = &dev_priv->ggtt;
> -	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
> +	struct i915_vma *vma;
>  
> +	vma = i915_gem_obj_to_vma(obj, vm, view);
>  	if (!vma)
> -		vma = __i915_gem_vma_create(obj, &ggtt->base, view);
> +		vma = __i915_gem_vma_create(obj, vm, view);
>  
>  	GEM_BUG_ON(vma->closed);
>  	return vma;
> -
>  }
>  
>  static struct scatterlist *
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index 5655358a60e1..5b28dc251e60 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -590,20 +590,6 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
>  int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
>  void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
>  
> -static inline bool
> -i915_ggtt_view_equal(const struct i915_ggtt_view *a,
> -                     const struct i915_ggtt_view *b)
> -{
> -	if (WARN_ON(!a || !b))
> -		return false;
> -
> -	if (a->type != b->type)
> -		return false;
> -	if (a->type != I915_GGTT_VIEW_NORMAL)
> -		return !memcmp(&a->params, &b->params, sizeof(a->params));
> -	return true;
> -}
> -
>  /**
>   * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
>   * @vma: VMA to iomap
> diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
> index 4cf82697b3db..6e6eac43db19 100644
> --- a/drivers/gpu/drm/i915/i915_gem_render_state.c
> +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
> @@ -31,7 +31,7 @@
>  struct render_state {
>  	const struct intel_renderstate_rodata *rodata;
>  	struct drm_i915_gem_object *obj;
> -	u64 ggtt_offset;
> +	struct i915_vma *vma;
>  	int gen;
>  	u32 aux_batch_size;
>  	u32 aux_batch_offset;
> @@ -57,10 +57,9 @@ render_state_get_rodata(const int gen)
>  static int render_state_init(struct render_state *so,
>  			     struct drm_i915_private *dev_priv)
>  {
> -	int ret;
> +	struct i915_vma *vma;
>  
>  	so->gen = INTEL_GEN(dev_priv);
> -	so->ggtt_offset = 0;
>  	so->rodata = render_state_get_rodata(so->gen);
>  	if (so->rodata == NULL)
>  		return 0;
> @@ -72,16 +71,14 @@ static int render_state_init(struct render_state *so,
>  	if (IS_ERR(so->obj))
>  		return PTR_ERR(so->obj);
>  
> -	ret = i915_gem_object_ggtt_pin(so->obj, NULL, 0, 0, 0);
> -	if (ret)
> -		goto free_gem;
> +	vma = i915_gem_object_ggtt_pin(so->obj, NULL, 0, 0, 0);
> +	if (IS_ERR(vma)) {
> +		i915_gem_object_put(so->obj);
> +		return PTR_ERR(vma);
> +	}
>  
> -	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
> +	so->vma = vma;
>  	return 0;
> -
> -free_gem:
> -	i915_gem_object_put(so->obj);
> -	return ret;
>  }
>  
>  /*
> @@ -121,7 +118,7 @@ static int render_state_setup(struct render_state *so)
>  		u32 s = rodata->batch[i];
>  
>  		if (i * 4  == rodata->reloc[reloc_index]) {
> -			u64 r = s + so->ggtt_offset;
> +			u64 r = s + so->vma->node.start;
>  			s = lower_32_bits(r);
>  			if (so->gen >= 8) {
>  				if (i + 1 >= rodata->batch_items ||
> @@ -176,7 +173,7 @@ err_out:
>  
>  static void render_state_fini(struct render_state *so)
>  {
> -	i915_gem_object_ggtt_unpin(so->obj);
> +	i915_vma_unpin(so->vma);
>  	i915_gem_object_put(so->obj);
>  }
>  
> @@ -209,14 +206,14 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
>  	struct render_state so;
>  	int ret;
>  
> -	ret = render_state_prepare(req->engine, &so);
> +	ret = render_state_prepare(req->engine, memset(&so, 0, sizeof(so)));
>  	if (ret)
>  		return ret;
>  
>  	if (so.rodata == NULL)
>  		return 0;
>  
> -	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
> +	ret = req->engine->emit_bb_start(req, so.vma->node.start,
>  					 so.rodata->batch_items * 4,
>  					 I915_DISPATCH_SECURE);
>  	if (ret)
> @@ -224,7 +221,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
>  
>  	if (so.aux_batch_size > 8) {
>  		ret = req->engine->emit_bb_start(req,
> -						 (so.ggtt_offset +
> +						 (so.vma->node.start +
>  						  so.aux_batch_offset),
>  						 so.aux_batch_size,
>  						 I915_DISPATCH_SECURE);
> @@ -232,7 +229,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
>  			goto out;
>  	}
>  
> -	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req, 0);
> +	i915_vma_move_to_active(so.vma, req, 0);
>  out:
>  	render_state_fini(&so);
>  	return ret;
> diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
> index c44fca8599bb..18cce3f06e9c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_render_state.h
> +++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
> @@ -24,7 +24,7 @@
>  #ifndef _I915_GEM_RENDER_STATE_H_
>  #define _I915_GEM_RENDER_STATE_H_
>  
> -#include <linux/types.h>
> +struct drm_i915_gem_request;
>  
>  int i915_gem_render_state_init(struct drm_i915_gem_request *req);
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 8101d9169027..5a3d81e5458b 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -404,18 +404,12 @@ static void i915_gem_mark_busy(struct drm_i915_private *dev_priv,
>   */
>  void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
>  {
> -	struct intel_engine_cs *engine;
> -	struct intel_ring *ring;
> +	struct intel_engine_cs *engine = request->engine;
> +	struct intel_ring *ring = request->ring;
>  	u32 request_start;
>  	u32 reserved_tail;
>  	int ret;
>  
> -	if (WARN_ON(request == NULL))
> -		return;
> -
> -	engine = request->engine;
> -	ring = request->ring;
> -
>  	/*
>  	 * To ensure that this call will not fail, space for its emissions
>  	 * should already have been reserved in the ring buffer. Let the ring
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
> index 87e055267904..a8e228f5ceb4 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.h
> +++ b/drivers/gpu/drm/i915/i915_gem_request.h
> @@ -111,7 +111,7 @@ struct drm_i915_gem_request {
>  
>  	/** Batch buffer related to this request if any (used for
>  	 * error state dump only) */
> -	struct drm_i915_gem_object *batch_obj;
> +	struct i915_vma *batch;
>  	struct list_head active_list;
>  
>  	/** Time at which this request was emitted, in jiffies. */
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index a881c243fca2..415fa04d5232 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -683,7 +683,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
>  	if (gtt_offset == I915_GTT_OFFSET_NONE)
>  		return obj;
>  
> -	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
> +	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
>  	if (IS_ERR(vma)) {
>  		ret = PTR_ERR(vma);
>  		goto err;
> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> index d6acd0a27c06..29fc4dfd1947 100644
> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> @@ -114,33 +114,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
>  }
>  
>  /* Is the current GTT allocation valid for the change in tiling? */
> -static bool
> +static int
>  i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
>  {
> +	struct i915_vma *vma;
>  	u32 size;
>  
>  	if (tiling_mode == I915_TILING_NONE)
> -		return true;
> +		return 0;
>  
> -	if (INTEL_INFO(obj->base.dev)->gen >= 4)
> -		return true;
> +	if (INTEL_GEN(obj->base.dev) >= 4)
> +		return 0;
> +
> +	vma = i915_gem_object_to_ggtt(obj, NULL);
> +	if (vma == NULL)
> +		return 0;
> +
> +	if (!obj->map_and_fenceable)
> +		return 0;
>  
>  	if (IS_GEN3(obj->base.dev)) {
> -		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
> -			return false;
> +		if (vma->node.start & ~I915_FENCE_START_MASK)
> +			goto bad;
>  	} else {
> -		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
> -			return false;
> +		if (vma->node.start & ~I830_FENCE_START_MASK)
> +			goto bad;
>  	}
>  
>  	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
> -	if (i915_gem_obj_ggtt_size(obj) != size)
> -		return false;
> +	if (vma->node.size < size)
> +		goto bad;
>  
> -	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
> -		return false;
> +	if (vma->node.start & (size - 1))
> +		goto bad;
>  
> -	return true;
> +	return 0;
> +
> +bad:
> +	return i915_vma_unbind(vma);
>  }
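
The bool->int change turns i915_gem_object_fence_ok() from a pure
predicate into something that unbinds on a bad fit, so the name now
undersells it. A rename in the spirit of (naming suggestion only):

	static int
	i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
				      int tiling_mode);

would let the set_tiling caller below read as prepare-then-update.
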
>  
>  /**
> @@ -227,10 +238,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
>  		 * has to also include the unfenced register the GPU uses
>  		 * whilst executing a fenced command for an untiled object.
>  		 */
> -		if (obj->map_and_fenceable &&
> -		    !i915_gem_object_fence_ok(obj, args->tiling_mode))
> -			ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
> -
> +		ret = i915_gem_object_fence_ok(obj, args->tiling_mode);
>  		if (ret == 0) {
>  			if (obj->pages &&
>  			    obj->madv == I915_MADV_WILLNEED &&
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 367b8b2ce5f2..3e42705e2fa4 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -632,18 +632,21 @@ static void i915_error_state_free(struct kref *error_ref)
>  
>  static struct drm_i915_error_object *
>  i915_error_object_create(struct drm_i915_private *dev_priv,
> -			 struct drm_i915_gem_object *src,
> -			 struct i915_address_space *vm)
> +			 struct i915_vma *vma)
>  {
>  	struct i915_ggtt *ggtt = &dev_priv->ggtt;
> +	struct drm_i915_gem_object *src;
>  	struct drm_i915_error_object *dst;
> -	struct i915_vma *vma = NULL;
>  	int num_pages;
>  	bool use_ggtt;
>  	int i = 0;
>  	u64 reloc_offset;
>  
> -	if (src == NULL || src->pages == NULL)
> +	if (vma == NULL)
> +		return NULL;
> +
> +	src = vma->obj;
> +	if (src->pages == NULL)
>  		return NULL;
>  
>  	num_pages = src->base.size >> PAGE_SHIFT;
> @@ -652,26 +655,19 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
>  	if (dst == NULL)
>  		return NULL;
>  
> -	if (i915_gem_obj_bound(src, vm))
> -		dst->gtt_offset = i915_gem_obj_offset(src, vm);
> -	else
> -		dst->gtt_offset = -1;
> -
> -	reloc_offset = dst->gtt_offset;
> -	if (i915_is_ggtt(vm))
> -		vma = i915_gem_obj_to_ggtt(src);
> +	reloc_offset = dst->gtt_offset = vma->node.start;
>  	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
> -		   vma && (vma->bound & GLOBAL_BIND) &&
> +		   (vma->bound & GLOBAL_BIND) &&
>  		   reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
>  
>  	/* Cannot access stolen address directly, try to use the aperture */
>  	if (src->stolen) {
>  		use_ggtt = true;
>  
> -		if (!(vma && vma->bound & GLOBAL_BIND))
> +		if (!(vma->bound & GLOBAL_BIND))
>  			goto unwind;
>  
> -		reloc_offset = i915_gem_obj_ggtt_offset(src);
> +		reloc_offset = vma->node.start;
>  		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
>  			goto unwind;
>  	}
> @@ -724,8 +720,6 @@ unwind:
>  	kfree(dst);
>  	return NULL;
>  }
> -#define i915_error_ggtt_object_create(dev_priv, src) \
> -	i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
>  
>  /* The error capture is special as tries to run underneath the normal
>   * locking rules - so we use the raw version of the i915_gem_active lookup.
> @@ -851,10 +845,10 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
>  	if (!i915.semaphores)
>  		return;
>  
> -	if (!error->semaphore_obj)
> +	if (!error->semaphore_obj && dev_priv->semaphore_vma)
>  		error->semaphore_obj =
> -			i915_error_ggtt_object_create(dev_priv,
> -						      dev_priv->semaphore_obj);
> +			i915_error_object_create(dev_priv,
> +						 dev_priv->semaphore_vma);
>  
>  	for_each_engine_id(to, dev_priv, id) {
>  		int idx;
> @@ -1042,9 +1036,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
>  
>  	list_for_each_entry(vma, &dev_priv->ggtt.base.active_list, vm_link) {
>  		if ((error->ccid & PAGE_MASK) == vma->node.start) {
> -			ering->ctx = i915_error_object_create(dev_priv,
> -							      vma->obj,
> -							      vma->vm);
> +			ering->ctx = i915_error_object_create(dev_priv, vma);
>  			break;
>  		}
>  	}
> @@ -1086,13 +1078,12 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
>  			 */
>  			error->ring[i].batchbuffer =
>  				i915_error_object_create(dev_priv,
> -							 request->batch_obj,
> -							 vm);
> +							 request->batch);
>  
>  			if (HAS_BROKEN_CS_TLB(dev_priv))
>  				error->ring[i].wa_batchbuffer =
> -					i915_error_ggtt_object_create(dev_priv,
> -								      engine->scratch.obj);
> +					i915_error_object_create(dev_priv,
> +								 engine->scratch);
>  
>  			if (request->pid) {
>  				struct task_struct *task;
> @@ -1112,17 +1103,15 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
>  			error->ring[i].cpu_ring_head = ring->head;
>  			error->ring[i].cpu_ring_tail = ring->tail;
>  			error->ring[i].ringbuffer =
> -				i915_error_ggtt_object_create(dev_priv,
> -							      ring->obj);
> +				i915_error_object_create(dev_priv, ring->vma);
>  		}
>  
>  		error->ring[i].hws_page =
> -			i915_error_ggtt_object_create(dev_priv,
> -						      engine->status_page.obj);
> +			i915_error_object_create(dev_priv,
> +						 engine->status_page.vma);
>  
>  		error->ring[i].wa_ctx =
> -			i915_error_ggtt_object_create(dev_priv,
> -						      engine->wa_ctx.obj);
> +			i915_error_object_create(dev_priv, engine->wa_ctx.vma);
>  
>  		i915_gem_record_active_context(engine, error, &error->ring[i]);
>  
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 1c92c4c6b0e1..90db9a88fddc 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -375,7 +375,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
>  	for_each_engine(engine, dev_priv) {
>  		struct intel_context *ce = &ctx->engine[engine->id];
>  		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
> -		struct drm_i915_gem_object *obj;
>  
>  		/* TODO: We have a design issue to be solved here. Only when we
>  		 * receive the first batch, we know which engine is used by the
> @@ -384,23 +383,20 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
>  		 * for now who owns a GuC client. But for future owner of GuC
>  		 * client, need to make sure lrc is pinned prior to enter here.
>  		 */
> -		if (!ce->state)
> +		if (!ce->vma)
>  			break;	/* XXX: continue? */
>  
>  		lrc->context_desc = lower_32_bits(ce->lrc_desc);
>  
>  		/* The state page is after PPHWSP */
> -		gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
> +		gfx_addr = ce->vma->node.start;
>  		lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
>  		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
>  				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
>  
> -		obj = ce->ring->obj;
> -		gfx_addr = i915_gem_obj_ggtt_offset(obj);
> -
> -		lrc->ring_begin = gfx_addr;
> -		lrc->ring_end = gfx_addr + obj->base.size - 1;
> -		lrc->ring_next_free_location = gfx_addr;
> +		lrc->ring_begin = ce->ring->vma->node.start;
> +		lrc->ring_end = lrc->ring_begin + ce->ring->vma->node.size - 1;
> +		lrc->ring_next_free_location = lrc->ring_begin;
>  		lrc->ring_current_tail_pointer_value = 0;
>  
>  		desc.engines_used |= (1 << engine->guc_id);
> @@ -602,23 +598,23 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
>  {
>  	struct drm_i915_private *dev_priv = guc_to_i915(guc);
>  	struct drm_i915_gem_object *obj;
> -	int ret;
> +	struct i915_vma *vma;
>  
>  	obj = i915_gem_object_create(dev_priv->dev, size);
>  	if (IS_ERR(obj))
>  		return ERR_CAST(obj);
>  
> -	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
> +	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
>  				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
> -	if (ret) {
> +	if (IS_ERR(vma)) {
>  		i915_gem_object_put(obj);
> -		return ERR_PTR(ret);
> +		return vma;
>  	}
>  
>  	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
>  	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
>  
> -	return i915_gem_obj_to_ggtt(obj);
> +	return vma;
>  }
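
btw the create+pin+return-vma shape of guc_allocate_vma() reads nicely. For
anyone following along, the caller side then becomes the usual ERR_PTR dance
(rough sketch, assuming the caller is the one that eventually does the
i915_vma_unpin()):

	struct i915_vma *vma;

	vma = guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* vma->node.start is the GGTT address; vma itself is the pin cookie */
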
>  
>  /**
> @@ -988,7 +984,7 @@ int intel_guc_suspend(struct drm_device *dev)
>  	/* any value greater than GUC_POWER_D0 */
>  	data[1] = GUC_POWER_D1;
>  	/* first page is shared data with GuC */
> -	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
> +	data[2] = ctx->engine[RCS].vma->node.start;
>  
>  	return host2guc_action(guc, data, ARRAY_SIZE(data));
>  }
> @@ -1013,7 +1009,7 @@ int intel_guc_resume(struct drm_device *dev)
>  	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
>  	data[1] = GUC_POWER_D0;
>  	/* first page is shared data with GuC */
> -	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
> +	data[2] = ctx->engine[RCS].vma->node.start;
>  
>  	return host2guc_action(guc, data, ARRAY_SIZE(data));
>  }
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 0cfaace38370..cc6f7a49bf58 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -2240,14 +2240,14 @@ static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv
>  	}
>  }
>  
> -int
> -intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
> -			   unsigned int rotation)
> +struct i915_vma *
> +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
>  {
>  	struct drm_device *dev = fb->dev;
>  	struct drm_i915_private *dev_priv = dev->dev_private;
>  	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
>  	struct i915_ggtt_view view;
> +	struct i915_vma *vma;
>  	u32 alignment;
>  	int ret;
>  
> @@ -2274,10 +2274,11 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
>  	 */
>  	intel_runtime_pm_get(dev_priv);
>  
> -	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
> -						   &view);
> -	if (ret)
> +	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
>  		goto err_pm;
> +	}
>  
>  	/* Install a fence for tiled scan-out. Pre-i965 always needs a
>  	 * fence, whereas 965+ only requires a fence if using
> @@ -2304,19 +2305,20 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
>  	}
>  
>  	intel_runtime_pm_put(dev_priv);
> -	return 0;
> +	return vma;
>  
>  err_unpin:
> -	i915_gem_object_unpin_from_display_plane(obj, &view);
> +	i915_gem_object_unpin_from_display_plane(vma);
>  err_pm:
>  	intel_runtime_pm_put(dev_priv);
> -	return ret;
> +	return ERR_PTR(ret);
>  }
>  
>  void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
>  {
>  	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
>  	struct i915_ggtt_view view;
> +	struct i915_vma *vma;
>  
>  	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
>  
> @@ -2325,7 +2327,8 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
>  	if (view.type == I915_GGTT_VIEW_NORMAL)
>  		i915_gem_object_unpin_fence(obj);
>  
> -	i915_gem_object_unpin_from_display_plane(obj, &view);
> +	vma = i915_gem_object_to_ggtt(obj, &view);
> +	i915_gem_object_unpin_from_display_plane(vma);
>  }
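
intel_unpin_fb_obj() still has to look the vma back up via
i915_gem_object_to_ggtt() here. Since intel_pin_and_fence_fb_obj() now hands
the vma to the caller, could the plane state cache it instead, the way you
stash ifbdev->vma in intel_fbdev below? Untested sketch, with a hypothetical
field:

	/* at pin time */
	vma = intel_pin_and_fence_fb_obj(fb, rotation);
	if (IS_ERR(vma))
		return PTR_ERR(vma);
	plane_state->vma = vma;	/* hypothetical field */

	/* at unpin time, no lookup needed */
	i915_gem_object_unpin_from_display_plane(plane_state->vma);
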
>  
>  /*
> @@ -2587,7 +2590,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
>  			continue;
>  
>  		obj = intel_fb_obj(fb);
> -		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
> +		if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
>  			drm_framebuffer_reference(fb);
>  			goto valid_fb;
>  		}
> @@ -2745,11 +2748,11 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
>  	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
>  	if (INTEL_INFO(dev)->gen >= 4) {
>  		I915_WRITE(DSPSURF(plane),
> -			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> +			   i915_gem_object_ggtt_offset(obj, NULL) + intel_crtc->dspaddr_offset);
>  		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
>  		I915_WRITE(DSPLINOFF(plane), linear_offset);
>  	} else
> -		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
> +		I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
>  	POSTING_READ(reg);
>  }
>  
> @@ -2849,7 +2852,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
>  
>  	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
>  	I915_WRITE(DSPSURF(plane),
> -		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> +		   i915_gem_object_ggtt_offset(obj, NULL) + intel_crtc->dspaddr_offset);
>  	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
>  		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
>  	} else {
> @@ -2882,7 +2885,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
>  	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
>  				intel_plane->base.state->rotation);
>  
> -	vma = i915_gem_obj_to_ggtt_view(obj, &view);
> +	vma = i915_gem_object_to_ggtt(obj, &view);
>  	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
>  		view.type))
>  		return -1;
> @@ -11385,7 +11388,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>  			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
>  					      MI_SRM_LRM_GLOBAL_GTT);
>  		intel_ring_emit_reg(ring, DERRMR);
> -		intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256);
> +		intel_ring_emit(ring, req->engine->scratch->node.start + 256);
>  		if (IS_GEN8(dev)) {
>  			intel_ring_emit(ring, 0);
>  			intel_ring_emit(ring, MI_NOOP);
> @@ -11634,6 +11637,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>  	struct intel_engine_cs *engine;
>  	bool mmio_flip;
>  	struct drm_i915_gem_request *request;
> +	struct i915_vma *vma;
>  	int ret;
>  
>  	/*
> @@ -11739,9 +11743,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>  
>  	mmio_flip = use_mmio_flip(engine, obj);
>  
> -	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
> -	if (ret)
> +	vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
>  		goto cleanup_pending;
> +	}
>  
>  	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
>  						  obj, 0);
> @@ -13965,7 +13971,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
>  		if (ret)
>  			DRM_DEBUG_KMS("failed to attach phys object\n");
>  	} else {
> -		ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
> +		struct i915_vma *vma;
> +
> +		vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
> +		if (IS_ERR(vma))
> +			ret = PTR_ERR(vma);
>  	}
>  
>  	if (ret == 0) {
> @@ -14334,7 +14344,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
>  	if (!obj)
>  		addr = 0;
>  	else if (!INTEL_INFO(dev)->cursor_needs_physical)
> -		addr = i915_gem_obj_ggtt_offset(obj);
> +		addr = i915_gem_object_ggtt_offset(obj, NULL);
>  	else
>  		addr = obj->phys_handle->busaddr;
>  
> @@ -16160,7 +16170,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
>  	struct drm_i915_private *dev_priv = to_i915(dev);
>  	struct drm_crtc *c;
>  	struct drm_i915_gem_object *obj;
> -	int ret;
>  
>  	intel_init_gt_powersave(dev_priv);
>  
> @@ -16174,15 +16183,17 @@ void intel_modeset_gem_init(struct drm_device *dev)
>  	 * for this.
>  	 */
>  	for_each_crtc(dev, c) {
> +		struct i915_vma *vma;
> +
>  		obj = intel_fb_obj(c->primary->fb);
>  		if (obj == NULL)
>  			continue;
>  
>  		mutex_lock(&dev->struct_mutex);
> -		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
> +		vma = intel_pin_and_fence_fb_obj(c->primary->fb,
>  						 c->primary->state->rotation);
>  		mutex_unlock(&dev->struct_mutex);
> -		if (ret) {
> +		if (IS_ERR(vma)) {
>  			DRM_ERROR("failed to pin boot fb on pipe %d\n",
>  				  to_intel_crtc(c)->pipe);
>  			drm_framebuffer_unreference(c->primary->fb);
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index 834646b4cc3f..30ef29873571 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -159,6 +159,7 @@ struct intel_framebuffer {
>  struct intel_fbdev {
>  	struct drm_fb_helper helper;
>  	struct intel_framebuffer *fb;
> +	struct i915_vma *vma;
>  	async_cookie_t cookie;
>  	int preferred_bpp;
>  };
> @@ -1207,8 +1208,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
>  void intel_release_load_detect_pipe(struct drm_connector *connector,
>  				    struct intel_load_detect_pipe *old,
>  				    struct drm_modeset_acquire_ctx *ctx);
> -int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
> -			       unsigned int rotation);
> +struct i915_vma *
> +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
>  void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
>  struct drm_framebuffer *
>  __intel_framebuffer_create(struct drm_device *dev,
> diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
> index 45ee07b888a0..e1eb96b50ec1 100644
> --- a/drivers/gpu/drm/i915/intel_fbc.c
> +++ b/drivers/gpu/drm/i915/intel_fbc.c
> @@ -742,7 +742,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
>  	/* FIXME: We lack the proper locking here, so only run this on the
>  	 * platforms that need. */
>  	if (IS_GEN(dev_priv, 5, 6))
> -		cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
> +		cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
>  	cache->fb.pixel_format = fb->pixel_format;
>  	cache->fb.stride = fb->pitches[0];
>  	cache->fb.fence_reg = obj->fence_reg;
> diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
> index 10600975fe8d..e76d18f7c733 100644
> --- a/drivers/gpu/drm/i915/intel_fbdev.c
> +++ b/drivers/gpu/drm/i915/intel_fbdev.c
> @@ -187,7 +187,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
>  	struct fb_info *info;
>  	struct drm_framebuffer *fb;
>  	struct i915_vma *vma;
> -	struct drm_i915_gem_object *obj;
>  	bool prealloc = false;
>  	void *vaddr;
>  	int ret;
> @@ -215,17 +214,17 @@ static int intelfb_create(struct drm_fb_helper *helper,
>  		sizes->fb_height = intel_fb->base.height;
>  	}
>  
> -	obj = intel_fb->obj;
> -
>  	mutex_lock(&dev->struct_mutex);
>  
>  	/* Pin the GGTT vma for our access via info->screen_base.
>  	 * This also validates that any existing fb inherited from the
>  	 * BIOS is suitable for own access.
>  	 */
> -	ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
> -	if (ret)
> +	vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
>  		goto out_unlock;
> +	}
>  
>  	info = drm_fb_helper_alloc_fbi(helper);
>  	if (IS_ERR(info)) {
> @@ -245,8 +244,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
>  	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
>  	info->fbops = &intelfb_ops;
>  
> -	vma = i915_gem_obj_to_ggtt(obj);
> -
>  	/* setup aperture base/size for vesafb takeover */
>  	info->apertures->ranges[0].base = dev->mode_config.fb_base;
>  	info->apertures->ranges[0].size = ggtt->mappable_end;
> @@ -273,14 +270,14 @@ static int intelfb_create(struct drm_fb_helper *helper,
>  	 * If the object is stolen however, it will be full of whatever
>  	 * garbage was left in there.
>  	 */
> -	if (ifbdev->fb->obj->stolen && !prealloc)
> +	if (intel_fb->obj->stolen && !prealloc)
>  		memset_io(info->screen_base, 0, info->screen_size);
>  
>  	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
>  
> -	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
> -		      fb->width, fb->height,
> -		      i915_gem_obj_ggtt_offset(obj), obj);
> +	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx\n",
> +		      fb->width, fb->height, vma->node.start);
> +	ifbdev->vma = vma;
>  
>  	mutex_unlock(&dev->struct_mutex);
>  	vga_switcheroo_client_fb_set(dev->pdev, info);
> diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
> index 1ecf88fd0b10..64f57c07afcc 100644
> --- a/drivers/gpu/drm/i915/intel_guc_loader.c
> +++ b/drivers/gpu/drm/i915/intel_guc_loader.c
> @@ -235,12 +235,12 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
>   * Note that GuC needs the CSS header plus uKernel code to be copied by the
>   * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
>   */
> -static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
> +static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
> +			      struct i915_vma *vma)
>  {
>  	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
> -	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
>  	unsigned long offset;
> -	struct sg_table *sg = fw_obj->pages;
> +	struct sg_table *sg = vma->obj->pages;
>  	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
>  	int i, ret = 0;
>  
> @@ -257,7 +257,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
>  	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
>  
>  	/* Set the source address for the new blob */
> -	offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
> +	offset = vma->node.start + guc_fw->header_offset;
>  	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
>  	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
>  
> @@ -312,6 +312,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
>  {
>  	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
>  	struct drm_device *dev = dev_priv->dev;
> +	struct i915_vma *vma;
>  	int ret;
>  
>  	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
> @@ -320,10 +321,10 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
>  		return ret;
>  	}
>  
> -	ret = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
> -	if (ret) {
> -		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
> -		return ret;
> +	vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
> +	if (IS_ERR(vma)) {
> +		DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
> +		return PTR_ERR(vma);
>  	}
>  
>  	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
> @@ -364,7 +365,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
>  
>  	set_guc_init_params(dev_priv);
>  
> -	ret = guc_ucode_xfer_dma(dev_priv);
> +	ret = guc_ucode_xfer_dma(dev_priv, vma);
>  
>  	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
>  
> @@ -372,7 +373,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
>  	 * We keep the object pages for reuse during resume. But we can unpin it
>  	 * now that DMA has completed, so it doesn't continue to take up space.
>  	 */
> -	i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
> +	i915_vma_unpin(vma);
>  
>  	return ret;
>  }
> @@ -653,12 +654,8 @@ fail:
>  	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
>  		  guc_fw->guc_fw_path, err);
>  
> -	mutex_lock(&dev->struct_mutex);
> -	obj = guc_fw->guc_fw_obj;
> -	if (obj)
> -		i915_gem_object_put(obj);
> +	i915_gem_object_put_unlocked(guc_fw->guc_fw_obj);
>  	guc_fw->guc_fw_obj = NULL;
> -	mutex_unlock(&dev->struct_mutex);
>  
>  	release_firmware(fw);		/* OK even if fw is NULL */
>  	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
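
You also drop the if (obj) check in this failure path. If the fetch fails
before the object is created, guc_fw->guc_fw_obj is NULL here, so this relies
on i915_gem_object_put_unlocked() coping with a NULL object. If it doesn't,
keep the guard (sketch):

	if (guc_fw->guc_fw_obj)
		i915_gem_object_put_unlocked(guc_fw->guc_fw_obj);
	guc_fw->guc_fw_obj = NULL;
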
> @@ -737,7 +734,7 @@ void intel_guc_fini(struct drm_device *dev)
>  
>  	i915_gem_object_put(guc_fw->guc_fw_obj);
>  	guc_fw->guc_fw_obj = NULL;
> -	mutex_unlock(&dev->struct_mutex);
>  
>  	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
> +	mutex_unlock(&dev->struct_mutex);
>  }
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 4bf63af2a282..49e7bf170a04 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -326,7 +326,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
>  	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
>  
>  	desc = engine->ctx_desc_template;			/* bits  0-11 */
> -	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
> +	desc |= ce->vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
>  								/* bits 12-31 */
>  	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;		/* bits 32-52 */
>  
> @@ -765,6 +765,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
>  {
>  	struct drm_i915_private *dev_priv = ctx->i915;
>  	struct intel_context *ce = &ctx->engine[engine->id];
> +	struct i915_vma *vma;
>  	void *vaddr;
>  	u32 *lrc_reg_state;
>  	int ret;
> @@ -774,16 +775,18 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
>  	if (ce->pin_count++)
>  		return 0;
>  
> -	ret = i915_gem_object_ggtt_pin(ce->state, NULL,
> +	vma = i915_gem_object_ggtt_pin(ce->state, NULL,
>  				       0, GEN8_LR_CONTEXT_ALIGN,
>  				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
> -	if (ret)
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
>  		goto err;
> +	}
>  
> -	vaddr = i915_gem_object_pin_map(ce->state);
> +	vaddr = i915_gem_object_pin_map(vma->obj);
>  	if (IS_ERR(vaddr)) {
>  		ret = PTR_ERR(vaddr);
> -		goto unpin_ctx_obj;
> +		goto unpin_vma;
>  	}
>  
>  	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
> @@ -792,12 +795,12 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
>  	if (ret)
>  		goto unpin_map;
>  
> -	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
> +	ce->vma = vma;
>  	intel_lr_context_descriptor_update(ctx, engine);
>  
>  	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
>  	ce->lrc_reg_state = lrc_reg_state;
> -	ce->state->dirty = true;
> +	vma->obj->dirty = true;
>  
>  	/* Invalidate GuC TLB. */
>  	if (i915.enable_guc_submission)
> @@ -807,9 +810,9 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
>  	return 0;
>  
>  unpin_map:
> -	i915_gem_object_unpin_map(ce->state);
> -unpin_ctx_obj:
> -	i915_gem_object_ggtt_unpin(ce->state);
> +	i915_gem_object_unpin_map(vma->obj);
> +unpin_vma:
> +	__i915_vma_unpin(vma);
>  err:
>  	ce->pin_count = 0;
>  	return ret;
> @@ -829,9 +832,9 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
>  	intel_ring_unpin(ce->ring);
>  
>  	i915_gem_object_unpin_map(ce->state);
> -	i915_gem_object_ggtt_unpin(ce->state);
> +	i915_vma_unpin(ce->vma);
>  
> -	ce->lrc_vma = NULL;
> +	ce->vma = NULL;
>  	ce->lrc_desc = 0;
>  	ce->lrc_reg_state = NULL;
>  
> @@ -921,7 +924,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
>  	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
>  				   MI_SRM_LRM_GLOBAL_GTT));
>  	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
> -	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
> +	wa_ctx_emit(batch, index, engine->scratch->node.start + 256);
>  	wa_ctx_emit(batch, index, 0);
>  
>  	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
> @@ -939,7 +942,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
>  	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
>  				   MI_SRM_LRM_GLOBAL_GTT));
>  	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
> -	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
> +	wa_ctx_emit(batch, index, engine->scratch->node.start + 256);
>  	wa_ctx_emit(batch, index, 0);
>  
>  	return index;
> @@ -1013,7 +1016,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
>  
>  	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
>  	/* Actual scratch location is at 128 bytes offset */
> -	scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
> +	scratch_addr = engine->scratch->node.start + 2*CACHELINE_BYTES;
>  
>  	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
>  	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
> @@ -1142,47 +1145,41 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
>  	return wa_ctx_end(wa_ctx, *offset = index, 1);
>  }
>  
> -static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
> +static struct i915_vma *
> +lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
>  {
> -	int ret;
> +	struct drm_i915_gem_object *obj;
> +	struct i915_vma *vma;
>  
> -	engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
> -						   PAGE_ALIGN(size));
> -	if (IS_ERR(engine->wa_ctx.obj)) {
> -		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
> -		ret = PTR_ERR(engine->wa_ctx.obj);
> -		engine->wa_ctx.obj = NULL;
> -		return ret;
> -	}
> +	obj = i915_gem_object_create(engine->i915->dev, PAGE_ALIGN(size));
> +	if (IS_ERR(obj))
> +		return ERR_CAST(obj);
>  
> -	ret = i915_gem_object_ggtt_pin(engine->wa_ctx.obj, NULL,
> -				       0, PAGE_SIZE, 0);
> -	if (ret) {
> -		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
> -				 ret);
> -		i915_gem_object_put(engine->wa_ctx.obj);
> -		return ret;
> +	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, 0);
> +	if (IS_ERR(vma)) {
> +		i915_gem_object_put(obj);
> +		return vma;
>  	}
>  
> -	return 0;
> +	return vma;
>  }
>  
>  static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
>  {
> -	if (engine->wa_ctx.obj) {
> -		i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
> -		i915_gem_object_put(engine->wa_ctx.obj);
> -		engine->wa_ctx.obj = NULL;
> +	if (engine->wa_ctx.vma) {
> +		i915_vma_unpin(engine->wa_ctx.vma);
> +		i915_gem_object_put(engine->wa_ctx.vma->obj);
> +		engine->wa_ctx.vma = NULL;
>  	}
>  }
>  
>  static int intel_init_workaround_bb(struct intel_engine_cs *engine)
>  {
> -	int ret;
> +	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
>  	uint32_t *batch;
>  	uint32_t offset;
>  	struct page *page;
> -	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
> +	int ret;
>  
>  	WARN_ON(engine->id != RCS);
>  
> @@ -1194,20 +1191,22 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
>  	}
>  
>  	/* some WA perform writes to scratch page, ensure it is valid */
> -	if (engine->scratch.obj == NULL) {
> +	if (engine->scratch == NULL) {
>  		DRM_ERROR("scratch page not allocated for %s\n", engine->name);
>  		return -EINVAL;
>  	}
>  
> -	ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
> -	if (ret) {
> +	wa_ctx->vma = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
> +	if (IS_ERR(wa_ctx->vma)) {
> +		ret = PTR_ERR(wa_ctx->vma);
>  		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
>  		return ret;
>  	}
>  
> -	page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
> +	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
>  	batch = kmap_atomic(page);
>  	offset = 0;
> +	ret = 0;
>  
>  	if (IS_GEN8(engine->i915)) {
>  		ret = gen8_init_indirectctx_bb(engine,
> @@ -1464,7 +1463,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
>  {
>  	struct intel_ring *ring = request->ring;
>  	struct intel_engine_cs *engine = request->engine;
> -	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	u32 scratch_addr = engine->scratch->node.start + 2 * CACHELINE_BYTES;
>  	bool vf_flush_wa = false;
>  	u32 flags = 0;
>  	int ret;
> @@ -1650,9 +1649,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
>  
>  	intel_engine_fini_breadcrumbs(engine);
>  
> -	if (engine->status_page.obj) {
> -		i915_gem_object_unpin_map(engine->status_page.obj);
> -		engine->status_page.obj = NULL;
> +	if (engine->status_page.vma) {
> +		i915_gem_object_unpin_map(engine->status_page.vma->obj);
> +		engine->status_page.vma = NULL;
>  	}
>  	intel_lr_context_unpin(dev_priv->kernel_context, engine);
>  
> @@ -1692,19 +1691,19 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
>  }
>  
>  static int
> -lrc_setup_hws(struct intel_engine_cs *engine,
> -	      struct drm_i915_gem_object *dctx_obj)
> +lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
>  {
> +#define HWS_OFFSET (LRC_PPHWSP_PN * PAGE_SIZE)
>  	void *hws;
>  
>  	/* The HWSP is part of the default context object in LRC mode. */
> -	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
> -				       LRC_PPHWSP_PN * PAGE_SIZE;
> -	hws = i915_gem_object_pin_map(dctx_obj);
> +	hws = i915_gem_object_pin_map(vma->obj);
>  	if (IS_ERR(hws))
>  		return PTR_ERR(hws);
> -	engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
> -	engine->status_page.obj = dctx_obj;
> +
> +	engine->status_page.page_addr = hws + HWS_OFFSET;
> +	engine->status_page.gfx_addr = vma->node.start + HWS_OFFSET;
> +	engine->status_page.vma = vma;
>  
>  	return 0;
>  }
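
Minor: the function-local #define HWS_OFFSET has no matching #undef, so it
leaks past lrc_setup_hws() into the rest of the file. A plain local would do
(sketch):

	const u32 hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
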
> @@ -1828,7 +1827,7 @@ logical_ring_init(struct intel_engine_cs *engine)
>  	}
>  
>  	/* And setup the hardware status page. */
> -	ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
> +	ret = lrc_setup_hws(engine, dctx->engine[engine->id].vma);
>  	if (ret) {
>  		DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
>  		goto error;
> @@ -2109,9 +2108,9 @@ populate_lr_context(struct i915_gem_context *ctx,
>  			       RING_INDIRECT_CTX(engine->mmio_base), 0);
>  		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
>  			       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
> -		if (engine->wa_ctx.obj) {
> +		if (engine->wa_ctx.vma) {
>  			struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
> -			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
> +			uint32_t ggtt_offset = wa_ctx->vma->node.start;
>  
>  			reg_state[CTX_RCS_INDIRECT_CTX+1] =
>  				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
> diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> index 9b0fb7e23cbb..75bdd335d565 100644
> --- a/drivers/gpu/drm/i915/intel_overlay.c
> +++ b/drivers/gpu/drm/i915/intel_overlay.c
> @@ -170,8 +170,8 @@ struct overlay_registers {
>  struct intel_overlay {
>  	struct drm_i915_private *i915;
>  	struct intel_crtc *crtc;
> -	struct drm_i915_gem_object *vid_bo;
> -	struct drm_i915_gem_object *old_vid_bo;
> +	struct drm_i915_gem_object *vid_bo, *old_vid_bo;
> +	struct i915_vma *vid_vma, *old_vid_vma;
>  	bool active;
>  	bool pfit_active;
>  	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
> @@ -316,7 +316,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
>  {
>  	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
>  
> -	i915_gem_object_ggtt_unpin(obj);
> +	i915_gem_object_unpin_from_display_plane(overlay->old_vid_vma);
>  	i915_gem_object_put(obj);
>  
>  	overlay->old_vid_bo = NULL;
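
old_vid_vma is unpinned above but never cleared, unlike old_vid_bo. Suggest
also:

	overlay->old_vid_vma = NULL;

so a stale vma pointer can't be unpinned a second time on the next flip.
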
> @@ -324,14 +324,13 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
>  
>  static void intel_overlay_off_tail(struct intel_overlay *overlay)
>  {
> -	struct drm_i915_gem_object *obj = overlay->vid_bo;
> -
>  	/* never have the overlay hw on without showing a frame */
> -	if (WARN_ON(!obj))
> +	if (WARN_ON(!overlay->vid_vma))
>  		return;
>  
> -	i915_gem_object_ggtt_unpin(obj);
> -	i915_gem_object_put(obj);
> +	i915_gem_object_unpin_from_display_plane(overlay->vid_vma);
> +	i915_gem_object_put(overlay->vid_bo);
> +	overlay->vid_vma = NULL;
>  	overlay->vid_bo = NULL;
>  
>  	overlay->crtc->overlay = NULL;
> @@ -751,6 +750,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
>  	struct drm_i915_private *dev_priv = overlay->i915;
>  	u32 swidth, swidthsw, sheight, ostride;
>  	enum pipe pipe = overlay->crtc->pipe;
> +	struct i915_vma *vma;
>  
>  	lockdep_assert_held(&dev_priv->dev->struct_mutex);
>  	WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
> @@ -759,10 +759,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
>  	if (ret != 0)
>  		return ret;
>  
> -	ret = i915_gem_object_pin_to_display_plane(new_bo, 0,
> +	vma = i915_gem_object_pin_to_display_plane(new_bo, 0,
>  						   &i915_ggtt_view_normal);
> -	if (ret != 0)
> -		return ret;
> +	if (IS_ERR(vma))
> +		return PTR_ERR(vma);
>  
>  	ret = i915_gem_object_put_fence(new_bo);
>  	if (ret)
> @@ -805,7 +805,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
>  	swidth = params->src_w;
>  	swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
>  	sheight = params->src_h;
> -	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
> +	iowrite32(vma->node.start + params->offset_Y, &regs->OBUF_0Y);
>  	ostride = params->stride_Y;
>  
>  	if (params->format & I915_OVERLAY_YUV_PLANAR) {
> @@ -819,8 +819,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
>  				      params->src_w/uv_hscale);
>  		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
>  		sheight |= (params->src_h/uv_vscale) << 16;
> -		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
> -		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
> +		iowrite32(vma->node.start + params->offset_U, &regs->OBUF_0U);
> +		iowrite32(vma->node.start + params->offset_V, &regs->OBUF_0V);
>  		ostride |= params->stride_UV << 16;
>  	}
>  
> @@ -845,14 +845,16 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
>  			  INTEL_FRONTBUFFER_OVERLAY(pipe));
>  
>  	overlay->old_vid_bo = overlay->vid_bo;
> +	overlay->old_vid_vma = overlay->vid_vma;
>  	overlay->vid_bo = new_bo;
> +	overlay->vid_vma = vma;
>  
>  	intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
>  
>  	return 0;
>  
>  out_unpin:
> -	i915_gem_object_ggtt_unpin(new_bo);
> +	i915_gem_object_unpin_from_display_plane(vma);
>  	return ret;
>  }
>  
> @@ -1380,6 +1382,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
>  	struct intel_overlay *overlay;
>  	struct drm_i915_gem_object *reg_bo;
>  	struct overlay_registers __iomem *regs;
> +	struct i915_vma *vma = NULL;
>  	int ret;
>  
>  	if (!HAS_OVERLAY(dev_priv))
> @@ -1412,13 +1415,14 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
>  		}
>  		overlay->flip_addr = reg_bo->phys_handle->busaddr;
>  	} else {
> -		ret = i915_gem_object_ggtt_pin(reg_bo, NULL,
> +		vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
>  					       0, PAGE_SIZE, PIN_MAPPABLE);
> -		if (ret) {
> +		if (IS_ERR(vma)) {
>  			DRM_ERROR("failed to pin overlay register bo\n");
> +			ret = PTR_ERR(vma);
>  			goto out_free_bo;
>  		}
> -		overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
> +		overlay->flip_addr = vma->node.start;
>  
>  		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
>  		if (ret) {
> @@ -1450,8 +1454,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
>  	return;
>  
>  out_unpin_bo:
> -	if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
> -		i915_gem_object_ggtt_unpin(reg_bo);
> +	if (vma)
> +		i915_vma_unpin(vma);
>  out_free_bo:
>  	i915_gem_object_put(reg_bo);
>  out_free:
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index c8211913f2d6..32add39ee9dd 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -182,7 +182,7 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
>  {
>  	struct intel_ring *ring = req->ring;
>  	u32 scratch_addr =
> -	       	req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 6);
> @@ -219,7 +219,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
>  {
>  	struct intel_ring *ring = req->ring;
>  	u32 scratch_addr =
> -	       	req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
>  	u32 flags = 0;
>  	int ret;
>  
> @@ -294,7 +294,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
>  {
>  	struct intel_ring *ring = req->ring;
>  	u32 scratch_addr =
> -	       	req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
>  	u32 flags = 0;
>  	int ret;
>  
> @@ -379,7 +379,8 @@ static int
>  gen8_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32 invalidate_domains, u32 flush_domains)
>  {
> -	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	u32 scratch_addr =
> +		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
>  	u32 flags = 0;
>  	int ret;
>  
> @@ -540,7 +541,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
>  	struct intel_ring *ring = engine->buffer;
> -	struct drm_i915_gem_object *obj = ring->obj;
> +	struct i915_vma *vma = ring->vma;
>  	int ret = 0;
>  
>  	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> @@ -580,7 +581,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
>  	 * registers with the above sequence (the readback of the HEAD registers
>  	 * also enforces ordering), otherwise the hw might lose the new ring
>  	 * register values. */
> -	I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
> +	I915_WRITE_START(engine, vma->node.start);
>  
>  	/* WaClearRingBufHeadRegAtInit:ctg,elk */
>  	if (I915_READ_HEAD(engine))
> @@ -595,16 +596,15 @@ static int init_ring_common(struct intel_engine_cs *engine)
>  
>  	/* If the head is still not zero, the ring is dead */
>  	if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
> -		     I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
> +		     I915_READ_START(engine) == vma->node.start &&
>  		     (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
>  		DRM_ERROR("%s initialization failed "
> -			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
> +			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08x]\n",
>  			  engine->name,
>  			  I915_READ_CTL(engine),
>  			  I915_READ_CTL(engine) & RING_VALID,
>  			  I915_READ_HEAD(engine), I915_READ_TAIL(engine),
> -			  I915_READ_START(engine),
> -			  (unsigned long)i915_gem_obj_ggtt_offset(obj));
> +			  I915_READ_START(engine), (u32)vma->node.start);
>  		ret = -EIO;
>  		goto out;
>  	}
> @@ -624,20 +624,21 @@ out:
>  
>  void intel_fini_pipe_control(struct intel_engine_cs *engine)
>  {
> -	if (engine->scratch.obj == NULL)
> +	if (!engine->scratch)
>  		return;
>  
> -	i915_gem_object_ggtt_unpin(engine->scratch.obj);
> -	i915_gem_object_put(engine->scratch.obj);
> -	engine->scratch.obj = NULL;
> +	i915_vma_unpin(engine->scratch);
> +	i915_gem_object_put(engine->scratch->obj);
> +	engine->scratch = NULL;
>  }
>  
>  int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
>  {
>  	struct drm_i915_gem_object *obj;
> +	struct i915_vma *vma;
>  	int ret;
>  
> -	WARN_ON(engine->scratch.obj);
> +	WARN_ON(engine->scratch);
>  
>  	obj = i915_gem_object_create_stolen(engine->i915->dev, size);
>  	if (obj == NULL)
> @@ -648,18 +649,19 @@ int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
>  		goto err;
>  	}
>  
> -	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, PIN_HIGH);
> -	if (ret)
> +	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, PIN_HIGH);
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
>  		goto err_unref;
> +	}
>  
> -	engine->scratch.obj = obj;
> -	engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
> -	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
> -			 engine->name, engine->scratch.gtt_offset);
> +	engine->scratch = vma;
> +	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08llx\n",
> +			 engine->name, (long long)vma->node.start);
>  	return 0;
>  
>  err_unref:
> -	i915_gem_object_put(engine->scratch.obj);
> +	i915_gem_object_put(obj);
>  err:
>  	return ret;
>  }
> @@ -1217,10 +1219,13 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
>  
> -	if (dev_priv->semaphore_obj) {
> -		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
> -		i915_gem_object_put(dev_priv->semaphore_obj);
> -		dev_priv->semaphore_obj = NULL;
> +	if (dev_priv->semaphore_vma) {
> +		struct drm_i915_gem_object *obj = dev_priv->semaphore_vma->obj;
> +
> +		i915_vma_unpin(dev_priv->semaphore_vma);
> +		dev_priv->semaphore_vma = NULL;
> +
> +		i915_gem_object_put(obj);
>  	}
>  
>  	intel_fini_pipe_control(engine);
> @@ -1684,7 +1689,7 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
>  		   unsigned dispatch_flags)
>  {
>  	struct intel_ring *ring = req->ring;
> -	u32 cs_offset = req->engine->scratch.gtt_offset;
> +	u32 cs_offset = req->engine->scratch->node.start;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 6);
> @@ -1773,67 +1778,68 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
>  
>  static void cleanup_status_page(struct intel_engine_cs *engine)
>  {
> -	struct drm_i915_gem_object *obj;
> +	struct i915_vma *vma;
>  
> -	obj = engine->status_page.obj;
> -	if (obj == NULL)
> +	vma = engine->status_page.vma;
> +	if (vma == NULL)
>  		return;
> +	engine->status_page.vma = NULL;
>  
> -	kunmap(sg_page(obj->pages->sgl));
> -	i915_gem_object_ggtt_unpin(obj);
> -	i915_gem_object_put(obj);
> -	engine->status_page.obj = NULL;
> +	kunmap(sg_page(vma->obj->pages->sgl));
> +	i915_vma_unpin(vma);
>  }
>  
>  static int init_status_page(struct intel_engine_cs *engine)
>  {
> -	struct drm_i915_gem_object *obj = engine->status_page.obj;
> +	struct drm_i915_gem_object *obj;
> +	struct i915_vma *vma;
> +	unsigned flags;
> +	int ret;
>  
> -	if (obj == NULL) {
> -		unsigned flags;
> -		int ret;
> +	if (engine->status_page.vma)
> +		return 0;
>  
> -		obj = i915_gem_object_create(engine->i915->dev, 4096);
> -		if (IS_ERR(obj)) {
> -			DRM_ERROR("Failed to allocate status page\n");
> -			return PTR_ERR(obj);
> -		}
> +	obj = i915_gem_object_create(engine->i915->dev, 4096);
> +	if (IS_ERR(obj)) {
> +		DRM_ERROR("Failed to allocate status page\n");
> +		return PTR_ERR(obj);
> +	}
>  
> -		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
> -		if (ret)
> -			goto err_unref;
> -
> -		flags = 0;
> -		if (!HAS_LLC(engine->i915))
> -			/* On g33, we cannot place HWS above 256MiB, so
> -			 * restrict its pinning to the low mappable arena.
> -			 * Though this restriction is not documented for
> -			 * gen4, gen5, or byt, they also behave similarly
> -			 * and hang if the HWS is placed at the top of the
> -			 * GTT. To generalise, it appears that all !llc
> -			 * platforms have issues with us placing the HWS
> -			 * above the mappable region (even though we never
> -			 * actualy map it).
> -			 */
> -			flags |= PIN_MAPPABLE;
> -		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, flags);
> -		if (ret) {
> -err_unref:
> -			i915_gem_object_put(obj);
> -			return ret;
> -		}
> +	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
> +	if (ret)
> +		goto err_unref;
>  
> -		engine->status_page.obj = obj;
> +	flags = 0;
> +	if (!HAS_LLC(engine->i915))
> +		/* On g33, we cannot place HWS above 256MiB, so
> +		 * restrict its pinning to the low mappable arena.
> +		 * Though this restriction is not documented for
> +		 * gen4, gen5, or byt, they also behave similarly
> +		 * and hang if the HWS is placed at the top of the
> +		 * GTT. To generalise, it appears that all !llc
> +		 * platforms have issues with us placing the HWS
> +		 * above the mappable region (even though we never
> +		 * actually map it).
> +		 */
> +		flags |= PIN_MAPPABLE;
> +	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, flags);
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
> +		goto err_unref;
>  	}
>  
> -	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
> +	engine->status_page.vma = vma;
> +	engine->status_page.gfx_addr = vma->node.start;
>  	engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
> -	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
>  
>  	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
>  			engine->name, engine->status_page.gfx_addr);
>  
>  	return 0;
> +
> +err_unref:
> +	i915_gem_object_put(obj);
> +	return ret;
>  }
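
The un-nesting here is a definite readability win, but note that the
memset(page_addr, 0, PAGE_SIZE) silently disappears in the rewrite. If zeroing
the freshly kmapped status page still matters (e.g. for pages inherited across
reset/resume), something like this after the kmap would restore it:

	engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

Otherwise a line in the commit message saying the memset is intentionally
dropped would help.
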
>  
>  static int init_phys_status_page(struct intel_engine_cs *engine)
> @@ -1857,15 +1863,16 @@ int intel_ring_pin(struct intel_ring *ring)
>  {
>  	struct drm_i915_private *dev_priv = ring->engine->i915;
>  	struct drm_i915_gem_object *obj = ring->obj;
> +	struct i915_vma *vma;
>  	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
>  	unsigned flags = PIN_OFFSET_BIAS | 4096;
>  	void *addr;
>  	int ret;
>  
>  	if (HAS_LLC(dev_priv) && !obj->stolen) {
> -		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, flags);
> -		if (ret)
> -			return ret;
> +		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, flags);
> +		if (IS_ERR(vma))
> +			return PTR_ERR(vma);
>  
>  		ret = i915_gem_object_set_to_cpu_domain(obj, true);
>  		if (ret)
> @@ -1877,10 +1884,10 @@ int intel_ring_pin(struct intel_ring *ring)
>  			goto err_unpin;
>  		}
>  	} else {
> -		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
> +		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
>  					       flags | PIN_MAPPABLE);
> -		if (ret)
> -			return ret;
> +		if (IS_ERR(vma))
> +			return PTR_ERR(vma);
>  
>  		ret = i915_gem_object_set_to_gtt_domain(obj, true);
>  		if (ret)
> @@ -1889,7 +1896,7 @@ int intel_ring_pin(struct intel_ring *ring)
>  		/* Access through the GTT requires the device to be awake. */
>  		assert_rpm_wakelock_held(dev_priv);
>  
> -		addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
> +		addr = i915_vma_pin_iomap(vma);
>  		if (IS_ERR(addr)) {
>  			ret = PTR_ERR(addr);
>  			goto err_unpin;
> @@ -1897,11 +1904,11 @@ int intel_ring_pin(struct intel_ring *ring)
>  	}
>  
>  	ring->vaddr = addr;
> -	ring->vma = i915_gem_obj_to_ggtt(obj);
> +	ring->vma = vma;
>  	return 0;
>  
>  err_unpin:
> -	i915_gem_object_ggtt_unpin(obj);
> +	i915_vma_unpin(vma);
>  	return ret;
>  }
>  
> @@ -1916,7 +1923,7 @@ void intel_ring_unpin(struct intel_ring *ring)
>  		i915_vma_unpin_iomap(ring->vma);
>  	ring->vaddr = NULL;
>  
> -	i915_gem_object_ggtt_unpin(ring->obj);
> +	i915_vma_unpin(ring->vma);
>  	ring->vma = NULL;
>  }
>  
> @@ -2007,10 +2014,14 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
>  		return 0;
>  
>  	if (ce->state) {
> -		ret = i915_gem_object_ggtt_pin(ce->state, NULL, 0,
> +		struct i915_vma *vma;
> +
> +		vma = i915_gem_object_ggtt_pin(ce->state, NULL, 0,
>  					       ctx->ggtt_alignment, PIN_HIGH);
> -		if (ret)
> -			goto error;
> +		if (IS_ERR(vma)) {
> +			ret = PTR_ERR(vma);
> +			goto error;
> +		}
> +
> +		ce->vma = vma;
>  	}
>  
>  	/* The kernel context is only used as a placeholder for flushing the
> @@ -2041,8 +2052,8 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
>  	if (--ce->pin_count)
>  		return;
>  
> -	if (ce->state)
> -		i915_gem_object_ggtt_unpin(ce->state);
> +	if (ce->vma)
> +		i915_vma_unpin(ce->vma);
>  
>  	i915_gem_context_put(ctx);
>  }
> @@ -2335,8 +2346,8 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
>  		if (HAS_VEBOX(dev_priv))
>  			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
>  	}
> -	if (dev_priv->semaphore_obj) {
> -		struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
> +	if (dev_priv->semaphore_vma) {
> +		struct drm_i915_gem_object *obj = dev_priv->semaphore_vma->obj;
>  		struct page *page = i915_gem_object_get_dirty_page(obj, 0);
>  		void *semaphores = kmap(page);
>  		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
> @@ -2576,16 +2587,20 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
>  				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
>  				i915.semaphores = 0;
>  			} else {
> +				struct i915_vma *vma;
> +
>  				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
> -				ret = i915_gem_object_ggtt_pin(obj, NULL,
> +				vma = i915_gem_object_ggtt_pin(obj, NULL,
>  							       0, 0,
>  							       PIN_HIGH);
> -				if (ret != 0) {
> +				if (IS_ERR(vma)) {
>  					i915_gem_object_put(obj);
>  					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
>  					i915.semaphores = 0;
> -				} else
> -					dev_priv->semaphore_obj = obj;
> +					vma = NULL;
> +				}
> +
> +				dev_priv->semaphore_vma = vma;
>  			}
>  		}
>  
> @@ -2596,7 +2611,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
>  		engine->irq_disable = gen8_ring_disable_irq;
>  		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
>  		if (i915.semaphores) {
> -			WARN_ON(!dev_priv->semaphore_obj);
>  			engine->semaphore.sync_to = gen8_ring_sync;
>  			engine->semaphore.signal = gen8_rcs_signal;
>  			GEN8_RING_SEMAPHORE_INIT(engine);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index d19fb8c24919..934d5722dc27 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -26,10 +26,10 @@
>   */
>  #define I915_RING_FREE_SPACE 64
>  
> -struct  intel_hw_status_page {
> +struct intel_hw_status_page {
>  	u32		*page_addr;
>  	unsigned int	gfx_addr;
> -	struct		drm_i915_gem_object *obj;
> +	struct		i915_vma *vma;
>  };
>  
>  #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
> @@ -57,16 +57,13 @@ struct  intel_hw_status_page {
>  #define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
>  	(((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
>  #define GEN8_SIGNAL_OFFSET(__ring, to)			     \
> -	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
> +	(dev_priv->semaphore_vma->node.start + \
>  	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
>  #define GEN8_WAIT_OFFSET(__ring, from)			     \
> -	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
> +	(dev_priv->semaphore_vma->node.start + \
>  	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
>  
>  #define GEN8_RING_SEMAPHORE_INIT(e) do { \
> -	if (!dev_priv->semaphore_obj) { \
> -		break; \
> -	} \
>  	(e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
>  	(e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
>  	(e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
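
With the early break gone, GEN8_RING_SEMAPHORE_INIT now dereferences
dev_priv->semaphore_vma unconditionally. That's only safe because the
pin-failure path in intel_init_render_ring_buffer() clears i915.semaphores
first; a cheap guard would keep the macro self-contained (sketch):

	#define GEN8_RING_SEMAPHORE_INIT(e) do { \
		if (WARN_ON(!dev_priv->semaphore_vma)) \
			break; \
		... \
	} while (0)
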
> @@ -97,8 +94,8 @@ struct intel_engine_hangcheck {
>  
>  struct intel_ring {
>  	struct drm_i915_gem_object *obj;
> -	void *vaddr;
>  	struct i915_vma *vma;
> +	void *vaddr;
>  
>  	struct intel_engine_cs *engine;
>  	struct list_head link;
> @@ -139,7 +136,7 @@ struct  i915_ctx_workarounds {
>  		u32 offset;
>  		u32 size;
>  	} indirect_ctx, per_ctx;
> -	struct drm_i915_gem_object *obj;
> +	struct i915_vma *vma;
>  };
>  
>  struct drm_i915_gem_request;
> @@ -325,10 +322,7 @@ struct intel_engine_cs {
>  
>  	struct intel_engine_hangcheck hangcheck;
>  
> -	struct {
> -		struct drm_i915_gem_object *obj;
> -		u32 gtt_offset;
> -	} scratch;
> +	struct i915_vma *scratch;
>  
>  	bool needs_cmd_parser;
>  
> diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
> index 324ccb06397d..99bdbb9e4037 100644
> --- a/drivers/gpu/drm/i915/intel_sprite.c
> +++ b/drivers/gpu/drm/i915/intel_sprite.c
> @@ -462,8 +462,8 @@ vlv_update_plane(struct drm_plane *dplane,
>  
>  	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
>  	I915_WRITE(SPCNTR(pipe, plane), sprctl);
> -	I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
> -		   sprsurf_offset);
> +	I915_WRITE(SPSURF(pipe, plane),
> +		   i915_gem_object_ggtt_offset(obj, NULL) + sprsurf_offset);
>  	POSTING_READ(SPSURF(pipe, plane));
>  }
>  
> @@ -602,7 +602,7 @@ ivb_update_plane(struct drm_plane *plane,
>  		I915_WRITE(SPRSCALE(pipe), sprscale);
>  	I915_WRITE(SPRCTL(pipe), sprctl);
>  	I915_WRITE(SPRSURF(pipe),
> -		   i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
> +		   i915_gem_object_ggtt_offset(obj, NULL) + sprsurf_offset);
>  	POSTING_READ(SPRSURF(pipe));
>  }
>  
> @@ -731,7 +731,7 @@ ilk_update_plane(struct drm_plane *plane,
>  	I915_WRITE(DVSSCALE(pipe), dvsscale);
>  	I915_WRITE(DVSCNTR(pipe), dvscntr);
>  	I915_WRITE(DVSSURF(pipe),
> -		   i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
> +		   i915_gem_object_ggtt_offset(obj, NULL) + dvssurf_offset);
>  	POSTING_READ(DVSSURF(pipe));
>  }
>  
> -- 
> 2.8.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx



