Re: [RFC PATCH] drm/ttm: get rid of ttm bo refcounting

On 11/12/2013 07:26 PM, Maarten Lankhorst wrote:
> Most drivers have refcounting done in GEM, so let's get rid of another refcounting layer. ;)
> It has been confusing to keep track of 2 refcounts, so let's just let the driver worry about
> refcounting and keep it hidden from ttm entirely. The core doesn't need to know about the
> refcounting anywhere.
>
> vmwgfx has one bo that doesn't use vmw_dma_buffer, but ttm_bo_create instead. Converting this call
> makes every bo use vmw_dma_buffer as its base, which means I can simply add a refcount to vmw_dma_buffer.
>
> Mostly meant as an RFC, so I only put effort into converting vmwgfx, radeon and nouveau. Thoughts?

Hmm. I don't really see the purpose of this?

First, the ttm bo reference is used by the vm system, so you need to duplicate a lot of vm stuff across all drivers, which is bad because if something needs to change there, we need to change it in all drivers. It seems you've forgotten the qxl, cirrus, mgag200 and ast mmap() paths here?
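
To illustrate the duplication: every driver ends up carrying a private copy of roughly the following mmap boilerplate (a condensed, untested sketch of the radeon/nouveau hunks below; driver_mmap, driver_bdev(), struct driver_bo and driver_vm_ops are placeholders for each driver's own names):

static int driver_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct ttm_bo_device *bdev = driver_bdev(file_priv); /* placeholder */
	struct drm_vma_offset_node *node;
	struct driver_bo *bo;
	int ret = -EINVAL;

	/* Look up the bo backing this fake offset. */
	drm_vma_offset_lock_lookup(&bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(&bdev->vma_manager,
					    vma->vm_pgoff, vma_pages(vma));
	if (node) {
		bo = container_of(node, struct driver_bo, tbo.vma_node);
		ret = drm_vma_node_verify_access(&bo->gem.vma_node, filp);
		/* Pin the gem object for the lifetime of the mapping. */
		if (!ret && !kref_get_unless_zero(&bo->gem.refcount))
			ret = -EINVAL;
	}
	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!ret) {
		vma->vm_private_data = &bo->tbo;
		vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		/* fault/open/close vm_ops, also duplicated per driver */
		vma->vm_ops = &driver_vm_ops;
	}
	return ret;
}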

Second, the vmwgfx driver relies so much on ttm refcounting that you needed to re-add it for this driver, and it will actually rely even more on bare ttm objects in our upcoming hardware revision, where they are used as page table bos.

Finally, it looks to me like the gain in the gem drivers can be accomplished by just implementing ttm_bo_release() on top of ttm_bo_unref() and leaving the vm system alone. Sure, you'll add an extra atomic operation on object destruction, but that's not a high price to pay...
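
Something along these lines (an untested sketch against the current kref-based API):

/* Untested sketch: keep the kref and the ttm_bo_vm.c code as-is and
 * give the gem drivers the release call they want as a thin wrapper.
 * Costs one extra atomic op at destruction time.
 */
void ttm_bo_release(struct ttm_buffer_object *bo)
{
	ttm_bo_unref(&bo);
}
EXPORT_SYMBOL(ttm_bo_release);

The gem free callbacks could then call ttm_bo_release() exactly as they do in this patch, without touching the vm paths at all.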

Thanks,
/Thomas

> I've only done some very basic smoke testing btw; I know nouveau and radeon boot. No idea about vmwgfx.
---
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index af0b868a9dfd..9fcf38a7923a 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,26 +449,13 @@ int ast_dumb_create(struct drm_file *file,
  	return 0;
  }
-void ast_bo_unref(struct ast_bo **bo)
-{
-	struct ttm_buffer_object *tbo;
-
-	if ((*bo) == NULL)
-		return;
-
-	tbo = &((*bo)->bo);
-	ttm_bo_unref(&tbo);
-	if (tbo == NULL)
-		*bo = NULL;
-
-}
  void ast_gem_free_object(struct drm_gem_object *obj)
  {
  	struct ast_bo *ast_bo = gem_to_ast_bo(obj);
if (!ast_bo)
  		return;
-	ast_bo_unref(&ast_bo);
+	ttm_bo_release(&ast_bo->bo);
  }
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 78e76f24343d..f4d3fd5be1e7 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,27 +255,13 @@ int cirrus_dumb_create(struct drm_file *file,
  	return 0;
  }
-void cirrus_bo_unref(struct cirrus_bo **bo)
-{
-	struct ttm_buffer_object *tbo;
-
-	if ((*bo) == NULL)
-		return;
-
-	tbo = &((*bo)->bo);
-	ttm_bo_unref(&tbo);
-	if (tbo == NULL)
-		*bo = NULL;
-
-}
-
  void cirrus_gem_free_object(struct drm_gem_object *obj)
  {
  	struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
if (!cirrus_bo)
  		return;
-	cirrus_bo_unref(&cirrus_bo);
+	ttm_bo_release(&cirrus_bo->bo);
  }
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4761adedad2a..511b4e97a093 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -848,9 +848,14 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
  	if (!node) {
  		mutex_unlock(&dev->struct_mutex);
  		return drm_mmap(filp, vma);
-	} else if (!drm_vma_node_is_allowed(node, filp)) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EACCES;
+	} else {
+		read_lock(&node->vm_lock);
+		ret = drm_vma_node_verify_access(node, filp);
+		read_unlock(&node->vm_lock);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
  	}
obj = container_of(node, struct drm_gem_object, vma_node);
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 63b471205072..482b9c996217 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -405,7 +405,7 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
   * Search the list in @node whether @filp is currently on the list of allowed
   * open-files (see drm_vma_node_allow()).
   *
- * This is locked against concurrent access internally.
+ * This call requires the vm_lock to be held.
   *
   * RETURNS:
   * true iff @filp is on the list
@@ -416,8 +416,6 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
  	struct drm_vma_offset_file *entry;
  	struct rb_node *iter;
-	read_lock(&node->vm_lock);
-
  	iter = node->vm_files.rb_node;
  	while (likely(iter)) {
  		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
@@ -429,8 +427,6 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
  			iter = iter->rb_left;
  	}
-	read_unlock(&node->vm_lock);
-
  	return iter;
  }
  EXPORT_SYMBOL(drm_vma_node_is_allowed);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index b1120cb1db6d..ca8367505742 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,27 +310,13 @@ int mgag200_dumb_create(struct drm_file *file,
  	return 0;
  }
-void mgag200_bo_unref(struct mgag200_bo **bo)
-{
-	struct ttm_buffer_object *tbo;
-
-	if ((*bo) == NULL)
-		return;
-
-	tbo = &((*bo)->bo);
-	ttm_bo_unref(&tbo);
-	if (tbo == NULL)
-		*bo = NULL;
-
-}
-
  void mgag200_gem_free_object(struct drm_gem_object *obj)
  {
  	struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
if (!mgag200_bo)
  		return;
-	mgag200_bo_unref(&mgag200_bo);
+	ttm_bo_release(&mgag200_bo->bo);
  }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 29cc32f976f8..1ba8b90db8b5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -146,8 +146,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
  	struct drm_device *dev = drm->dev;
  	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	if (unlikely(nvbo->gem.filp))
-		DRM_ERROR("bo %p still attached to GEM object\n", bo);
  	WARN_ON(nvbo->pin_refcnt > 0);
  	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
  	kfree(nvbo);
@@ -246,6 +244,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
  		/* ttm will call nouveau_bo_del_ttm if it fails.. */
  		return ret;
  	}
+	drm_gem_private_object_init(dev, &nvbo->gem, size);
*pnvbo = nvbo;
  	return 0;
@@ -1249,14 +1248,6 @@ out:
  }
static int
-nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
-
-	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
-}
-
-static int
  nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
  {
  	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -1513,7 +1504,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
  	.evict_flags = nouveau_bo_evict_flags,
  	.move_notify = nouveau_bo_move_ntfy,
  	.move = nouveau_bo_move,
-	.verify_access = nouveau_bo_verify_access,
  	.sync_obj_signaled = nouveau_bo_fence_signalled,
  	.sync_obj_wait = nouveau_bo_fence_wait,
  	.sync_obj_flush = nouveau_bo_fence_flush,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index ff17c1f432fc..abbd8b83351f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -51,14 +51,15 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
if (!pnvbo)
  		return -EINVAL;
-	prev = *pnvbo;
-
-	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
-	if (prev) {
-		struct ttm_buffer_object *bo = &prev->bo;
-		ttm_bo_unref(&bo);
+	prev = *pnvbo;
+	if (ref != prev) {
+		if (ref)
+			drm_gem_object_reference(&ref->gem);
+		if (prev)
+			drm_gem_object_unreference_unlocked(&prev->gem);
  	}
+	*pnvbo = ref;
return 0;
  }
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 091753f29554..2c4aecbd8544 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -38,7 +38,6 @@ void
  nouveau_gem_object_del(struct drm_gem_object *gem)
  {
  	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
-	struct ttm_buffer_object *bo = &nvbo->bo;
if (gem->import_attach)
  		drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -47,7 +46,7 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
/* reset filp so nouveau_bo_del_ttm() can test for it */
  	gem->filp = NULL;
-	ttm_bo_unref(&bo);
+	ttm_bo_release(&nvbo->bo);
  }
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 19e3757291fb..48c9277d41da 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -277,16 +277,58 @@ const struct ttm_mem_type_manager_func nv04_gart_manager = {
  	nv04_gart_manager_debug
  };
+static void nouveau_bo_vm_open(struct vm_area_struct *vma)
+{
+	struct nouveau_bo *bo = nouveau_bo(vma->vm_private_data);
+
+	drm_gem_object_reference(&bo->gem);
+}
+
+static void nouveau_bo_vm_close(struct vm_area_struct *vma)
+{
+	struct nouveau_bo *bo = nouveau_bo(vma->vm_private_data);
+
+	drm_gem_object_unreference_unlocked(&bo->gem);
+	vma->vm_private_data = NULL;
+}
+
+static const struct vm_operations_struct nouveau_bo_vm_ops = {
+	.fault = ttm_bo_vm_fault,
+	.open = nouveau_bo_vm_open,
+	.close = nouveau_bo_vm_close
+};
+
  int
  nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
  {
  	struct drm_file *file_priv = filp->private_data;
  	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
+	struct drm_vma_offset_node *node;
+	struct nouveau_bo *bo;
+	int ret;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
  		return drm_mmap(filp, vma);
-	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
+	drm_vma_offset_lock_lookup(&drm->ttm.bdev.vma_manager);
+	node = drm_vma_offset_lookup_locked(&drm->ttm.bdev.vma_manager,
+					    vma->vm_pgoff, vma_pages(vma));
+	if (node) {
+		bo = container_of(node, struct nouveau_bo, bo.vma_node);
+		ret = drm_vma_node_verify_access(&bo->gem.vma_node, filp);
+
+		if (!ret && !kref_get_unless_zero(&bo->gem.refcount))
+			ret = -EINVAL;
+	} else
+		ret = -EINVAL;
+	drm_vma_offset_unlock_lookup(&drm->ttm.bdev.vma_manager);
+
+	if (!ret) {
+		vma->vm_private_data = &bo->bo;
+		vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+		vma->vm_ops = &nouveau_bo_vm_ops;
+	}
+	return ret;
  }
static int
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index b96f0c9d89b2..48c39dc04931 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -33,7 +33,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
  	struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
if (qobj)
-		qxl_bo_unref(&qobj);
+		ttm_bo_release(&qobj->tbo);
  }
int qxl_gem_object_create(struct qxl_device *qdev, int size,
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 8691c76c5ef0..d644a7c7384a 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -209,19 +209,14 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
void qxl_bo_unref(struct qxl_bo **bo)
  {
-	struct ttm_buffer_object *tbo;
-
-	if ((*bo) == NULL)
-		return;
-	tbo = &((*bo)->tbo);
-	ttm_bo_unref(&tbo);
-	if (tbo == NULL)
-		*bo = NULL;
+	if (*bo)
+		drm_gem_object_unreference_unlocked(&(*bo)->gem_base);
+	*bo = NULL;
  }
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
  {
-	ttm_bo_reference(&bo->tbo);
+	drm_gem_object_reference(&bo->gem_base);
  	return bo;
  }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 805c5e566b9a..79d95c76ead6 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -34,9 +34,13 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
  	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
if (robj) {
+		struct radeon_device *rdev = robj->rdev;
  		if (robj->gem_base.import_attach)
  			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
-		radeon_bo_unref(&robj);
+
+		down_read(&rdev->pm.mclk_lock);
+		ttm_bo_release(&robj->tbo);
+		up_read(&rdev->pm.mclk_lock);
  	}
  }
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a3b92bfbe81b..d31b522b8b90 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -196,18 +196,10 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
void radeon_bo_unref(struct radeon_bo **bo)
  {
-	struct ttm_buffer_object *tbo;
-	struct radeon_device *rdev;
-
  	if ((*bo) == NULL)
  		return;
-	rdev = (*bo)->rdev;
-	tbo = &((*bo)->tbo);
-	down_read(&rdev->pm.mclk_lock);
-	ttm_bo_unref(&tbo);
-	up_read(&rdev->pm.mclk_lock);
-	if (tbo == NULL)
-		*bo = NULL;
+	drm_gem_object_unreference_unlocked(&(*bo)->gem_base);
+	*bo = NULL;
  }
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
@@ -386,12 +378,6 @@ int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
  	return 0;
  }
-int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
-			     struct vm_area_struct *vma)
-{
-	return ttm_fbdev_mmap(vma, &bo->tbo);
-}
-
  int radeon_bo_get_surface_reg(struct radeon_bo *bo)
  {
  	struct radeon_device *rdev = bo->rdev;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 209b11150263..9685c6e1637b 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -142,8 +142,6 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
  				struct list_head *head);
  extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
  				   struct list_head *head, int ring);
-extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
-				struct vm_area_struct *vma);
  extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
  				u32 tiling_flags, u32 pitch);
  extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 71245d6f34a2..eb1a697a7349 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -201,13 +201,6 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
  	*placement = rbo->placement;
  }
-static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
-	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
-
-	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
-}
-
  static void radeon_move_null(struct ttm_buffer_object *bo,
  			     struct ttm_mem_reg *new_mem)
  {
@@ -676,7 +669,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
  	.init_mem_type = &radeon_init_mem_type,
  	.evict_flags = &radeon_evict_flags,
  	.move = &radeon_bo_move,
-	.verify_access = &radeon_verify_access,
  	.sync_obj_signaled = &radeon_sync_obj_signaled,
  	.sync_obj_wait = &radeon_sync_obj_wait,
  	.sync_obj_flush = &radeon_sync_obj_flush,
@@ -784,52 +776,77 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
  	man->size = size >> PAGE_SHIFT;
  }
-static struct vm_operations_struct radeon_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops = NULL;
-
  static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
-	struct ttm_buffer_object *bo;
-	struct radeon_device *rdev;
+	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
  	int r;
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
-	if (bo == NULL) {
-		return VM_FAULT_NOPAGE;
-	}
-	rdev = radeon_get_rdev(bo->bdev);
  	down_read(&rdev->pm.mclk_lock);
-	r = ttm_vm_ops->fault(vma, vmf);
+	r = ttm_bo_vm_fault(vma, vmf);
  	up_read(&rdev->pm.mclk_lock);
  	return r;
  }
+static void radeon_bo_vm_open(struct vm_area_struct *vma)
+{
+	struct radeon_bo *bo = container_of(vma->vm_private_data,
+					    struct radeon_bo, tbo);
+
+	drm_gem_object_reference(&bo->gem_base);
+}
+
+static void radeon_bo_vm_close(struct vm_area_struct *vma)
+{
+	struct radeon_bo *bo = container_of(vma->vm_private_data,
+					    struct radeon_bo, tbo);
+
+	drm_gem_object_unreference_unlocked(&bo->gem_base);
+	vma->vm_private_data = NULL;
+}
+
+static const struct vm_operations_struct radeon_ttm_vm_ops = {
+	.fault = radeon_ttm_fault,
+	.open = radeon_bo_vm_open,
+	.close = radeon_bo_vm_close
+};
+
  int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
  {
-	struct drm_file *file_priv;
+	struct drm_file *file_priv = filp->private_data;
  	struct radeon_device *rdev;
-	int r;
+	struct drm_vma_offset_node *node;
+	struct radeon_bo *bo;
+	int ret;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
  		return drm_mmap(filp, vma);
  	}
-	file_priv = filp->private_data;
  	rdev = file_priv->minor->dev->dev_private;
  	if (rdev == NULL) {
  		return -EINVAL;
  	}
-	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
-	if (unlikely(r != 0)) {
-		return r;
-	}
-	if (unlikely(ttm_vm_ops == NULL)) {
-		ttm_vm_ops = vma->vm_ops;
-		radeon_ttm_vm_ops = *ttm_vm_ops;
-		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+
+	drm_vma_offset_lock_lookup(&rdev->mman.bdev.vma_manager);
+	node = drm_vma_offset_lookup_locked(&rdev->mman.bdev.vma_manager,
+					    vma->vm_pgoff, vma_pages(vma));
+	if (node) {
+		bo = container_of(node, struct radeon_bo, tbo.vma_node);
+		ret = drm_vma_node_verify_access(&bo->gem_base.vma_node, filp);
+
+		if (!ret && !kref_get_unless_zero(&bo->gem_base.refcount))
+			ret = -EINVAL;
+	} else
+		ret = -EINVAL;
+	drm_vma_offset_unlock_lookup(&rdev->mman.bdev.vma_manager);
+
+	if (!ret) {
+		vma->vm_private_data = &bo->tbo;
+		vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+		vma->vm_ops = &radeon_ttm_vm_ops;
  	}
-	vma->vm_ops = &radeon_ttm_vm_ops;
-	return 0;
+	return ret;
  }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d5a646ebe6a..d5ba8c56c131 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -139,7 +139,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
  	size_t acc_size = bo->acc_size;
BUG_ON(atomic_read(&bo->list_kref.refcount));
-	BUG_ON(atomic_read(&bo->kref.refcount));
  	BUG_ON(atomic_read(&bo->cpu_writers));
  	BUG_ON(bo->sync_obj != NULL);
  	BUG_ON(bo->mem.mm_node != NULL);
@@ -619,10 +618,8 @@ static void ttm_bo_delayed_workqueue(struct work_struct *work)
  	}
  }
-static void ttm_bo_release(struct kref *kref)
+void ttm_bo_release(struct ttm_buffer_object *bo)
  {
-	struct ttm_buffer_object *bo =
-	    container_of(kref, struct ttm_buffer_object, kref);
  	struct ttm_bo_device *bdev = bo->bdev;
  	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
@@ -633,15 +630,7 @@ static void ttm_bo_release(struct kref *kref)
  	ttm_bo_cleanup_refs_or_queue(bo);
  	kref_put(&bo->list_kref, ttm_bo_release_list);
  }
-
-void ttm_bo_unref(struct ttm_buffer_object **p_bo)
-{
-	struct ttm_buffer_object *bo = *p_bo;
-
-	*p_bo = NULL;
-	kref_put(&bo->kref, ttm_bo_release);
-}
-EXPORT_SYMBOL(ttm_bo_unref);
+EXPORT_SYMBOL(ttm_bo_release);
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
  {
@@ -1116,7 +1105,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
  	}
  	bo->destroy = destroy;
-	kref_init(&bo->kref);
  	kref_init(&bo->list_kref);
  	atomic_set(&bo->cpu_writers, 0);
  	INIT_LIST_HEAD(&bo->lru);
@@ -1165,7 +1153,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
  	ttm_bo_unreserve(bo);
if (unlikely(ret))
-		ttm_bo_unref(&bo);
+		ttm_bo_release(bo);
return ret;
  }
@@ -1200,34 +1188,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
  }
  EXPORT_SYMBOL(ttm_bo_dma_acc_size);
-int ttm_bo_create(struct ttm_bo_device *bdev,
-			unsigned long size,
-			enum ttm_bo_type type,
-			struct ttm_placement *placement,
-			uint32_t page_alignment,
-			bool interruptible,
-			struct file *persistent_swap_storage,
-			struct ttm_buffer_object **p_bo)
-{
-	struct ttm_buffer_object *bo;
-	size_t acc_size;
-	int ret;
-
-	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-	if (unlikely(bo == NULL))
-		return -ENOMEM;
-
-	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
-	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
-			  interruptible, persistent_swap_storage, acc_size,
-			  NULL, NULL);
-	if (likely(ret == 0))
-		*p_bo = bo;
-
-	return ret;
-}
-EXPORT_SYMBOL(ttm_bo_create);
-
  static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
  					unsigned mem_type, bool allow_errors)
  {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4834c463c38b..9a39373676d5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -469,7 +469,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
  		fbo->sync_obj = NULL;
  	spin_unlock(&bdev->fence_lock);
  	kref_init(&fbo->list_kref);
-	kref_init(&fbo->kref);
  	fbo->destroy = &ttm_transfered_destroy;
  	fbo->acc_size = 0;
  	fbo->resv = &fbo->ttm_resv;
@@ -704,7 +703,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
  			bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
-		ttm_bo_unref(&ghost_obj);
+		ttm_bo_release(ghost_obj);
  	}
*old_mem = *new_mem;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index c87d686ff8cf..acb7c1348154 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -86,7 +86,7 @@ out_unlock:
  	return ret;
  }
-static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
  	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
  	    vma->vm_private_data;
@@ -232,97 +232,4 @@ out_unlock:
  	ttm_bo_unreserve(bo);
  	return retval;
  }
-
-static void ttm_bo_vm_open(struct vm_area_struct *vma)
-{
-	struct ttm_buffer_object *bo =
-	    (struct ttm_buffer_object *)vma->vm_private_data;
-
-	(void)ttm_bo_reference(bo);
-}
-
-static void ttm_bo_vm_close(struct vm_area_struct *vma)
-{
-	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
-
-	ttm_bo_unref(&bo);
-	vma->vm_private_data = NULL;
-}
-
-static const struct vm_operations_struct ttm_bo_vm_ops = {
-	.fault = ttm_bo_vm_fault,
-	.open = ttm_bo_vm_open,
-	.close = ttm_bo_vm_close
-};
-
-static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
-						  unsigned long offset,
-						  unsigned long pages)
-{
-	struct drm_vma_offset_node *node;
-	struct ttm_buffer_object *bo = NULL;
-
-	drm_vma_offset_lock_lookup(&bdev->vma_manager);
-
-	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
-	if (likely(node)) {
-		bo = container_of(node, struct ttm_buffer_object, vma_node);
-		if (!kref_get_unless_zero(&bo->kref))
-			bo = NULL;
-	}
-
-	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
-
-	if (!bo)
-		pr_err("Could not find buffer object to map\n");
-
-	return bo;
-}
-
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
-		struct ttm_bo_device *bdev)
-{
-	struct ttm_bo_driver *driver;
-	struct ttm_buffer_object *bo;
-	int ret;
-
-	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
-	if (unlikely(!bo))
-		return -EINVAL;
-
-	driver = bo->bdev->driver;
-	if (unlikely(!driver->verify_access)) {
-		ret = -EPERM;
-		goto out_unref;
-	}
-	ret = driver->verify_access(bo, filp);
-	if (unlikely(ret != 0))
-		goto out_unref;
-
-	vma->vm_ops = &ttm_bo_vm_ops;
-
-	/*
-	 * Note: We're transferring the bo reference to
-	 * vma->vm_private_data here.
-	 */
-
-	vma->vm_private_data = bo;
-	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	return 0;
-out_unref:
-	ttm_bo_unref(&bo);
-	return ret;
-}
-EXPORT_SYMBOL(ttm_bo_mmap);
-
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
-{
-	if (vma->vm_pgoff != 0)
-		return -EACCES;
-
-	vma->vm_ops = &ttm_bo_vm_ops;
-	vma->vm_private_data = ttm_bo_reference(bo);
-	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
-	return 0;
-}
-EXPORT_SYMBOL(ttm_fbdev_mmap);
+EXPORT_SYMBOL(ttm_bo_vm_fault);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 7776e6f0aef6..2503c65c274f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -593,14 +593,6 @@ void vmw_evict_flags(struct ttm_buffer_object *bo,
  	*placement = vmw_sys_placement;
  }
-static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
-	struct ttm_object_file *tfile =
-		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
-
-	return vmw_user_dmabuf_verify_access(bo, tfile);
-}
-
  static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
  {
  	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -683,7 +675,6 @@ struct ttm_bo_driver vmw_bo_driver = {
  	.init_mem_type = vmw_init_mem_type,
  	.evict_flags = vmw_evict_flags,
  	.move = NULL,
-	.verify_access = vmw_verify_access,
  	.sync_obj_signaled = vmw_sync_obj_signaled,
  	.sync_obj_wait = vmw_sync_obj_wait,
  	.sync_obj_flush = vmw_sync_obj_flush,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 814665b7a117..65a95b71e5d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -299,14 +299,19 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
   */
  static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
  {
-	return ttm_bo_create(&dev_priv->bdev,
-			     PAGE_SIZE,
-			     ttm_bo_type_device,
-			     &vmw_vram_sys_placement,
-			     0, false, NULL,
-			     &dev_priv->dummy_query_bo);
-}
+	struct vmw_dma_buffer *vmw_bo = kzalloc(sizeof(*vmw_bo), GFP_KERNEL);
+	int ret;
+
+	if (!vmw_bo)
+		return -ENOMEM;
+	ret = vmw_dmabuf_init(dev_priv, vmw_bo, PAGE_SIZE,
+			      &vmw_vram_sys_placement, false,
+			      &vmw_dmabuf_bo_free);
+	if (!ret)
+		dev_priv->dummy_query_bo = &vmw_bo->base;
+	return ret;
+}
static int vmw_request_device(struct vmw_private *dev_priv)
  {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e401d5dbcb96..e9fea03b1899 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,6 +67,7 @@ struct vmw_fpriv {
struct vmw_dma_buffer {
  	struct ttm_buffer_object base;
+	struct kref refcount;
  	struct list_head res_list;
  };
@@ -511,6 +512,7 @@ extern int vmw_surface_check(struct vmw_private *dev_priv,
  			     uint32_t handle, int *id);
  extern int vmw_surface_validate(struct vmw_private *dev_priv,
  				struct vmw_surface *srf);
+extern void vmw_dmabuf_bo_release(struct kref *refcount);
  extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
  extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
  			   struct vmw_dma_buffer *vmw_bo,
@@ -842,18 +844,34 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
  	struct vmw_dma_buffer *tmp_buf = *buf;
*buf = NULL;
-	if (tmp_buf != NULL) {
-		struct ttm_buffer_object *bo = &tmp_buf->base;
-
-		ttm_bo_unref(&bo);
-	}
+	if (tmp_buf != NULL)
+		kref_put(&tmp_buf->refcount, &vmw_dmabuf_bo_release);
  }
static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
  {
-	if (ttm_bo_reference(&buf->base))
-		return buf;
-	return NULL;
+	kref_get(&buf->refcount);
+	return buf;
+}
+
+static inline void ttm_bo_unref(struct ttm_buffer_object **bo)
+{
+	struct vmw_dma_buffer *buf;
+
+	if (!*bo)
+		return;
+
+	buf = container_of(*bo, struct vmw_dma_buffer, base);
+	*bo = NULL;
+	kref_put(&buf->refcount, &vmw_dmabuf_bo_release);
+}
+
+static inline struct ttm_buffer_object *ttm_bo_reference(struct ttm_buffer_object *bo)
+{
+	struct vmw_dma_buffer *vmw_bo;
+
+	vmw_bo = container_of(bo, struct vmw_dma_buffer, base);
+	return &vmw_dmabuf_reference(vmw_bo)->base;
  }
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index d568432bec22..ecc162b788aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -357,6 +357,14 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
  	kfree(vmw_bo);
  }
+void vmw_dmabuf_bo_release(struct kref *refcount)
+{
+	struct vmw_dma_buffer *vmw_bo;
+
+	vmw_bo = container_of(refcount, struct vmw_dma_buffer, refcount);
+	ttm_bo_release(&vmw_bo->base);
+}
+
  int vmw_dmabuf_init(struct vmw_private *dev_priv,
  		    struct vmw_dma_buffer *vmw_bo,
  		    size_t size, struct ttm_placement *placement,
@@ -373,6 +381,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
  	memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->res_list);
+	kref_init(&vmw_bo->refcount);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
  			  ttm_bo_type_device, placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 98d6bfb3a997..abba9b341e5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -28,21 +28,71 @@
  #include <drm/drmP.h>
  #include "vmwgfx_drv.h"
+static void vmw_bo_vm_open(struct vm_area_struct *vma)
+{
+	struct vmw_dma_buffer *vmw_bo;
+
+	vmw_bo = container_of(vma->vm_private_data,
+			      struct vmw_dma_buffer, base);
+
+	vmw_dmabuf_reference(vmw_bo);
+}
+
+static void vmw_bo_vm_close(struct vm_area_struct *vma)
+{
+	struct vmw_dma_buffer *vmw_bo;
+
+	vmw_bo = container_of(vma->vm_private_data,
+			      struct vmw_dma_buffer, base);
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	vma->vm_private_data = NULL;
+}
+
+static const struct vm_operations_struct vmw_ttm_vm_ops = {
+	.fault = ttm_bo_vm_fault,
+	.open = vmw_bo_vm_open,
+	.close = vmw_bo_vm_close
+};
+
  int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
  {
-	struct drm_file *file_priv;
-	struct vmw_private *dev_priv;
+	struct drm_file *file_priv = filp->private_data;
+	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+	struct drm_vma_offset_node *node;
+	struct vmw_dma_buffer *vmw_bo;
+	int ret;
if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
  		DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
  		return -EINVAL;
  	}
-	file_priv = filp->private_data;
-	dev_priv = vmw_priv(file_priv->minor->dev);
-	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
-}
+	drm_vma_offset_lock_lookup(&dev_priv->bdev.vma_manager);
+	node = drm_vma_offset_lookup_locked(&dev_priv->bdev.vma_manager,
+					    vma->vm_pgoff, vma_pages(vma));
+	if (node) {
+		struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+		vmw_bo = container_of(node, struct vmw_dma_buffer,
+				      base.vma_node);
+
+		ret = vmw_user_dmabuf_verify_access(&vmw_bo->base, tfile);
+		if (!ret && !kref_get_unless_zero(&vmw_bo->refcount))
+			ret = -EINVAL;
+	} else
+		ret = -EINVAL;
+	drm_vma_offset_unlock_lookup(&dev_priv->bdev.vma_manager);
+
+	if (!ret) {
+		vma->vm_private_data = &vmw_bo->base;
+		vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+		vma->vm_ops = &vmw_ttm_vm_ops;
+	}
+	return ret;
+}
  static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
  {
  	DRM_INFO("global init.\n");
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index c18a593d1744..905b052705e6 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -245,6 +245,8 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
   * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
   * verify_access() callbacks.
   *
+ * This call requires the vm_lock to be held.
+ *
   * RETURNS:
   * 0 if access is granted, -EACCES otherwise.
   */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 751eaffbf0d5..07725c293a9b 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -197,8 +197,6 @@ struct ttm_buffer_object {
  	/**
  	* Members not needing protection.
  	*/
-
-	struct kref kref;
  	struct kref list_kref;
/**
@@ -279,21 +277,6 @@ struct ttm_bo_kmap_obj {
  };
/**
- * ttm_bo_reference - reference a struct ttm_buffer_object
- *
- * @bo: The buffer object.
- *
- * Returns a refcounted pointer to a buffer object.
- */
-
-static inline struct ttm_buffer_object *
-ttm_bo_reference(struct ttm_buffer_object *bo)
-{
-	kref_get(&bo->kref);
-	return bo;
-}
-
-/**
   * ttm_bo_wait - wait for buffer idle.
   *
   * @bo:  The buffer object.
@@ -331,13 +314,13 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
  				bool no_wait_gpu);
/**
- * ttm_bo_unref
+ * ttm_bo_release
   *
   * @bo: The buffer object.
   *
- * Unreference and clear a pointer to a buffer object.
+ * Release and destroy a buffer object.
   */
-extern void ttm_bo_unref(struct ttm_buffer_object **bo);
+extern void ttm_bo_release(struct ttm_buffer_object *bo);
/**
@@ -483,41 +466,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
  			void (*destroy) (struct ttm_buffer_object *));
/**
- * ttm_bo_synccpu_object_init
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @flags: Initial placement flags.
- * @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep while waiting for GPU resources,
- * sleep interruptible.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object. Typically, this would
- * point to the shmem object backing a GEM object if TTM is used to back a
- * GEM user interface.
- * @p_bo: On successful completion *p_bo points to the created object.
- *
- * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
- * on that object. The destroy function is set to kfree().
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while waiting for resources.
- */
-
-extern int ttm_bo_create(struct ttm_bo_device *bdev,
-				unsigned long size,
-				enum ttm_bo_type type,
-				struct ttm_placement *placement,
-				uint32_t page_alignment,
-				bool interruptible,
-				struct file *persistent_swap_storage,
-				struct ttm_buffer_object **p_bo);
-
-/**
   * ttm_bo_check_placement
   *
   * @bo:		the buffer object.
@@ -649,57 +597,16 @@ extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
  extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
/**
- * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ * ttm_bo_vm_fault - fault handler for mmap
   *
- * @vma:       vma as input from the fbdev mmap method.
- * @bo:        The bo backing the address space. The address space will
- * have the same size as the bo, and start at offset 0.
- *
- * This function is intended to be called by the fbdev mmap method
- * if the fbdev address space is to be backed by a bo.
- */
-
-extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
-			  struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_mmap - mmap out of the ttm device address space.
- *
- * @filp:      filp as input from the mmap method.
   * @vma:       vma as input from the mmap method.
- * @bdev:      Pointer to the ttm_bo_device with the address space manager.
+ * @vmf:       fault information passed by the mm fault handler
   *
- * This function is intended to be called by the device mmap method.
- * if the device address space is to be backed by the bo manager.
- */
-
-extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
-		       struct ttm_bo_device *bdev);
-
-/**
- * ttm_bo_io
- *
- * @bdev:      Pointer to the struct ttm_bo_device.
- * @filp:      Pointer to the struct file attempting to read / write.
- * @wbuf:      User-space pointer to address of buffer to write. NULL on read.
- * @rbuf:      User-space pointer to address of buffer to read into.
- * Null on write.
- * @count:     Number of bytes to read / write.
- * @f_pos:     Pointer to current file position.
- * @write:     1 for read, 0 for write.
- *
- * This function implements read / write into ttm buffer objects, and is
- * intended to
- * be called from the fops::read and fops::write method.
- * Returns:
- * See man (2) write, man(2) read. In particular,
- * the function may return -ERESTARTSYS if
- * interrupted by a signal.
+ * This function is intended to be called by the driver's mmap fault
+ * handler, with vma->vm_private_data set to the faulting bo.
   */
-extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
-			 const char __user *wbuf, char __user *rbuf,
-			 size_t count, loff_t *f_pos, bool write);
+int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8639c85d61c4..02581f10ca3e 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -401,21 +401,6 @@ struct ttm_bo_driver {
  		     struct ttm_mem_reg *new_mem);
/**
-	 * struct ttm_bo_driver_member verify_access
-	 *
-	 * @bo: Pointer to a buffer object.
-	 * @filp: Pointer to a struct file trying to access the object.
-	 *
-	 * Called from the map / write / read methods to verify that the
-	 * caller is permitted to access the buffer object.
-	 * This member may be set to NULL, which will refuse this kind of
-	 * access for all buffer objects.
-	 * This function should return 0 if access is granted, -EPERM otherwise.
-	 */
-	int (*verify_access) (struct ttm_buffer_object *bo,
-			      struct file *filp);
-
-	/**
  	 * In case a driver writer dislikes the TTM fence objects,
  	 * the driver writer can replace those with sync objects of
  	 * his / her own. If it turns out that no driver writer is
@@ -875,8 +860,6 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
  {
  	int ret;
-	WARN_ON(!atomic_read(&bo->kref.refcount));
-
  	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
  				    ticket);
  	if (likely(ret == 0))
@@ -901,8 +884,6 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
  {
  	int ret = 0;
-	WARN_ON(!atomic_read(&bo->kref.refcount));
-
  	if (interruptible)
  		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
  						       ticket);

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/dri-devel



