This died many years ago as we now use i915_vma first and foremost.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
Cc: Matthew Auld <matthew.auld@xxxxxxxxx>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 13 +++----
 drivers/gpu/drm/i915/i915_drv.h            |  8 -----
 drivers/gpu/drm/i915/i915_gem.c            | 36 +++++++------------
 3 files changed, 19 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f0998f1225af..f09043a3ea9d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1995,28 +1995,29 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 static struct i915_vma *
 shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = eb->i915;
 	struct i915_vma * const vma = *eb->vma;
-	struct i915_address_space *vm;
 	u64 flags;
+	int err;
 
 	/*
 	 * PPGTT backed shadow buffers must be mapped RO, to prevent
 	 * post-scan tampering
 	 */
-	if (CMDPARSER_USES_GGTT(dev_priv)) {
+	if (CMDPARSER_USES_GGTT(eb->i915)) {
 		flags = PIN_GLOBAL;
-		vm = &dev_priv->ggtt.vm;
 	} else if (vma->vm->has_read_only) {
 		flags = PIN_USER;
-		vm = vma->vm;
 		i915_gem_object_set_readonly(obj);
 	} else {
 		DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+	err = i915_vma_pin(vma, 0, 0, flags);
+	if (err)
+		return ERR_PTR(err);
+
+	return vma;
 }
 
 static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1779f600fcfb..a70555e6befb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1842,14 +1842,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 			   unsigned long flags);
 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
 
-struct i915_vma * __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    const struct i915_ggtt_view *view,
-		    u64 size,
-		    u64 alignment,
-		    u64 flags);
-
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
 static inline int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 43c532756c7c..89696f1a4e77 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -891,22 +891,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 u64 alignment,
 			 u64 flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_address_space *vm = &dev_priv->ggtt.vm;
-
-	return i915_gem_object_pin(obj, vm, view, size, alignment,
-				   flags | PIN_GLOBAL);
-}
-
-struct i915_vma *
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    const struct i915_ggtt_view *view,
-		    u64 size,
-		    u64 alignment,
-		    u64 flags)
-{
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct i915_vma *vma;
 	int ret;
 
@@ -915,17 +901,19 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
 	if (flags & PIN_MAPPABLE &&
 	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
-		/* If the required space is larger than the available
+		/*
+		 * If the required space is larger than the available
 		 * aperture, we will not able to find a slot for the
 		 * object and unbinding the object now will be in
 		 * vain. Worse, doing so may cause us to ping-pong
 		 * the object in and out of the Global GTT and
 		 * waste a lot of cycles under the mutex.
 		 */
-		if (obj->base.size > dev_priv->ggtt.mappable_end)
+		if (obj->base.size > ggtt->mappable_end)
 			return ERR_PTR(-E2BIG);
 
-		/* If NONBLOCK is set the caller is optimistically
+		/*
+		 * If NONBLOCK is set the caller is optimistically
 		 * trying to cache the full object within the mappable
 		 * aperture, and *must* have a fallback in place for
 		 * situations where we cannot bind the object. We
@@ -941,11 +929,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		 * we could try to minimise harm to others.
 		 */
 		if (flags & PIN_NONBLOCK &&
-		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
+		    obj->base.size > ggtt->mappable_end / 2)
 			return ERR_PTR(-ENOSPC);
 	}
 
-	vma = i915_vma_instance(obj, vm, view);
+	vma = i915_vma_instance(obj, &ggtt->vm, view);
 	if (IS_ERR(vma))
 		return vma;
 
@@ -955,7 +943,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 			return ERR_PTR(-ENOSPC);
 
 		if (flags & PIN_MAPPABLE &&
-		    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
+		    vma->fence_size > ggtt->mappable_end / 2)
 			return ERR_PTR(-ENOSPC);
 	}
 
@@ -965,9 +953,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
-		mutex_lock(&vma->vm->mutex);
+		mutex_lock(&ggtt->vm.mutex);
 		ret = i915_vma_revoke_fence(vma);
-		mutex_unlock(&vma->vm->mutex);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (ret)
 			return ERR_PTR(ret);
 	}
-- 
2.24.0
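[Editor's note] For readers following the API change, the calling convention this patch converges on can be sketched as below. This is an illustration only, not part of the patch: the wrapper name example_pin_in_ggtt is invented, and it only combines calls already visible in the diff above (i915_vma_instance() followed by i915_vma_pin()).

static struct i915_vma *
example_pin_in_ggtt(struct drm_i915_gem_object *obj, struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;
	int err;

	/* Look up (or create) the vma binding obj into the global GTT. */
	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma))
		return vma;

	/* Pin that vma directly; PIN_GLOBAL requests a GGTT binding. */
	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		return ERR_PTR(err);

	return vma;
}

This roughly mirrors what i915_gem_object_ggtt_pin() and shadow_batch_pin() now do themselves, which is why the standalone i915_gem_object_pin() wrapper can be removed.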