+
+ for_i915_gem_ww(&ww, ret, true) {
+ ret = i915_gem_object_lock(vma->obj, &ww);
+ if (ret)
+ continue;
+
+ ret = i915_vma_pin_ww(vma, &ww, 0, 0, pin_flags);
+ if (ret)
+ continue;
+
+ /* Make it evictable */
+ __i915_vma_unpin(vma);
+
+ list_add_tail(&vma->vm_bind_link, &vm->vm_bound_list);
+ i915_vm_bind_it_insert(vma, &vm->va);
+
+ /* Hold object reference until vm_unbind */
+ i915_gem_object_get(vma->obj);
+ }
+
+ if (ret)
+ i915_vma_destroy(vma);
+unlock_vm:
+ mutex_unlock(&vm->vm_bind_lock);
+put_obj:
+ i915_gem_object_put(obj);
+
+ return ret;
+}
+
+/**
+ * i915_gem_vm_bind_ioctl() - ioctl function for binding a section
of object
+ * at a specified virtual address
+ * @dev: drm_device pointer
+ * @data: ioctl data structure
+ * @file: drm_file pointer
+ *
+ * Adds the specified persistent mapping (virtual address to a
section of an
+ * object) and binds it in the device page table.
+ *
+ * Returns 0 on success, error code on failure.
+ */
+int i915_gem_vm_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_vm_bind *args = data;
+ struct i915_address_space *vm;
+ int ret;
+
+ vm = i915_gem_vm_lookup(file->driver_priv, args->vm_id);
+ if (unlikely(!vm))
+ return -ENOENT;
+
+ ret = i915_gem_vm_bind_obj(vm, args, file);
+
+ i915_vm_put(vm);
+ return ret;
+}
+
+/**
+ * i915_gem_vm_unbind_ioctl() - ioctl function for unbinding a
mapping at a
+ * specified virtual address
+ * @dev: drm_device pointer
+ * @data: ioctl data structure
+ * @file: drm_file pointer
+ *
+ * Removes the persistent mapping at the specified address and
unbinds it
+ * from the device page table.
+ *
+ * Returns 0 on success, error code on failure. -ENOENT is returned
if the
+ * specified mapping is not found.
+ */
+int i915_gem_vm_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_vm_unbind *args = data;
+ struct i915_address_space *vm;
+ int ret;
+
+ vm = i915_gem_vm_lookup(file->driver_priv, args->vm_id);
+ if (unlikely(!vm))
+ return -ENOENT;
+
+ ret = i915_gem_vm_unbind_vma(vm, args);
+
+ i915_vm_put(vm);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c
b/drivers/gpu/drm/i915/gt/intel_gtt.c
index b67831833c9a..0daa70c6ed0d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -12,6 +12,7 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_vm_bind.h"
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_gt.h"
@@ -176,6 +177,8 @@ int i915_vm_lock_objects(struct
i915_address_space *vm,
void i915_address_space_fini(struct i915_address_space *vm)
{
drm_mm_takedown(&vm->mm);
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&vm->va.rb_root));
+ mutex_destroy(&vm->vm_bind_lock);
}
/**
@@ -202,6 +205,8 @@ static void __i915_vm_release(struct work_struct
*work)
struct i915_address_space *vm =
container_of(work, struct i915_address_space, release_work);
+ i915_gem_vm_unbind_all(vm);
+
__i915_vm_close(vm);
/* Synchronize async unbinds. */
@@ -282,6 +287,11 @@ void i915_address_space_init(struct
i915_address_space *vm, int subclass)
INIT_LIST_HEAD(&vm->bound_list);
INIT_LIST_HEAD(&vm->unbound_list);
+
+ vm->va = RB_ROOT_CACHED;
+ INIT_LIST_HEAD(&vm->vm_bind_list);
+ INIT_LIST_HEAD(&vm->vm_bound_list);
+ mutex_init(&vm->vm_bind_lock);
}
void *__px_vaddr(struct drm_i915_gem_object *p)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h
b/drivers/gpu/drm/i915/gt/intel_gtt.h
index c0ca53cba9f0..b52061858161 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -259,6 +259,23 @@ struct i915_address_space {
*/
struct list_head unbound_list;
+ /**
+ * @vm_bind_mode: flag to indicate vm_bind method of binding
+ *
+ * True: allow only vm_bind method of binding.
+ * False: allow only legacy execbuf method of binding.
+ */
+ bool vm_bind_mode:1;
+
+ /** @vm_bind_lock: Mutex to protect @vm_bind_list and
@vm_bound_list */
+ struct mutex vm_bind_lock;
+ /** @vm_bind_list: List of vm_binding in process */
+ struct list_head vm_bind_list;
+ /** @vm_bound_list: List of vm_binding completed */
+ struct list_head vm_bound_list;
+ /** @va: tree of persistent vmas */
+ struct rb_root_cached va;
+
/* Global GTT */
bool is_ggtt:1;
diff --git a/drivers/gpu/drm/i915/i915_driver.c
b/drivers/gpu/drm/i915/i915_driver.c
index fb3826dabe8b..c3a9a5031cdb 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -69,6 +69,7 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
+#include "gem/i915_gem_vm_bind.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
@@ -1893,6 +1894,8 @@ static const struct drm_ioctl_desc
i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY,
i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_BIND, i915_gem_vm_bind_ioctl,
DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_UNBIND, i915_gem_vm_unbind_ioctl,
DRM_RENDER_ALLOW),
};
/*
diff --git a/drivers/gpu/drm/i915/i915_vma.c
b/drivers/gpu/drm/i915/i915_vma.c
index 5839e1f55f00..33f910473263 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -239,6 +239,7 @@ vma_create(struct drm_i915_gem_object *obj,
spin_unlock(&obj->vma.lock);
mutex_unlock(&vm->mutex);
+ INIT_LIST_HEAD(&vma->vm_bind_link);
return vma;
err_unlock:
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h
b/drivers/gpu/drm/i915/i915_vma_types.h
index 2200f1f103ba..f56ac07c6cfa 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -296,6 +296,20 @@ struct i915_vma {
/** This object's place on the active/inactive lists */
struct list_head vm_link;
+ /** @vm_bind_link: node for the vm_bind related lists of vm */
+ struct list_head vm_bind_link;
+
+ /* Interval tree structures for persistent vma */
+
+ /** @rb: node for the interval tree of vm for persistent vmas */
+ struct rb_node rb;
+ /** @start: start endpoint of the rb node */
+ u64 start;
+ /** @last: Last endpoint of the rb node */
+ u64 last;
+ /** @__subtree_last: last in subtree */
+ u64 __subtree_last;
+
struct list_head obj_link; /* Link in the object's VMA list */
struct rb_node obj_node;
struct hlist_node obj_hash;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 520ad2691a99..9760564b4693 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -470,6 +470,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
#define DRM_I915_GEM_CREATE_EXT 0x3c
+#define DRM_I915_GEM_VM_BIND 0x3d
+#define DRM_I915_GEM_VM_UNBIND 0x3e
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE +
DRM_I915_INIT, drm_i915_init_t)
@@ -534,6 +536,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE +
DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE +
DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE +
DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE +
DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
+#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE +
DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_unbind)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -3717,6 +3721,114 @@ struct
drm_i915_gem_create_ext_protected_content {
/* ID of the protected content session managed by i915 when PXP is
active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
+/**
+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
+ *
+ * This structure is passed to VM_BIND ioctl and specifies the
mapping of GPU
+ * virtual address (VA) range to the section of an object that
should be bound
+ * in the device page table of the specified address space (VM).
+ * The VA range specified must be unique (ie., not currently bound)
and can
+ * be mapped to whole object or a section of the object (partial
binding).
+ * Multiple VA mappings can be created to the same section of the
object
+ * (aliasing).
+ *
+ * The @start, @offset and @length must be 4K page aligned. However
the DG2
+ * and XEHPSDV has 64K page size for device local memory and has
compact page
+ * table. On those platforms, for binding device local-memory
objects, the
+ * @start, @offset and @length must be 64K aligned. Also, UMDs
should not mix
+ * the local memory 64K page and the system memory 4K page bindings
in the same
+ * 2M range.
+ *
+ * Error code -EINVAL will be returned if @start, @offset and
@length are not
+ * properly aligned. In version 1 (See I915_PARAM_VM_BIND_VERSION),
error code
+ * -ENOSPC will be returned if the VA range specified can't be
reserved.
+ *
+ * VM_BIND/UNBIND ioctl calls executed on different CPU threads
concurrently
+ * are not ordered. Furthermore, parts of the VM_BIND operation can be done
+ * asynchronously, if an out-fence is provided through an extension.
+ */
+struct drm_i915_gem_vm_bind {
+ /** @vm_id: VM (address space) id to bind */
+ __u32 vm_id;
+
+ /** @handle: Object handle */
+ __u32 handle;
+
+ /** @start: Virtual Address start to bind */
+ __u64 start;
+
+ /** @offset: Offset in object to bind */
+ __u64 offset;
+
+ /** @length: Length of mapping to bind */
+ __u64 length;
+
+ /**
+ * @flags: Currently reserved, MBZ.
+ *
+ * Note that an out-fence extension carries its own flags.
+ */
+ __u64 flags;
+
+ /** @rsvd: Reserved, MBZ */
+ __u64 rsvd[2];
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
+ *
+ * For future extensions. See struct i915_user_extension.
+ */
+ __u64 extensions;
+};
+
+/**
+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
+ *
+ * This structure is passed to VM_UNBIND ioctl and specifies the GPU
virtual
+ * address (VA) range that should be unbound from the device page
table of the
+ * specified address space (VM). VM_UNBIND will force unbind the
specified
+ * range from device page table without waiting for any GPU job to
complete.
+ * It is UMDs responsibility to ensure the mapping is no longer in
use before
+ * calling VM_UNBIND.
+ *
+ * If the specified mapping is not found, error code -ENOENT is returned.
+ *
+ * VM_BIND/UNBIND ioctl calls executed on different CPU threads
concurrently
+ * are not ordered. Furthermore, parts of the VM_UNBIND operation can be done
+ * asynchronously, if an out-fence is provided through an extension.
+ */
+struct drm_i915_gem_vm_unbind {
+ /** @vm_id: VM (address space) id to unbind from */
+ __u32 vm_id;
+
+ /** @rsvd: Reserved, MBZ */
+ __u32 rsvd;
+
+ /** @start: Virtual Address start to unbind */
+ __u64 start;
+
+ /** @length: Length of mapping to unbind */
+ __u64 length;
+
+ /**
+ * @flags: Currently reserved, MBZ.
+ *
+ * Note that an out-fence extension carries its own flags.
+ */
+ __u64 flags;
+
+ /** @rsvd2: Reserved, MBZ */
+ __u64 rsvd2[2];
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
+ *
+ * For future extensions. See struct i915_user_extension.
+ */
+ __u64 extensions;
+};
+
#if defined(__cplusplus)
}
#endif