+
+ /** @rsvd: Reserved, MBZ */
+ __u32 rsvd;
+
+ /** @handle: Object handle */
+ __u32 handle;
+
+ /** @start: Virtual Address start to bind */
+ __u64 start;
+
+ /** @offset: Offset in object to bind */
+ __u64 offset;
+
+ /** @length: Length of mapping to bind */
+ __u64 length;
+
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_GEM_VM_BIND_READONLY:
+ * Mapping is read-only.
+ *
+ * I915_GEM_VM_BIND_CAPTURE:
+ * Capture this mapping in the dump upon GPU error.
+ */
+ __u64 flags;
+#define I915_GEM_VM_BIND_READONLY (1 << 0)
+#define I915_GEM_VM_BIND_CAPTURE (1 << 1)
+
+ /**
+ * @extensions: 0-terminated chain of extensions for this
operation.
+ *
+ * I915_VM_BIND_EXT_TIMELINE_FENCES:
+ * Specifies an array of input or output timeline fences for this
+ * binding operation. See struct
drm_i915_vm_bind_ext_timeline_fences.
+ *
+ * I915_VM_BIND_EXT_USER_FENCES:
+ * Specifies an array of input or output user fences for this
+ * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
+ * This is required for compute contexts.
+ */
+ __u64 extensions;
+#define I915_VM_BIND_EXT_TIMELINE_FENCES 0
+#define I915_VM_BIND_EXT_USER_FENCES 1
+};
+
/**
 * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
 *
 * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
 * address (VA) range that should be unbound from the device page table of the
 * specified address space (VM). The specified VA range must match one of the
 * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
 * completion.
 *
 * The @queue_idx specifies the queue to use for unbinding.
 * See struct drm_i915_gem_vm_bind for more information on @queue_idx.
 *
 * The @start and @length must specify a unique mapping bound with VM_BIND
 * ioctl.
 */
struct drm_i915_gem_vm_unbind {
	/** @vm_id: VM (address space) id the mapping to unbind belongs to */
	__u32 vm_id;

	/** @queue_idx: Index of queue for unbinding */
	__u32 queue_idx;

	/** @start: Virtual Address start to unbind */
	__u64 start;

	/** @length: Length of mapping to unbind */
	__u64 length;

	/** @flags: Reserved for future usage, currently MBZ */
	__u64 flags;

	/**
	 * @extensions: 0-terminated chain of extensions for this operation.
	 *
	 * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
	 * Specifies an array of input or output timeline fences for this
	 * unbind operation.
	 * It has same format as struct drm_i915_vm_bind_ext_timeline_fences.
	 *
	 * I915_VM_UNBIND_EXT_USER_FENCES:
	 * Specifies an array of input or output user fences for this
	 * unbind operation. This is required for compute contexts.
	 * It has same format as struct drm_i915_vm_bind_ext_user_fence.
	 */
	__u64 extensions;
#define I915_VM_UNBIND_EXT_TIMELINE_FENCES 0
#define I915_VM_UNBIND_EXT_USER_FENCES 1
};
+
/**
 * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
 * or the vm_unbind work.
 *
 * The vm_bind or vm_unbind async worker will wait for input fence to signal
 * before starting the binding or unbinding.
 *
 * The vm_bind or vm_unbind async worker will signal the returned output fence
 * after the completion of binding or unbinding.
 */
struct drm_i915_vm_bind_fence {
	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
	__u32 handle;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_VM_BIND_FENCE_WAIT:
	 * Wait for the input fence before binding/unbinding
	 *
	 * I915_VM_BIND_FENCE_SIGNAL:
	 * Return bind/unbind completion fence as output
	 */
	__u32 flags;
#define I915_VM_BIND_FENCE_WAIT (1<<0)
#define I915_VM_BIND_FENCE_SIGNAL (1<<1)
#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
};
+
/**
 * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
 * and vm_unbind.
 *
 * This structure describes an array of timeline drm_syncobj and associated
 * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
 * can be input or output fences (See struct drm_i915_vm_bind_fence).
 */
struct drm_i915_vm_bind_ext_timeline_fences {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/**
	 * @fence_count: Number of elements in the @handles_ptr & @values_ptr
	 * arrays.
	 */
	__u64 fence_count;

	/**
	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
	 * of length @fence_count.
	 */
	__u64 handles_ptr;

	/**
	 * @values_ptr: Pointer to an array of u64 values of length
	 * @fence_count.
	 * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
	 * binary one.
	 */
	__u64 values_ptr;
};
+
/**
 * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
 * vm_bind or the vm_unbind work.
 *
 * The vm_bind or vm_unbind async worker will wait for the input fence (value
 * at @addr to become equal to @val) before starting the binding or unbinding.
 *
 * The vm_bind or vm_unbind async worker will signal the output fence after
 * the completion of binding or unbinding by writing @val to memory location
 * at @addr
 */
struct drm_i915_vm_bind_user_fence {
	/** @addr: User/Memory fence qword aligned process virtual address */
	__u64 addr;

	/** @val: User/Memory fence value to be written after bind completion */
	__u64 val;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_VM_BIND_USER_FENCE_WAIT:
	 * Wait for the input fence before binding/unbinding
	 *
	 * I915_VM_BIND_USER_FENCE_SIGNAL:
	 * Return bind/unbind completion fence as output
	 */
	__u32 flags;
#define I915_VM_BIND_USER_FENCE_WAIT (1<<0)
#define I915_VM_BIND_USER_FENCE_SIGNAL (1<<1)
#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
};
+
/**
 * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
 * and vm_unbind.
 *
 * These user fences can be input or output fences
 * (See struct drm_i915_vm_bind_user_fence).
 */
struct drm_i915_vm_bind_ext_user_fence {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/** @fence_count: Number of elements in the @user_fence_ptr array. */
	__u64 fence_count;

	/**
	 * @user_fence_ptr: Pointer to an array of
	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
	 */
	__u64 user_fence_ptr;
};
+
/**
 * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
 * ioctl.
 *
 * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and VM_BIND mode
 * only works with this ioctl for submission.
 * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
 */
struct drm_i915_gem_execbuffer3 {
	/**
	 * @ctx_id: Context id
	 *
	 * Only contexts with user engine map are allowed.
	 */
	__u32 ctx_id;

	/**
	 * @engine_idx: Engine index
	 *
	 * An index in the user engine map of the context specified by @ctx_id.
	 */
	__u32 engine_idx;

	/** @rsvd1: Reserved, MBZ */
	__u32 rsvd1;

	/**
	 * @batch_count: Number of batches in @batch_address array.
	 *
	 * 0 is invalid. For parallel submission, it should be equal to the
	 * number of (parallel) engines involved in that submission.
	 */
	__u32 batch_count;

	/**
	 * @batch_address: Array of batch gpu virtual addresses.
	 *
	 * If @batch_count is 1, then it is the gpu virtual address of the
	 * batch buffer. If @batch_count > 1, then it is a pointer to an array
	 * of batch buffer gpu virtual addresses.
	 */
	__u64 batch_address;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_EXEC3_SECURE:
	 * Request a privileged ("secure") batch buffer/s.
	 * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
	 */
	__u64 flags;
#define I915_EXEC3_SECURE (1<<0)

	/** @rsvd2: Reserved, MBZ */
	__u64 rsvd2;

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
	 * It has same format as DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
	 * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
	 *
	 * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
	 * First level batch completion signaling extension.
	 * See struct drm_i915_gem_execbuffer3_ext_user_fence.
	 */
	__u64 extensions;
#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES 0
#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE 1
};
+
/**
 * struct drm_i915_gem_execbuffer3_ext_user_fence - First level batch
 * completion signaling extension.
 *
 * This extension allows user to attach a user fence (@addr, @value pair) to
 * execbuf3, to be signaled by the command streamer after the completion of
 * first level batch, by writing the @value at specified @addr and triggering
 * an interrupt.
 * User can either poll for this user fence to signal or can also wait on it
 * with i915_gem_wait_user_fence ioctl.
 * This is very much useful for long running contexts where waiting on
 * dma-fence by user (like i915_gem_wait ioctl) is not supported.
 */
struct drm_i915_gem_execbuffer3_ext_user_fence {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/**
	 * @addr: User/Memory fence qword aligned GPU virtual address.
	 *
	 * Address has to be a valid GPU virtual address at the time of
	 * first level batch completion.
	 */
	__u64 addr;

	/**
	 * @value: User/Memory fence Value to be written to above address
	 * after first level batch completes.
	 */
	__u64 value;

	/** @rsvd: Reserved, MBZ */
	__u64 rsvd;
};
+
/**
 * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
 * private to the specified VM.
 *
 * See struct drm_i915_gem_create_ext.
 */
struct drm_i915_gem_create_ext_vm_private {
#define I915_GEM_CREATE_EXT_VM_PRIVATE 2
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/** @vm_id: Id of the VM to which the object is private */
	__u32 vm_id;
};
+
/**
 * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
 *
 * User/Memory fence can be woken up either by:
 *
 * 1. GPU context indicated by @ctx_id, or,
 * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
 *    @ctx_id is ignored when this flag is set.
 *
 * Wakeup condition is,
 * ``((*addr & mask) op (value & mask))``
 *
 * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
 */
struct drm_i915_gem_wait_user_fence {
	/** @extensions: Zero-terminated chain of extensions. */
	__u64 extensions;

	/** @addr: User/Memory fence address */
	__u64 addr;

	/** @ctx_id: Id of the Context which will signal the fence. */
	__u32 ctx_id;

	/** @op: Wakeup condition operator */
	__u16 op;
#define I915_UFENCE_WAIT_EQ 0
#define I915_UFENCE_WAIT_NEQ 1
#define I915_UFENCE_WAIT_GT 2
#define I915_UFENCE_WAIT_GTE 3
#define I915_UFENCE_WAIT_LT 4
#define I915_UFENCE_WAIT_LTE 5
#define I915_UFENCE_WAIT_BEFORE 6
#define I915_UFENCE_WAIT_AFTER 7

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_UFENCE_WAIT_SOFT:
	 *
	 * To be woken up by i915 driver async worker (not by GPU).
	 *
	 * I915_UFENCE_WAIT_ABSTIME:
	 *
	 * Wait timeout specified as absolute time.
	 */
	__u16 flags;
#define I915_UFENCE_WAIT_SOFT 0x1
#define I915_UFENCE_WAIT_ABSTIME 0x2

	/** @value: Wakeup value */
	__u64 value;

	/** @mask: Wakeup mask */
	__u64 mask;
#define I915_UFENCE_WAIT_U8 0xffu
#define I915_UFENCE_WAIT_U16 0xffffu
#define I915_UFENCE_WAIT_U32 0xfffffffful
#define I915_UFENCE_WAIT_U64 0xffffffffffffffffull

	/**
	 * @timeout: Wait timeout in nanoseconds.
	 *
	 * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
	 * absolute time in nsec.
	 */
	__s64 timeout;
};