This was not done as a straight copy because the reset_stats IOCTL landed in
libdrm before the upstream kernel. (We'll do a similar thing for full PPGTT
anyway, so there isn't really a point in syncing exactly.)

Signed-off-by: Ben Widawsky <ben@xxxxxxxxxxxx>
---
 include/drm/i915_drm.h | 113 +++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 100 insertions(+), 13 deletions(-)

diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index c1914d6..2f4eb8c 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -27,12 +27,36 @@
 #ifndef _I915_DRM_H_
 #define _I915_DRM_H_
 
-#include "drm.h"
+#include <drm.h>
 
 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints.
  */
 
+/**
+ * DOC: uevents generated by i915 on it's device node
+ *
+ * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
+ *	event from the gpu l3 cache. Additional information supplied is ROW,
+ *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
+ *	track of these events and if a specific cache-line seems to have a
+ *	persistent error remap it with the l3 remapping tool supplied in
+ *	intel-gpu-tools. The value supplied with the event is always 1.
+ *
+ * I915_ERROR_UEVENT - Generated upon error detection, currently only via
+ *	hangcheck. The error detection event is a good indicator of when things
+ *	began to go badly. The value supplied with the event is a 1 upon error
+ *	detection, and a 0 upon reset completion, signifying no more error
+ *	exists. NOTE: Disabling hangcheck or reset via module parameter will
+ *	cause the related events to not be seen.
+ *
+ * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
+ *	the GPU. The value supplied with the event is always 1. NOTE: Disable
+ *	reset via module parameter will cause this event to not be seen.
+ */
+#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
+#define I915_ERROR_UEVENT		"ERROR"
+#define I915_RESET_UEVENT		"RESET"
 
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
@@ -195,8 +219,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_WAIT	0x2c
 #define DRM_I915_GEM_CONTEXT_CREATE	0x2d
 #define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
-#define DRM_I915_GEM_SET_CACHEING	0x2f
-#define DRM_I915_GEM_GET_CACHEING	0x30
+#define DRM_I915_GEM_SET_CACHING	0x2f
+#define DRM_I915_GEM_GET_CACHING	0x30
 #define DRM_I915_REG_READ	0x31
 #define DRM_I915_GET_RESET_STATS	0x32
@@ -223,8 +247,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_PIN	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
-#define DRM_IOCTL_I915_GEM_SET_CACHEING	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHEING, struct drm_i915_gem_cacheing)
-#define DRM_IOCTL_I915_GEM_GET_CACHEING	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHEING, struct drm_i915_gem_cacheing)
+#define DRM_IOCTL_I915_GEM_SET_CACHING	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_GET_CACHING	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
 #define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
 #define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
 #define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
@@ -305,7 +329,14 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_LLC	17
 #define I915_PARAM_HAS_ALIASING_PPGTT	18
 #define I915_PARAM_HAS_WAIT_TIMEOUT	19
-#define I915_PARAM_HAS_VEBOX	22
+#define I915_PARAM_HAS_SEMAPHORES	20
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	21
+#define I915_PARAM_HAS_VEBOX	22
+#define I915_PARAM_HAS_SECURE_BATCHES	23
+#define I915_PARAM_HAS_PINNED_BATCHES	24
+#define I915_PARAM_HAS_EXEC_NO_RELOC	25
+#define I915_PARAM_HAS_EXEC_HANDLE_LUT	26
+#define I915_PARAM_HAS_WT	27
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -626,7 +657,11 @@ struct drm_i915_gem_exec_object2 {
 	__u64 offset;
 
 #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT	(1<<1)
+#define EXEC_OBJECT_WRITE	(1<<2)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
 	__u64 flags;
+
 	__u64 rsvd1;
 	__u64 rsvd2;
 };
@@ -672,6 +707,34 @@ struct drm_i915_gem_execbuffer2 {
 /** Resets the SO write offset registers for transform feedback on gen7. */
 #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
 
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE	(1<<9)
+
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED	(1<<10)
+
+/** Provide a hint to the kernel that the command stream and auxilliary
+ * state buffers already holds the correct presumed addresses and so the
+ * relocation process may be skipped if no buffers need to be moved in
+ * preparation for the execbuffer.
+ */
+#define I915_EXEC_NO_RELOC	(1<<11)
+
+/** Use the reloc.handle as an index into the exec object array rather
+ * than as the per-file handle.
+ */
+#define I915_EXEC_HANDLE_LUT	(1<<12)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
+
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
 	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
@@ -708,21 +771,45 @@ struct drm_i915_gem_busy {
 	__u32 busy;
 };
 
-#define I915_CACHEING_NONE		0
-#define I915_CACHEING_CACHED		1
+/**
+ * I915_CACHING_NONE
+ *
+ * GPU access is not coherent with cpu caches. Default for machines without an
+ * LLC.
+ */
+#define I915_CACHING_NONE		0
+/**
+ * I915_CACHING_CACHED
+ *
+ * GPU access is coherent with cpu caches and furthermore the data is cached in
+ * last-level caches shared between cpu cores and the gpu GT. Default on
+ * machines with HAS_LLC.
+ */
+#define I915_CACHING_CACHED		1
+/**
+ * I915_CACHING_DISPLAY
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no special
+ * cache mode (like write-through or gfdt flushing) is available. The kernel
+ * automatically sets this mode when using a buffer as a scanout target.
+ * Userspace can manually set this mode to avoid a costly stall and clflush in
+ * the hotpath of drawing the first frame.
+ */
+#define I915_CACHING_DISPLAY		2
 
-struct drm_i915_gem_cacheing {
+struct drm_i915_gem_caching {
 	/**
-	 * Handle of the buffer to set/get the cacheing level of. */
+	 * Handle of the buffer to set/get the caching level of. */
 	__u32 handle;
 
 	/**
 	 * Cacheing level to apply or return value
 	 *
-	 * bits0-15 are for generic cacheing control (i.e. the above defined
+	 * bits0-15 are for generic caching control (i.e. the above defined
 	 * values). bits16-31 are reserved for platform-specific variations
 	 * (e.g. l3$ caching on gen7). */
-	__u32 cacheing;
+	__u32 caching;
 };
 
 #define I915_TILING_NONE	0
@@ -962,4 +1049,4 @@ struct drm_i915_reset_stats {
 	__u32 pad;
 };
 
-#endif /* _I915_DRM_H_ */
+#endif /* _I915_DRM_H_ */
-- 
1.8.5.2
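
For illustration only, and not taken from this patch: a rough sketch of how a
userspace consumer could watch for the uevents documented in the new DOC
comment, using libudev. The subsystem filter, property key names and the
printf reporting are my assumptions here; the keys follow the I915_*_UEVENT
strings and the values behave as the comment describes. Build with -ludev.

/* Illustrative sketch only -- not part of the patch. */
#include <poll.h>
#include <stdio.h>
#include <libudev.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
	struct pollfd pfd;

	/* i915 emits these uevents on its drm device node. */
	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);

	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {
		struct udev_device *dev;
		const char *val;

		if (poll(&pfd, 1, -1) <= 0)
			continue;
		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;

		/* "ERROR" is 1 on detection, 0 once a reset has cleared it. */
		val = udev_device_get_property_value(dev, "ERROR");
		if (val)
			printf("error state: %s\n", val);

		/* "RESET" (always 1) fires just before a GPU reset attempt. */
		if (udev_device_get_property_value(dev, "RESET"))
			printf("GPU reset imminent\n");

		/* L3 parity events also carry ROW/BANK/SUBBANK/SLICE. */
		if (udev_device_get_property_value(dev, "L3_PARITY_ERROR"))
			printf("L3 parity error, row %s\n",
			       udev_device_get_property_value(dev, "ROW"));

		udev_device_unref(dev);
	}
}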
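
In the same spirit, a minimal sketch of how the renamed caching interface and
one of the newly exported GETPARAM values might be used from code built
against this header (pkg-config libdrm cflags assumed). has_param() and
set_bo_caching() are made-up helper names, the GEM handle is assumed to come
from an earlier allocation, and error handling is trimmed; only the
drm_i915_gem_caching / drm_i915_getparam plumbing is shown.

/* Illustrative sketch only -- assumes the synced i915_drm.h and libdrm. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Query a DRM_I915_GETPARAM value; 0 means absent or too-old kernel. */
static int has_param(int fd, int param)
{
	drm_i915_getparam_t gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = param;	/* e.g. I915_PARAM_HAS_WT or I915_PARAM_HAS_EXEC_HANDLE_LUT */
	gp.value = &value;

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return value;
}

/* Set the caching level of a GEM buffer via the renamed SET_CACHING ioctl. */
static int set_bo_caching(int fd, uint32_t handle, uint32_t level)
{
	struct drm_i915_gem_caching arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;	/* GEM handle from an earlier allocation */
	arg.caching = level;	/* I915_CACHING_NONE, _CACHED or _DISPLAY */

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}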
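
And one more hedged fragment, continuing the previous sketch (same includes,
reusing the hypothetical has_param() helper), showing where the new
execbuffer2 bits would be plumbed in. The handles, batch length and ring
choice are placeholders, and relocation/buffer setup is elided entirely.

/* Illustrative sketch only -- exercises the new exec flags behind getparam. */
static int submit_batch(int fd, uint32_t batch_handle, uint32_t target_handle,
			uint32_t batch_bytes)
{
	struct drm_i915_gem_exec_object2 objects[2];
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(objects, 0, sizeof(objects));
	objects[0].handle = target_handle;
	objects[0].flags = EXEC_OBJECT_WRITE;	/* GPU writes this buffer */
	objects[1].handle = batch_handle;	/* batch goes last, as usual */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objects;
	execbuf.buffer_count = 2;
	execbuf.batch_len = batch_bytes;
	execbuf.flags = I915_EXEC_RENDER;

	/* Skip relocation processing when presumed offsets are correct. */
	if (has_param(fd, I915_PARAM_HAS_EXEC_NO_RELOC))
		execbuf.flags |= I915_EXEC_NO_RELOC;

	/* Treat reloc target handles as indices into objects[]. */
	if (has_param(fd, I915_PARAM_HAS_EXEC_HANDLE_LUT))
		execbuf.flags |= I915_EXEC_HANDLE_LUT;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}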