On Fri, May 20, 2016 at 06:59:31PM -0400, robert.foss@xxxxxxxxxxxxx wrote:
> From: Robert Foss <robert.foss@xxxxxxxxxxxxx>
>
> This patch provides stubs for functionality otherwise provided by libdrm_intel.
>
> The stubbed functions all fail with a call to igt_require(false).
> Defines and enums have been copied from libdrm_intel.

For prettiness maybe

	igt_require_f(false, "Not compiled with libdrm_intel support\n");

or something similar.
-Daniel

>
> Due to the stubbed tests failing with an igt_require() call, these stubs are
> not well suited for non-tests, since tools/benchmarks/etc 'skipping'
> execution is unhelpful.
>
> Signed-off-by: Robert Foss <robert.foss@xxxxxxxxxxxxx>
> ---
> lib/Makefile.sources | 2 +
> lib/intel_drm_stubs.c | 258 +++++++++++++
> lib/intel_drm_stubs.h | 999 ++++++++++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 1259 insertions(+)
> create mode 100644 lib/intel_drm_stubs.c
> create mode 100644 lib/intel_drm_stubs.h
>
> diff --git a/lib/Makefile.sources b/lib/Makefile.sources
> index 1316fd2..c0f9f6d 100644
> --- a/lib/Makefile.sources
> +++ b/lib/Makefile.sources
> @@ -21,6 +21,8 @@ libintel_tools_la_SOURCES = \
> 	intel_batchbuffer.c \
> 	intel_batchbuffer.h \
> 	intel_chipset.h \
> +	intel_drm_stubs.c \
> +	intel_drm_stubs.h \
> 	intel_os.c \
> 	intel_io.h \
> 	intel_mmio.c \
> diff --git a/lib/intel_drm_stubs.c b/lib/intel_drm_stubs.c
> new file mode 100644
> index 0000000..d46a9b3
> --- /dev/null
> +++ b/lib/intel_drm_stubs.c
> @@ -0,0 +1,258 @@
> +#ifndef HAVE_INTEL
> +
> +#include "intel_drm_stubs.h"
> +
> +drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size)
> +{
> +	igt_require(false);
> +	return (drm_intel_bufmgr *) NULL;
> +}
> +
> +void drm_intel_bo_unreference(drm_intel_bo *bo)
> +{
> +	igt_require(false);
> +}
> +
> +drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
> +	unsigned long size, unsigned int alignment)
> +{
> +	igt_require(false);
> +	return (drm_intel_bo *) NULL;
> +}
> +
> +int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
> +	unsigned long size, const void *data)
> +{
> +	igt_require(false);
> +	return 0;
> +}
> +
> +int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
> +	int used, unsigned int flags)
> +{
> +	igt_require(false);
> +	return 0;
> +}
> +
> +int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
> +	drm_intel_bo *target_bo, uint32_t target_offset,
> +	uint32_t read_domains, uint32_t write_domain)
> +{
> +	igt_require(false);
> +	return 0;
> +}
> +
> +int drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
> +	drm_intel_bo *target_bo,
> +	uint32_t target_offset,
> +	uint32_t read_domains, uint32_t write_domain)
> +{
> +	igt_require(false);
> +	return 0;
> +}
> +
> +int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
> +	uint32_t * swizzle_mode)
> +{
> +	igt_require(false);
> +	return 0;
> +}
> +
> +int drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
> +	struct drm_clip_rect *cliprects, int num_cliprects, int DR4,
> +	unsigned int flags)
> +{
> +	igt_require(false);
> +	return 0;
> +}
> +
> +void drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
> +	drm_intel_aub_annotation *annotations,
> +	unsigned count)
> +{
> +	igt_require(false);
> +}
> +
> +void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
> +{
> +	igt_require(false);
> +}
> +
> +int drm_intel_bo_exec(drm_intel_bo *bo, int used,
> +	struct drm_clip_rect *cliprects, int num_cliprects, int DR4)
> +{
> +
igt_require(false); > + return 0; > +} > + > +void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr) > +{ > + igt_require(false); > +} > + > +void drm_intel_bo_wait_rendering(drm_intel_bo *bo) > +{ > + igt_require(false); > +} > + > +int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset, > + unsigned long size, void *data) > +{ > + igt_require(false); > + return 0; > +} > + > +int drm_intel_bo_map(drm_intel_bo *bo, int write_enable) > +{ > + igt_require(false); > + return 0; > +} > + > +int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo) > +{ > + igt_require(false); > + return 0; > +} > + > +void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr) > +{ > + igt_require(false); > +} > + > +int drm_intel_bo_unmap(drm_intel_bo *bo) > +{ > + igt_require(false); > + return 0; > +} > + > +int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name) > +{ > + igt_require(false); > + return 0; > +} > + > +drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, > + const char *name, > + unsigned int handle) > +{ > + igt_require(false); > + return (drm_intel_bo *) NULL; > +} > + > +int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd) > +{ > + igt_require(false); > + return 0; > +} > + > +drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, > + int prime_fd, int size) > +{ > + igt_require(false); > + return (drm_intel_bo *) NULL; > +} > + > +void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, > + int limit) > +{ > + igt_require(false); > +} > + > +int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo) > +{ > + igt_require(false); > + return 0; > +} > + > +drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr) > +{ > + igt_require(false); > + return (drm_intel_bo *) NULL; > +} > + > +void drm_intel_gem_context_destroy(drm_intel_context *ctx) > +{ > + igt_require(false); > +} > + > +drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, > + const char *name, > + int x, int y, int cpp, > + uint32_t *tiling_mode, > + unsigned long *pitch, > + unsigned long flags) > +{ > + igt_require(false); > + return (drm_intel_bo *) NULL; > +} > + > +void drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr, > + const char *filename) > +{ > + igt_require(false); > +} > + > +void drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable) > +{ > + igt_require(false); > +} > + > +void drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo, > + int x1, int y1, int width, int height, > + enum aub_dump_bmp_format format, > + int pitch, int offset) > +{ > + igt_require(false); > +} > + > +void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable) > +{ > + igt_require(false); > +} > + > +int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, > + uint32_t stride) > +{ > + igt_require(false); > + return 0; > +} > + > +int drm_intel_bo_disable_reuse(drm_intel_bo *bo) > +{ > + igt_require(false); > + return 0; > +} > + > +void drm_intel_bo_reference(drm_intel_bo *bo) > +{ > + igt_require(false); > +} > + > +int drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr) > +{ > + igt_require(false); > + return 0; > +} > + > +drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, > + const char *name, > + unsigned long size, > + unsigned int alignment) > +{ > + igt_require(false); > + return (drm_intel_bo *) NULL; > +} > + > +int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) > +{ > + igt_require(false); > + return 0; > +} > + > +int 
drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns) > +{ > + igt_require(false); > + return 0; > +} > + > +#endif//HAVE_INTEL > diff --git a/lib/intel_drm_stubs.h b/lib/intel_drm_stubs.h > new file mode 100644 > index 0000000..fdfaaeb > --- /dev/null > +++ b/lib/intel_drm_stubs.h > @@ -0,0 +1,999 @@ > +#ifndef INTEL_DRM_STUBS_H > +#define INTEL_DRM_STUBS_H > + > +#include <xf86drm.h> > + > +#include "igt_core.h" > + > +#ifdef HAVE_INTEL > +#include <i915_drm.h> > +#include <intel_bufmgr.h> > +#include <drm.h> > + > + > +#else > +#define i915_execbuffer2_set_context_id(eb2, context) igt_require(false); > +#define i915_execbuffer2_get_context_id(eb2) igt_require(false); > + > +#define I915_EXEC_GEN7_SOL_RESET (1<<8) > +#define I915_EXEC_SECURE (1<<9) > +#define I915_EXEC_IS_PINNED (1<<10) > +#define I915_EXEC_NO_RELOC (1<<11) > +#define I915_EXEC_HANDLE_LUT (1<<12) > +#define I915_EXEC_BSD_MASK (3<<13) > +#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */ > +#define I915_EXEC_BSD_RING1 (1<<13) > +#define I915_EXEC_BSD_RING2 (2<<13) > + > +#define I915_MADV_WILLNEED 0 > +#define I915_MADV_DONTNEED 1 > + > +#define I915_TILING_NONE 0 > +#define I915_TILING_X 1 > +#define I915_TILING_Y 2 > + > +#define I915_CACHING_NONE 0 > +#define I915_CACHING_CACHED 1 > +#define I915_CACHING_DISPLAY 2 > + > +#define I915_BIT_6_SWIZZLE_NONE 0 > +#define I915_BIT_6_SWIZZLE_9 1 > +#define I915_BIT_6_SWIZZLE_9_10 2 > +#define I915_BIT_6_SWIZZLE_9_11 3 > +#define I915_BIT_6_SWIZZLE_9_10_11 4 > +#define I915_BIT_6_SWIZZLE_UNKNOWN 5 > +#define I915_BIT_6_SWIZZLE_9_17 6 > +#define I915_BIT_6_SWIZZLE_9_10_17 7 > + > +#define I915_PARAM_CHIPSET_ID 4 > + > +#define I915_PARAM_IRQ_ACTIVE 1 > +#define I915_PARAM_ALLOW_BATCHBUFFER 2 > +#define I915_PARAM_LAST_DISPATCH 3 > +#define I915_PARAM_CHIPSET_ID 4 > +#define I915_PARAM_HAS_GEM 5 > +#define I915_PARAM_NUM_FENCES_AVAIL 6 > +#define I915_PARAM_HAS_OVERLAY 7 > +#define I915_PARAM_HAS_PAGEFLIPPING 8 > +#define I915_PARAM_HAS_EXECBUF2 9 > +#define I915_PARAM_HAS_BSD 10 > +#define I915_PARAM_HAS_BLT 11 > +#define I915_PARAM_HAS_RELAXED_FENCING 12 > +#define I915_PARAM_HAS_COHERENT_RINGS 13 > +#define I915_PARAM_HAS_EXEC_CONSTANTS 14 > +#define I915_PARAM_HAS_RELAXED_DELTA 15 > +#define I915_PARAM_HAS_GEN7_SOL_RESET 16 > +#define I915_PARAM_HAS_LLC 17 > +#define I915_PARAM_HAS_ALIASING_PPGTT 18 > +#define I915_PARAM_HAS_WAIT_TIMEOUT 19 > +#define I915_PARAM_HAS_SEMAPHORES 20 > +#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 > +#define I915_PARAM_HAS_VEBOX 22 > +#define I915_PARAM_HAS_SECURE_BATCHES 23 > +#define I915_PARAM_HAS_PINNED_BATCHES 24 > +#define I915_PARAM_HAS_EXEC_NO_RELOC 25 > +#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 > +#define I915_PARAM_HAS_WT 27 > +#define I915_PARAM_CMD_PARSER_VERSION 28 > +#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 > +#define I915_PARAM_MMAP_VERSION 30 > +#define I915_PARAM_HAS_BSD2 31 > +#define I915_PARAM_REVISION 32 > +#define I915_PARAM_SUBSLICE_TOTAL 33 > +#define I915_PARAM_EU_TOTAL 34 > +#define I915_PARAM_HAS_GPU_RESET 35 > +#define I915_PARAM_HAS_RESOURCE_STREAMER 36 > +#define I915_PARAM_HAS_EXEC_SOFTPIN 37 > + > +#define I915_GEM_DOMAIN_CPU 0x00000001 > +#define I915_GEM_DOMAIN_RENDER 0x00000002 > +#define I915_GEM_DOMAIN_SAMPLER 0x00000004 > +#define I915_GEM_DOMAIN_COMMAND 0x00000008 > +#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 > +#define I915_GEM_DOMAIN_VERTEX 0x00000020 > +#define I915_GEM_DOMAIN_GTT 0x00000040 > + > +#define DRM_I915_INIT 0x00 > +#define DRM_I915_FLUSH 0x01 > +#define 
DRM_I915_FLIP 0x02 > +#define DRM_I915_BATCHBUFFER 0x03 > +#define DRM_I915_IRQ_EMIT 0x04 > +#define DRM_I915_IRQ_WAIT 0x05 > +#define DRM_I915_GETPARAM 0x06 > +#define DRM_I915_SETPARAM 0x07 > +#define DRM_I915_ALLOC 0x08 > +#define DRM_I915_FREE 0x09 > +#define DRM_I915_INIT_HEAP 0x0a > +#define DRM_I915_CMDBUFFER 0x0b > +#define DRM_I915_DESTROY_HEAP 0x0c > +#define DRM_I915_SET_VBLANK_PIPE 0x0d > +#define DRM_I915_GET_VBLANK_PIPE 0x0e > +#define DRM_I915_VBLANK_SWAP 0x0f > +#define DRM_I915_HWS_ADDR 0x11 > +#define DRM_I915_GEM_INIT 0x13 > +#define DRM_I915_GEM_EXECBUFFER 0x14 > +#define DRM_I915_GEM_PIN 0x15 > +#define DRM_I915_GEM_UNPIN 0x16 > +#define DRM_I915_GEM_BUSY 0x17 > +#define DRM_I915_GEM_THROTTLE 0x18 > +#define DRM_I915_GEM_ENTERVT 0x19 > +#define DRM_I915_GEM_LEAVEVT 0x1a > +#define DRM_I915_GEM_CREATE 0x1b > +#define DRM_I915_GEM_PREAD 0x1c > +#define DRM_I915_GEM_PWRITE 0x1d > +#define DRM_I915_GEM_MMAP 0x1e > +#define DRM_I915_GEM_SET_DOMAIN 0x1f > +#define DRM_I915_GEM_SW_FINISH 0x20 > +#define DRM_I915_GEM_SET_TILING 0x21 > +#define DRM_I915_GEM_GET_TILING 0x22 > +#define DRM_I915_GEM_GET_APERTURE 0x23 > +#define DRM_I915_GEM_MMAP_GTT 0x24 > +#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 > +#define DRM_I915_GEM_MADVISE 0x26 > +#define DRM_I915_OVERLAY_PUT_IMAGE 0x27 > +#define DRM_I915_OVERLAY_ATTRS 0x28 > +#define DRM_I915_GEM_EXECBUFFER2 0x29 > +#define DRM_I915_GET_SPRITE_COLORKEY 0x2a > +#define DRM_I915_SET_SPRITE_COLORKEY 0x2b > +#define DRM_I915_GEM_WAIT 0x2c > +#define DRM_I915_GEM_CONTEXT_CREATE 0x2d > +#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e > +#define DRM_I915_GEM_SET_CACHING 0x2f > +#define DRM_I915_GEM_GET_CACHING 0x30 > +#define DRM_I915_REG_READ 0x31 > +#define DRM_I915_GET_RESET_STATS 0x32 > +#define DRM_I915_GEM_USERPTR 0x33 > +#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 > +#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 > + > +#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) > +#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) > +#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) > +#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) > +#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) > +#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) > +#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) > +#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) > +#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) > +#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) > +#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) > +#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) > +#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) > +#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) > +#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) > +#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) > +#define 
DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) > +#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) > +#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) > +#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) > +#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) > +#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) > +#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) > +#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) > +#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) > +#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) > +#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) > +#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) > +#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) > +#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) > +#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) > +#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) > +#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) > +#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) > +#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) > +#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) > +#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) > +#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) > +#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) > +#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) > +#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) > +#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) > +#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) > +#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) > +#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) > +#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) > +#define 
DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) > +#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) > +#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) > +#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) > +#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) > +#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) > + > +enum aub_dump_bmp_format { > + AUB_DUMP_BMP_FORMAT_8BIT = 1, > + AUB_DUMP_BMP_FORMAT_ARGB_4444 = 4, > + AUB_DUMP_BMP_FORMAT_ARGB_0888 = 6, > + AUB_DUMP_BMP_FORMAT_ARGB_8888 = 7, > +}; > + > +typedef struct _drm_intel_bo drm_intel_bo; > +typedef struct _drm_intel_bufmgr drm_intel_bufmgr; > +typedef struct _drm_intel_context drm_intel_context; > + > +struct _drm_intel_context { > + unsigned int ctx_id; > + struct _drm_intel_bufmgr *bufmgr; > +}; > + > +struct _drm_intel_bufmgr { > + /** > + * Allocate a buffer object. > + * > + * Buffer objects are not necessarily initially mapped into CPU virtual > + * address space or graphics device aperture. They must be mapped > + * using bo_map() or drm_intel_gem_bo_map_gtt() to be used by the CPU. > + */ > + drm_intel_bo *(*bo_alloc) (drm_intel_bufmgr *bufmgr, const char *name, > + unsigned long size, unsigned int alignment); > + > + /** > + * Allocate a buffer object, hinting that it will be used as a > + * render target. > + * > + * This is otherwise the same as bo_alloc. > + */ > + drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr, > + const char *name, > + unsigned long size, > + unsigned int alignment); > + > + /** > + * Allocate a buffer object from an existing user accessible > + * address malloc'd with the provided size. > + * Alignment is used when mapping to the gtt. > + * Flags may be I915_VMAP_READ_ONLY or I915_USERPTR_UNSYNCHRONIZED > + */ > + drm_intel_bo *(*bo_alloc_userptr)(drm_intel_bufmgr *bufmgr, > + const char *name, void *addr, > + uint32_t tiling_mode, uint32_t stride, > + unsigned long size, > + unsigned long flags); > + > + /** > + * Allocate a tiled buffer object. > + * > + * Alignment for tiled objects is set automatically; the 'flags' > + * argument provides a hint about how the object will be used initially. > + * > + * Valid tiling formats are: > + * I915_TILING_NONE > + * I915_TILING_X > + * I915_TILING_Y > + * > + * Note the tiling format may be rejected; callers should check the > + * 'tiling_mode' field on return, as well as the pitch value, which > + * may have been rounded up to accommodate for tiling restrictions. > + */ > + drm_intel_bo *(*bo_alloc_tiled) (drm_intel_bufmgr *bufmgr, > + const char *name, > + int x, int y, int cpp, > + uint32_t *tiling_mode, > + unsigned long *pitch, > + unsigned long flags); > + > + /** Takes a reference on a buffer object */ > + void (*bo_reference) (drm_intel_bo *bo); > + > + /** > + * Releases a reference on a buffer object, freeing the data if > + * no references remain. > + */ > + void (*bo_unreference) (drm_intel_bo *bo); > + > + /** > + * Maps the buffer into userspace. > + * > + * This function will block waiting for any existing execution on the > + * buffer to complete, first. 
The resulting mapping is available at > + * buf->virtual. > + */ > + int (*bo_map) (drm_intel_bo *bo, int write_enable); > + > + /** > + * Reduces the refcount on the userspace mapping of the buffer > + * object. > + */ > + int (*bo_unmap) (drm_intel_bo *bo); > + > + /** > + * Write data into an object. > + * > + * This is an optional function, if missing, > + * drm_intel_bo will map/memcpy/unmap. > + */ > + int (*bo_subdata) (drm_intel_bo *bo, unsigned long offset, > + unsigned long size, const void *data); > + > + /** > + * Read data from an object > + * > + * This is an optional function, if missing, > + * drm_intel_bo will map/memcpy/unmap. > + */ > + int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset, > + unsigned long size, void *data); > + > + /** > + * Waits for rendering to an object by the GPU to have completed. > + * > + * This is not required for any access to the BO by bo_map, > + * bo_subdata, etc. It is merely a way for the driver to implement > + * glFinish. > + */ > + void (*bo_wait_rendering) (drm_intel_bo *bo); > + > + /** > + * Tears down the buffer manager instance. > + */ > + void (*destroy) (drm_intel_bufmgr *bufmgr); > + > + /** > + * Indicate if the buffer can be placed anywhere in the full ppgtt > + * address range (2^48). > + * > + * Any resource used with flat/heapless (0x00000000-0xfffff000) > + * General State Heap (GSH) or Intructions State Heap (ISH) must > + * be in a 32-bit range. 48-bit range will only be used when explicitly > + * requested. > + * > + * \param bo Buffer to set the use_48b_address_range flag. > + * \param enable The flag value. > + */ > + void (*bo_use_48b_address_range) (drm_intel_bo *bo, uint32_t enable); > + > + /** > + * Add relocation entry in reloc_buf, which will be updated with the > + * target buffer's real offset on on command submission. > + * > + * Relocations remain in place for the lifetime of the buffer object. > + * > + * \param bo Buffer to write the relocation into. > + * \param offset Byte offset within reloc_bo of the pointer to > + * target_bo. > + * \param target_bo Buffer whose offset should be written into the > + * relocation entry. > + * \param target_offset Constant value to be added to target_bo's > + * offset in relocation entry. > + * \param read_domains GEM read domains which the buffer will be > + * read into by the command that this relocation > + * is part of. > + * \param write_domains GEM read domains which the buffer will be > + * dirtied in by the command that this > + * relocation is part of. > + */ > + int (*bo_emit_reloc) (drm_intel_bo *bo, uint32_t offset, > + drm_intel_bo *target_bo, uint32_t target_offset, > + uint32_t read_domains, uint32_t write_domain); > + int (*bo_emit_reloc_fence)(drm_intel_bo *bo, uint32_t offset, > + drm_intel_bo *target_bo, > + uint32_t target_offset, > + uint32_t read_domains, > + uint32_t write_domain); > + > + /** Executes the command buffer pointed to by bo. 
*/ > + int (*bo_exec) (drm_intel_bo *bo, int used, > + drm_clip_rect_t *cliprects, int num_cliprects, > + int DR4); > + > + /** Executes the command buffer pointed to by bo on the selected > + * ring buffer > + */ > + int (*bo_mrb_exec) (drm_intel_bo *bo, int used, > + drm_clip_rect_t *cliprects, int num_cliprects, > + int DR4, unsigned flags); > + > + /** > + * Pin a buffer to the aperture and fix the offset until unpinned > + * > + * \param buf Buffer to pin > + * \param alignment Required alignment for aperture, in bytes > + */ > + int (*bo_pin) (drm_intel_bo *bo, uint32_t alignment); > + > + /** > + * Unpin a buffer from the aperture, allowing it to be removed > + * > + * \param buf Buffer to unpin > + */ > + int (*bo_unpin) (drm_intel_bo *bo); > + > + /** > + * Ask that the buffer be placed in tiling mode > + * > + * \param buf Buffer to set tiling mode for > + * \param tiling_mode desired, and returned tiling mode > + */ > + int (*bo_set_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode, > + uint32_t stride); > + > + /** > + * Get the current tiling (and resulting swizzling) mode for the bo. > + * > + * \param buf Buffer to get tiling mode for > + * \param tiling_mode returned tiling mode > + * \param swizzle_mode returned swizzling mode > + */ > + int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode, > + uint32_t * swizzle_mode); > + > + /** > + * Set the offset at which this buffer will be softpinned > + * \param bo Buffer to set the softpin offset for > + * \param offset Softpin offset > + */ > + int (*bo_set_softpin_offset) (drm_intel_bo *bo, uint64_t offset); > + > + /** > + * Create a visible name for a buffer which can be used by other apps > + * > + * \param buf Buffer to create a name for > + * \param name Returned name > + */ > + int (*bo_flink) (drm_intel_bo *bo, uint32_t * name); > + > + /** > + * Returns 1 if mapping the buffer for write could cause the process > + * to block, due to the object being active in the GPU. > + */ > + int (*bo_busy) (drm_intel_bo *bo); > + > + /** > + * Specify the volatility of the buffer. > + * \param bo Buffer to create a name for > + * \param madv The purgeable status > + * > + * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be > + * reclaimed under memory pressure. If you subsequently require the buffer, > + * then you must pass I915_MADV_WILLNEED to mark the buffer as required. > + * > + * Returns 1 if the buffer was retained, or 0 if it was discarded whilst > + * marked as I915_MADV_DONTNEED. > + */ > + int (*bo_madvise) (drm_intel_bo *bo, int madv); > + > + int (*check_aperture_space) (drm_intel_bo ** bo_array, int count); > + > + /** > + * Disable buffer reuse for buffers which will be shared in some way, > + * as with scanout buffers. When the buffer reference count goes to > + * zero, it will be freed and not placed in the reuse list. > + * > + * \param bo Buffer to disable reuse for > + */ > + int (*bo_disable_reuse) (drm_intel_bo *bo); > + > + /** > + * Query whether a buffer is reusable. > + * > + * \param bo Buffer to query > + */ > + int (*bo_is_reusable) (drm_intel_bo *bo); > + > + /** > + * > + * Return the pipe associated with a crtc_id so that vblank > + * synchronization can use the correct data in the request. 
> + * This is only supported for KMS and gem at this point, when > + * unsupported, this function returns -1 and leaves the decision > + * of what to do in that case to the caller > + * > + * \param bufmgr the associated buffer manager > + * \param crtc_id the crtc identifier > + */ > + int (*get_pipe_from_crtc_id) (drm_intel_bufmgr *bufmgr, int crtc_id); > + > + /** Returns true if target_bo is in the relocation tree rooted at bo. */ > + int (*bo_references) (drm_intel_bo *bo, drm_intel_bo *target_bo); > + > + /**< Enables verbose debugging printouts */ > + int debug; > +}; > + > +struct _drm_intel_bo { > + /** > + * Size in bytes of the buffer object. > + * > + * The size may be larger than the size originally requested for the > + * allocation, such as being aligned to page size. > + */ > + unsigned long size; > + > + /** > + * Alignment requirement for object > + * > + * Used for GTT mapping & pinning the object. > + */ > + unsigned long align; > + > + /** > + * Deprecated field containing (possibly the low 32-bits of) the last > + * seen virtual card address. Use offset64 instead. > + */ > + unsigned long offset; > + > + /** > + * Virtual address for accessing the buffer data. Only valid while > + * mapped. > + */ > +#ifdef __cplusplus > + void *virt; > +#else > + void *virtual; > +#endif > + > + /** Buffer manager context associated with this buffer object */ > + drm_intel_bufmgr *bufmgr; > + > + /** > + * MM-specific handle for accessing object > + */ > + int handle; > + > + /** > + * Last seen card virtual address (offset from the beginning of the > + * aperture) for the object. This should be used to fill relocation > + * entries when calling drm_intel_bo_emit_reloc() > + */ > + uint64_t offset64; > +}; > + > +typedef struct drm_i915_getparam { > + __s32 param; > + /* > + * WARNING: Using pointers instead of fixed-size u64 means we need to write > + * compat32 code. Don't repeat this mistake. > + */ > + int *value; > +} drm_i915_getparam_t; > + > +struct drm_i915_gem_execbuffer2 { > + /** > + * List of gem_exec_object2 structs > + */ > + __u64 buffers_ptr; > + __u32 buffer_count; > + > + /** Offset in the batchbuffer to start execution from. */ > + __u32 batch_start_offset; > + /** Bytes used in batchbuffer from batch_start_offset */ > + __u32 batch_len; > + __u32 DR1; > + __u32 DR4; > + __u32 num_cliprects; > + /** This is a struct drm_clip_rect *cliprects */ > + __u64 cliprects_ptr; > +#define I915_EXEC_RING_MASK (7<<0) > +#define I915_EXEC_DEFAULT (0<<0) > +#define I915_EXEC_RENDER (1<<0) > +#define I915_EXEC_BSD (2<<0) > +#define I915_EXEC_BLT (3<<0) > +#define I915_EXEC_VEBOX (4<<0) > + > +/* Used for switching the constants addressing mode on gen4+ RENDER ring. > + * Gen6+ only supports relative addressing to dynamic state (default) and > + * absolute addressing. > + * > + * These flags are ignored for the BSD and BLT rings. > + */ > +#define I915_EXEC_CONSTANTS_MASK (3<<6) > +#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ > +#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) > +#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ > + __u64 flags; > + __u64 rsvd1; /* now used for context info */ > + __u64 rsvd2; > +}; > + > +struct drm_i915_gem_exec_object2 { > + /** > + * User's handle for a buffer to be bound into the GTT for this > + * operation. 
> + */ > + __u32 handle; > + > + /** Number of relocations to be performed on this buffer */ > + __u32 relocation_count; > + /** > + * Pointer to array of struct drm_i915_gem_relocation_entry containing > + * the relocations to be performed in this buffer. > + */ > + __u64 relocs_ptr; > + > + /** Required alignment in graphics aperture */ > + __u64 alignment; > + > + /** > + * When the EXEC_OBJECT_PINNED flag is specified this is populated by > + * the user with the GTT offset at which this object will be pinned. > + * When the I915_EXEC_NO_RELOC flag is specified this must contain the > + * presumed_offset of the object. > + * During execbuffer2 the kernel populates it with the value of the > + * current GTT offset of the object, for future presumed_offset writes. > + */ > + __u64 offset; > + > +#define EXEC_OBJECT_NEEDS_FENCE (1<<0) > +#define EXEC_OBJECT_NEEDS_GTT (1<<1) > +#define EXEC_OBJECT_WRITE (1<<2) > +#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) > +#define EXEC_OBJECT_PINNED (1<<4) > +#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1) > + __u64 flags; > + > + __u64 rsvd1; > + __u64 rsvd2; > +}; > + > +struct drm_i915_gem_relocation_entry { > + /** > + * Handle of the buffer being pointed to by this relocation entry. > + * > + * It's appealing to make this be an index into the mm_validate_entry > + * list to refer to the buffer, but this allows the driver to create > + * a relocation list for state buffers and not re-write it per > + * exec using the buffer. > + */ > + __u32 target_handle; > + > + /** > + * Value to be added to the offset of the target buffer to make up > + * the relocation entry. > + */ > + __u32 delta; > + > + /** Offset in the buffer the relocation entry will be written into */ > + __u64 offset; > + > + /** > + * Offset value of the target buffer that the relocation entry was last > + * written as. > + * > + * If the buffer has the same offset as last time, we can skip syncing > + * and writing the relocation. This value is written back out by > + * the execbuffer ioctl when the relocation is written. > + */ > + __u64 presumed_offset; > + > + /** > + * Target memory domains read by this operation. > + */ > + __u32 read_domains; > + > + /** > + * Target memory domains written by this operation. > + * > + * Note that only one domain may be written by the whole > + * execbuffer operation, so that where there are conflicts, > + * the application will get -EINVAL back. > + */ > + __u32 write_domain; > +}; > + > +struct drm_i915_gem_get_tiling { > + /** Handle of the buffer to get tiling state for. */ > + __u32 handle; > + > + /** > + * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, > + * I915_TILING_Y). > + */ > + __u32 tiling_mode; > + > + /** > + * Returned address bit 6 swizzling required for CPU access through > + * mmap mapping. > + */ > + __u32 swizzle_mode; > + > + /** > + * Returned address bit 6 swizzling required for CPU access through > + * mmap mapping whilst bound. > + */ > + __u32 phys_swizzle_mode; > +}; > + > +struct drm_i915_gem_set_tiling { > + /** Handle of the buffer to have its tiling state updated */ > + __u32 handle; > + > + /** > + * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, > + * I915_TILING_Y). > + * > + * This value is to be set on request, and will be updated by the > + * kernel on successful return with the actual chosen tiling layout. > + * > + * The tiling mode may be demoted to I915_TILING_NONE when the system > + * has bit 6 swizzling that can't be managed correctly by GEM. 
> + * > + * Buffer contents become undefined when changing tiling_mode. > + */ > + __u32 tiling_mode; > + > + /** > + * Stride in bytes for the object when in I915_TILING_X or > + * I915_TILING_Y. > + */ > + __u32 stride; > + > + /** > + * Returned address bit 6 swizzling required for CPU access through > + * mmap mapping. > + */ > + __u32 swizzle_mode; > +}; > + > +struct drm_i915_gem_pwrite { > + /** Handle for the object being written to. */ > + __u32 handle; > + __u32 pad; > + /** Offset into the object to write to */ > + __u64 offset; > + /** Length of data to write */ > + __u64 size; > + /** > + * Pointer to read the data from. > + * > + * This is a fixed-size type for 32/64 compatibility. > + */ > + __u64 data_ptr; > +}; > + > +struct drm_i915_gem_pread { > + /** Handle for the object being read. */ > + __u32 handle; > + __u32 pad; > + /** Offset into the object to read from */ > + __u64 offset; > + /** Length of data to read */ > + __u64 size; > + /** > + * Pointer to write the data into. > + * > + * This is a fixed-size type for 32/64 compatibility. > + */ > + __u64 data_ptr; > +}; > + > +struct drm_i915_gem_set_domain { > + /** Handle for the object */ > + __u32 handle; > + > + /** New read domains */ > + __u32 read_domains; > + > + /** New write domain */ > + __u32 write_domain; > +}; > + > +struct drm_i915_gem_wait { > + /** Handle of BO we shall wait on */ > + __u32 bo_handle; > + __u32 flags; > + /** Number of nanoseconds to wait, Returns time remaining. */ > + __s64 timeout_ns; > +}; > + > +struct drm_i915_gem_busy { > + /** Handle of the buffer to check for busy */ > + __u32 handle; > + > + /** Return busy status (1 if busy, 0 if idle). > + * The high word is used to indicate on which rings the object > + * currently resides: > + * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc) > + */ > + __u32 busy; > +}; > + > +struct drm_i915_gem_get_aperture { > + /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ > + __u64 aper_size; > + > + /** > + * Available space in the aperture used by i915_gem_execbuffer, in > + * bytes > + */ > + __u64 aper_available_size; > +}; > + > +struct drm_i915_gem_create { > + /** > + * Requested size for the object. > + * > + * The (page-aligned) allocated size for the object will be returned. > + */ > + __u64 size; > + /** > + * Returned handle for the object. > + * > + * Object handles are nonzero. > + */ > + __u32 handle; > + __u32 pad; > +}; > + > +struct drm_i915_gem_mmap_gtt { > + /** Handle for the object being mapped. */ > + __u32 handle; > + __u32 pad; > + /** > + * Fake offset to use for subsequent mmap call > + * > + * This is a fixed-size type for 32/64 compatibility. > + */ > + __u64 offset; > +}; > + > +struct drm_i915_gem_sw_finish { > + /** Handle for the object */ > + __u32 handle; > +}; > + > +struct drm_i915_gem_mmap { > + /** Handle for the object being mapped. */ > + __u32 handle; > + __u32 pad; > + /** Offset in the object to map. */ > + __u64 offset; > + /** > + * Length of data to map. > + * > + * The value will be page-aligned. > + */ > + __u64 size; > + /** > + * Returned pointer the data was mapped at. > + * > + * This is a fixed-size type for 32/64 compatibility. > + */ > + __u64 addr_ptr; > + > + /** > + * Flags for extended behaviour. > + * > + * Added in version 2. 
> + */ > + __u64 flags; > +#define I915_MMAP_WC 0x1 > +}; > + > +struct drm_i915_gem_madvise { > + /** Handle of the buffer to change the backing store advice */ > + __u32 handle; > + > + /* Advice: either the buffer will be needed again in the near future, > + * or wont be and could be discarded under memory pressure. > + */ > + __u32 madv; > + > + /** Whether the backing store still exists. */ > + __u32 retained; > +}; > + > +struct drm_i915_gem_context_create { > + /* output: id of new context*/ > + __u32 ctx_id; > + __u32 pad; > +}; > + > +struct drm_i915_gem_context_destroy { > + __u32 ctx_id; > + __u32 pad; > +}; > + > +typedef struct _drm_intel_aub_annotation { > + uint32_t type; > + uint32_t subtype; > + uint32_t ending_offset; > +} drm_intel_aub_annotation; > + > +struct drm_i915_gem_pin { > + /** Handle of the buffer to be pinned. */ > + __u32 handle; > + __u32 pad; > + > + /** alignment required within the aperture */ > + __u64 alignment; > + > + /** Returned GTT offset of the buffer. */ > + __u64 offset; > +}; > + > +#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ > +#define I915_SET_COLORKEY_DESTINATION (1<<1) > +#define I915_SET_COLORKEY_SOURCE (1<<2) > +struct drm_intel_sprite_colorkey { > + __u32 plane_id; > + __u32 min_value; > + __u32 channel_mask; > + __u32 max_value; > + __u32 flags; > +}; > + > +struct drm_i915_reg_read { > + /* > + * Register offset. > + * For 64bit wide registers where the upper 32bits don't immediately > + * follow the lower 32bits, the offset of the lower 32bits must > + * be specified > + */ > + __u64 offset; > + __u64 val; /* Return value */ > +}; > + > +drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size); > +void drm_intel_bo_unreference(drm_intel_bo *bo); > +drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name, > + unsigned long size, unsigned int alignment); > +int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset, > + unsigned long size, const void *data); > +int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx, > + int used, unsigned int flags); > +int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, > + drm_intel_bo *target_bo, uint32_t target_offset, > + uint32_t read_domains, uint32_t write_domain); > +int drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset, > + drm_intel_bo *target_bo, > + uint32_t target_offset, > + uint32_t read_domains, uint32_t write_domain); > +int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, > + uint32_t * swizzle_mode); > +int drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used, > + struct drm_clip_rect *cliprects, int num_cliprects, int DR4, > + unsigned int flags); > +void drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo, > + drm_intel_aub_annotation *annotations, > + unsigned count); > +void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr); > +int drm_intel_bo_exec(drm_intel_bo *bo, int used, > + struct drm_clip_rect *cliprects, int num_cliprects, int DR4); > +void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr); > +void drm_intel_bo_wait_rendering(drm_intel_bo *bo); > +int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset, > + unsigned long size, void *data); > +int drm_intel_bo_map(drm_intel_bo *bo, int write_enable); > +int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo); > +void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr); > +int drm_intel_bo_unmap(drm_intel_bo *bo); > +int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * 
name); > +drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, > + const char *name, > + unsigned int handle); > +int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd); > +drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, > + int prime_fd, int size); > +void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, > + int limit); > +int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo); > +drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr); > +void drm_intel_gem_context_destroy(drm_intel_context *ctx); > +drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, > + const char *name, > + int x, int y, int cpp, > + uint32_t *tiling_mode, > + unsigned long *pitch, > + unsigned long flags); > +void drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr, > + const char *filename); > +void drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable); > +void drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo, > + int x1, int y1, int width, int height, > + enum aub_dump_bmp_format format, > + int pitch, int offset); > +void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable); > +int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, > + uint32_t stride); > +int drm_intel_bo_disable_reuse(drm_intel_bo *bo); > +void drm_intel_bo_reference(drm_intel_bo *bo); > +int drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr); > +drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, > + const char *name, > + unsigned long size, > + unsigned int alignment); > +int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo); > +int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns); > + > +#endif > + > +#endif//INTEL_DRM_STUBS_H > -- > 2.7.4 > -- Daniel Vetter Software Engineer, Intel Corporation http://blog.ffwll.ch _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx
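To illustrate the igt_require_f() suggestion near the top of this review, here is a minimal sketch (not part of the submitted patch) of what one of the stubs could look like with that change, using drm_intel_bufmgr_gem_init() from intel_drm_stubs.c as the example:

	drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size)
	{
		/* Skip with an explicit reason instead of a bare igt_require(false) */
		igt_require_f(false, "Not compiled with libdrm_intel support\n");
		return (drm_intel_bufmgr *) NULL;
	}

The condition and message are taken verbatim from the review comment; the remaining stubs would follow the same pattern, so the test log shows why the test was skipped.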