> diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
> new file mode 100644
> index 000000000000..6d815df5e829
> --- /dev/null
> +++ b/include/uapi/drm/panthor_drm.h
> @@ -0,0 +1,892 @@
> +/* SPDX-License-Identifier: MIT */
> +/* Copyright (C) 2023 Collabora ltd. */
> +#ifndef _PANTHOR_DRM_H_
> +#define _PANTHOR_DRM_H_
> +
> +#include "drm.h"
> +
> +#if defined(__cplusplus)
> +extern "C" {
> +#endif
> +
> +/**
> + * DOC: Introduction

...

> +/**
> + * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
> + */
> +struct drm_panthor_group_submit {
> +	/** @group_handle: Handle of the group to queue jobs to. */
> +	__u32 group_handle;
> +
> +	/** @pad: MBZ. */
> +	__u32 pad;
> +
> +	/** @queue_submits: Array of drm_panthor_queue_submit objects. */
> +	struct drm_panthor_obj_array queue_submits;
> +};

Hi! Very minor nit - but shouldn't the comment above say
DRM_IOCTL_PANTHOR_GROUP_SUBMIT, not VM_BIND?
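Purely as an aside, and nothing that needs changing in the patch: a rough
sketch of how I'd expect userspace to pair this ioctl with the
GROUP_GET_STATE one quoted further down. drmIoctl() from libdrm is assumed,
the queue_submits array is deliberately left empty, the helper name is made
up, and error handling is elided, so treat it as a sketch rather than
working code.

#include <xf86drm.h>		/* drmIoctl() */
#include "panthor_drm.h"	/* the header under review */

/* Submit nothing to a group, then ask the kernel whether it is still usable. */
static int panthor_group_still_usable(int fd, __u32 group_handle)
{
	struct drm_panthor_group_submit submit = {
		.group_handle = group_handle,
		/* .queue_submits left zeroed: no jobs queued in this sketch */
	};
	struct drm_panthor_group_get_state state = {
		.group_handle = group_handle,
	};

	if (drmIoctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &submit))
		return -1;

	if (drmIoctl(fd, DRM_IOCTL_PANTHOR_GROUP_GET_STATE, &state))
		return -1;

	/* Once TIMEDOUT or FATAL_FAULT is set, the group has to be replaced. */
	if (state.state & (DRM_PANTHOR_GROUP_STATE_TIMEDOUT |
			   DRM_PANTHOR_GROUP_STATE_FATAL_FAULT))
		return -1;

	return 0;
}
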
> +
> +/**
> + * enum drm_panthor_group_state_flags - Group state flags
> + */
> +enum drm_panthor_group_state_flags {
> +	/**
> +	 * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
> +	 *
> +	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
> +	 */
> +	DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,
> +
> +	/**
> +	 * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
> +	 *
> +	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
> +	 */
> +	DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,
> +};
> +
> +/**
> + * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
> + *
> + * Used to query the state of a group and decide whether a new group should be created to
> + * replace it.
> + */
> +struct drm_panthor_group_get_state {
> +	/** @group_handle: Handle of the group to query state on */
> +	__u32 group_handle;
> +
> +	/**
> +	 * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
> +	 * group state.
> +	 */
> +	__u32 state;
> +
> +	/** @fatal_queues: Bitmask of queues that faced fatal faults. */
> +	__u32 fatal_queues;
> +
> +	/** @pad: MBZ */
> +	__u32 pad;
> +};
> +
> +/**
> + * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
> + */
> +struct drm_panthor_tiler_heap_create {
> +	/** @vm_id: VM ID the tiler heap should be mapped to */
> +	__u32 vm_id;
> +
> +	/** @initial_chunk_count: Initial number of chunks to allocate. */
> +	__u32 initial_chunk_count;
> +
> +	/** @chunk_size: Chunk size. Must be a power of two at least 256KB large. */
> +	__u32 chunk_size;
> +
> +	/** @max_chunks: Maximum number of chunks that can be allocated. */
> +	__u32 max_chunks;
> +
> +	/**
> +	 * @target_in_flight: Maximum number of in-flight render passes.
> +	 *
> +	 * If the heap has more than tiler jobs in-flight, the FW will wait for render
> +	 * passes to finish before queuing new tiler jobs.
> +	 */
> +	__u32 target_in_flight;
> +
> +	/** @handle: Returned heap handle. Passed back to DESTROY_TILER_HEAP. */
> +	__u32 handle;
> +
> +	/** @tiler_heap_ctx_gpu_va: Returned heap GPU virtual address returned */
> +	__u64 tiler_heap_ctx_gpu_va;
> +
> +	/**
> +	 * @first_heap_chunk_gpu_va: First heap chunk.
> +	 *
> +	 * The tiler heap is formed of heap chunks forming a single-link list. This
> +	 * is the first element in the list.
> +	 */
> +	__u64 first_heap_chunk_gpu_va;
> +};
> +
> +/**
> + * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
> + */
> +struct drm_panthor_tiler_heap_destroy {
> +	/** @handle: Handle of the tiler heap to destroy */
> +	__u32 handle;
> +
> +	/** @pad: Padding field, MBZ. */
> +	__u32 pad;
> +};
> +
> +#if defined(__cplusplus)
> +}
> +#endif
> +
> +#endif /* _PANTHOR_DRM_H_ */

Cheers,
Chris