Handle querying and waiting on an external fence fd as opposed to the
internal seqno fence. The key difference with the fd fences is that we
can pass these external fences to the kernel/GPU for it to
asynchronously wait upon (internal seqno fences are ordered by the GL
command stream, hence do not need injection).

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 src/mesa/drivers/dri/i965/intel_syncobj.c | 102 +++++++++++++++++++++++++++++
 1 file changed, 102 insertions(+)

diff --git a/src/mesa/drivers/dri/i965/intel_syncobj.c b/src/mesa/drivers/dri/i965/intel_syncobj.c
index 14fc4b8..b5df498 100644
--- a/src/mesa/drivers/dri/i965/intel_syncobj.c
+++ b/src/mesa/drivers/dri/i965/intel_syncobj.c
@@ -39,6 +39,8 @@
  */
 
 #include <sys/mman.h>
+#include <poll.h>
+#include <sys/ioctl.h>
 
 #include "main/imports.h"
 
@@ -89,6 +91,7 @@ struct brw_fence {
    drm_intel_bo *batch;
    uint32_t *hw_seqno;
    uint32_t seqno;
+   int handle;
 };
 
 static inline bool seqno_passed(const struct brw_fence *fence)
@@ -152,10 +155,107 @@ static const struct brw_fence_ops seqno_ops = {
    .client_wait = seqno_client_wait,
    .server_wait = seqno_server_wait,
 };
 
+static inline bool fd_poll(const struct brw_fence *fence, int timeout)
+{
+   /* NOTE(review): libsync's sync_wait() polls for POLLIN on sync_file
+    * fds; confirm the fence fd interface targeted here signals POLLOUT.
+    */
+   struct pollfd pfd = { .fd = fence->handle, .events = POLLOUT };
+   return poll(&pfd, 1, timeout) == 1;
+}
+
+static bool fd_check(struct brw_fence *fence)
+{
+   return fd_poll(fence, 0);
+}
+
+static bool fd_client_wait(struct brw_fence *fence, uint64_t timeout)
+{
+   int msecs;
+
+   msecs = -1;
+   if (timeout != __DRI2_FENCE_TIMEOUT_INFINITE) {
+      /* Round up, so we never return before the requested timeout. */
+      timeout = (timeout + 999999) / (1000 * 1000); /* nsecs to msecs */
+      if (timeout < INT_MAX)
+         msecs = timeout;
+   }
+
+   return fd_poll(fence, msecs);
+}
+
+/*** libsync ***/
+
+struct sync_merge_data {
+   char name[32];
+   __s32 fd2;
+   __s32 fence;
+   __u32 flags;
+   __u32 pad;
+};
+
+#define SYNC_IOC_MAGIC '>'
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
+
+/* Return a new fd representing the union of the two fences; the inputs
+ * are left open and remain owned by the caller. Returns a negative
+ * errno on failure.
+ */
+static int sync_merge(int fd1, int fd2)
+{
+   struct sync_merge_data data;
+
+   if (fd1 < 0)
+      return dup(fd2);
+
+   if (fd2 < 0)
+      return dup(fd1);
+
+   memset(&data, 0, sizeof(data));
+   data.fd2 = fd2;
+   strcpy(data.name, "i965");
+
+   if (ioctl(fd1, SYNC_IOC_MERGE, &data))
+      return -errno;
+
+   return data.fence;
+}
+
+/*** !libsync ***/
+
+static void fd_server_wait(struct brw_fence *fence, struct brw_context *brw)
+{
+   int merged;
+
+   if (fence->batch)
+      return;
+
+   /* sync_merge() does not consume its inputs, so close the old batch
+    * fence after a successful merge or we leak one fd per fence. Only
+    * commit the result on success to keep brw->batch.fence valid.
+    */
+   merged = sync_merge(brw->batch.fence, fence->handle);
+   if (merged >= 0) {
+      if (brw->batch.fence >= 0)
+         close(brw->batch.fence);
+      brw->batch.fence = merged;
+   }
+
+   drm_intel_bo_reference(brw->batch.bo);
+   fence->batch = brw->batch.bo;
+}
+
+static const struct brw_fence_ops fd_ops = {
+   .check = fd_check,
+   .client_wait = fd_client_wait,
+   .server_wait = fd_server_wait,
+};
+
 static void brw_fence_finish(struct brw_fence *fence)
 {
    if (fence->batch)
       drm_intel_bo_unreference(fence->batch);
+   if (fence->handle != -1)
+      close(fence->handle);
 }
 
@@ -172,6 +272,7 @@ intel_gl_new_sync_object(struct gl_context *ctx, GLuint id)
    if (!sync)
       return NULL;
 
+   sync->fence.handle = -1;
    sync->fence.ops = &seqno_ops;
    return &sync->Base;
 }
@@ -254,6 +355,7 @@ intel_dri_create_fence(__DRIcontext *ctx)
    if (!fence)
       return NULL;
 
+   fence->handle = -1;
    fence->ops = &seqno_ops;
 
    seqno_insert(fence, brw);
-- 
2.9.3
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx