Userspace is faced with a dilemma. The kernel requires implicit fencing
to manage resource usage (we must always wait for the GPU to finish
before releasing its PTE) and for third parties. However, userspace may
wish to avoid this serialisation if it is using explicit fencing
between parties and wants more fine-grained access to buffers (e.g. it
may partition the buffer between uses and track fences on ranges rather
than having the implicit fences track the whole object). To wit,
userspace needs a mechanism to avoid the kernel's serialisation on its
implicit fences before execbuf execution.

The next question is whether this should be an object, execbuf or
context flag. Hybrid users (such as using explicit
EGL_ANDROID_native_fence_sync fencing on shared winsys buffers, but
implicit fencing on internal surfaces) require a per-object level flag.
Given that this flag needs to be set only once for the lifetime of the
object, an execbuf or context level flag would add little convenience,
and a single per-object flag avoids having multiple pieces of uABI
controlling the same feature.

Incorrect use of this flag will result in rendering corruption and GPU
hangs, but will not result in use-after-free or similar resource
tracking issues.

Testcase: igt/gem_exec_async
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.c            |  3 +++
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 28 +++++++++++++++-------------
 include/uapi/drm/i915_drm.h                |  4 +++-
 3 files changed, 21 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e4ff0277aca2..3ecd9e26bdfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -355,6 +355,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_MIN_EU_IN_POOL:
 		value = INTEL_INFO(dev)->min_eu_in_pool;
 		break;
+	case I915_PARAM_HAS_EXEC_ASYNC:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d9b6fe9f7542..a0f46293e492 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1199,20 +1199,22 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		struct drm_i915_gem_object *obj = vma->obj;
 		struct reservation_object *resv;
 
-		if (obj->flags & other_rings) {
-			ret = eb_sync(obj, req, obj->base.pending_write_domain);
-			if (ret)
-				return ret;
-		}
+		if ((vma->exec_entry->flags & EXEC_OBJECT_ASYNC) == 0) {
+			if (obj->flags & other_rings) {
+				ret = eb_sync(obj, req, obj->base.pending_write_domain);
+				if (ret)
+					return ret;
+			}
 
-		resv = i915_gem_object_get_dmabuf_resv(obj);
-		if (resv) {
-			ret = i915_sw_fence_await_reservation
-				(&req->submit, resv, &i915_fence_ops,
-				 obj->base.pending_write_domain,
-				 GFP_KERNEL | __GFP_NOWARN);
-			if (ret < 0)
-				return ret;
+			resv = i915_gem_object_get_dmabuf_resv(obj);
+			if (resv) {
+				ret = i915_sw_fence_await_reservation
+					(&req->submit, resv, &i915_fence_ops,
+					 obj->base.pending_write_domain,
+					 GFP_KERNEL | __GFP_NOWARN);
+				if (ret < 0)
+					return ret;
+			}
 		}
 
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 5501fe83ed92..75f9cab23249 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -387,6 +387,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_SOFTPIN	 37
 #define I915_PARAM_HAS_POOLED_EU	 38
 #define I915_PARAM_MIN_EU_IN_POOL	 39
+#define I915_PARAM_HAS_EXEC_ASYNC	 40
 
 typedef struct drm_i915_getparam {
 	__s32 param;
@@ -728,8 +729,9 @@ struct drm_i915_gem_exec_object2 {
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
 #define EXEC_OBJECT_PINNED		 (1<<4)
 #define EXEC_OBJECT_PAD_TO_SIZE	 (1<<5)
+#define EXEC_OBJECT_ASYNC		 (1<<6)
 /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PAD_TO_SIZE<<1)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_ASYNC<<1)
 	__u64 flags;
 
 	union {
-- 
2.9.3
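
For reference, a rough sketch of the intended userspace usage follows.
This is illustrative only and not part of the patch: the fd (an open
i915 DRM fd) and the GEM handle are assumed to come from the caller's
existing setup, and the helper names are made up.

/*
 * Sketch: detect EXEC_OBJECT_ASYNC support via the new getparam and
 * opt a buffer out of implicit fence serialisation on execbuf.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static bool has_exec_async(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_EXEC_ASYNC,
		.value = &value,
	};

	/* Kernels without this patch reject the unknown param. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return false;

	return value > 0;
}

static void fill_exec_object(struct drm_i915_gem_exec_object2 *obj,
			     uint32_t handle, bool async)
{
	memset(obj, 0, sizeof(*obj));
	obj->handle = handle;

	/*
	 * With EXEC_OBJECT_ASYNC set, the kernel skips waiting on this
	 * object's implicit fences before execution; the caller then
	 * becomes responsible for ordering access via explicit fences.
	 */
	if (async)
		obj->flags |= EXEC_OBJECT_ASYNC;
}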