On 1/26/22 16:21, Matthew Auld wrote:
> On platforms where there might be non-mappable LMEM, force userspace to
> mark the buffers with the correct hint. When dumping the BO contents
> during capture we need CPU access. Note this only applies to buffers
> that can be placed in LMEM, and also doesn't impact DG1.

Oddly enough, this seems to break DG1. We probably need to understand why.

/Thomas

> Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
> Cc: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 11 +++++++++--
>  1 file changed, 9 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index 498b458fd784..3c8083852620 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -1965,7 +1965,7 @@ eb_find_first_request_added(struct i915_execbuffer *eb)
>  #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
>
>  /* Stage with GFP_KERNEL allocations before we enter the signaling critical path */
> -static void eb_capture_stage(struct i915_execbuffer *eb)
> +static int eb_capture_stage(struct i915_execbuffer *eb)
>  {
>  	const unsigned int count = eb->buffer_count;
>  	unsigned int i = count, j;
> @@ -1978,6 +1978,9 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
>  		if (!(flags & EXEC_OBJECT_CAPTURE))
>  			continue;
>
> +		if (vma->obj->flags & I915_BO_ALLOC_TOPDOWN)
> +			return -EINVAL;
> +
>  		for_each_batch_create_order(eb, j) {
>  			struct i915_capture_list *capture;
>
> @@ -1990,6 +1993,8 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
>  			eb->capture_lists[j] = capture;
>  		}
>  	}
> +
> +	return 0;
>  }
>
>  /* Commit once we're in the critical path */
> @@ -3418,7 +3423,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
>  	}
>
>  	ww_acquire_done(&eb.ww.ctx);
> -	eb_capture_stage(&eb);
> +	err = eb_capture_stage(&eb);
> +	if (err)
> +		goto err_vma;
>
>  	out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
>  	if (IS_ERR(out_fence)) {
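
For illustration only, not part of the quoted patch: from userspace the new
check surfaces as the execbuf ioctl failing with EINVAL when an object
flagged with EXEC_OBJECT_CAPTURE was allocated such that it may end up in
CPU-inaccessible (non-mappable) LMEM. A minimal sketch, assuming the DRM fd
and GEM handles are created elsewhere; submit_with_capture() is a
hypothetical helper name, not i915 uAPI:

/*
 * Sketch only: submit a batch with one object marked for error capture.
 * With the change above, the ioctl is expected to fail with
 * errno == EINVAL if capture_handle's object carries the top-down
 * (potentially CPU-inaccessible) placement hint.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>

#include <drm/i915_drm.h>

static int submit_with_capture(int fd, uint32_t batch_handle,
			       uint32_t capture_handle)
{
	struct drm_i915_gem_exec_object2 objects[2] = {
		{ .handle = capture_handle, .flags = EXEC_OBJECT_CAPTURE },
		{ .handle = batch_handle }, /* batch object is last by default */
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr = (uintptr_t)objects,
		.buffer_count = 2,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
		return -errno; /* -EINVAL now also covers unmappable capture BOs */

	return 0;
}

Since eb_capture_stage() runs before the request is created, the failure is
reported through the normal err_vma unwind path shown in the last hunk.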