[PATCH] drm/i915: Serialise the fill BLT with the vma pinning

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Make sure that we wait for the vma to be pinned prior to telling the GPU
to fill the pages through that vma.

However, since our async operations fight over obj->resv->excl_fence, we
must manually order them. This makes the code much more fragile, and gives
an outside observer the chance to see the intermediate fences. To be
discussed!

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Matthew Auld <matthew.auld@xxxxxxxxx>
---
 .../gpu/drm/i915/gem/i915_gem_client_blt.c    | 33 ++++++++++++++-----
 1 file changed, 25 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 3502071e1391..14dabc27a463 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -71,10 +71,30 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
 		goto err_free;
 	}
 
+	/*
+	 * XXX fix scheduling with get_pages & clear workers
+	 *
+	 * The complication is that we end up overwriting the same
+	 * obj->resv->excl_fence for each stage of the operation. That fence
+	 * should be set on scheduling the work, and only signaled upon
+	 * completion of the entire workqueue.
+	 *
+	 * Within the workqueue, we use the fence to schedule each individual
+	 * task. Each individual task knows to use obj->resv->fence.
+	 *
+	 * To an outsider, they must wait until the end and so the
+	 * obj->resv->fence must be the composite.
+	 *
+	 * Ideas?
+	 */
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (unlikely(err))
+		goto err_free;
+
 	vma->private = sleeve;
 	vma->ops = &proxy_vma_ops;
 
-	sleeve->vma = vma;
+	sleeve->vma = i915_vma_get(vma);
 	sleeve->pages = pages;
 	sleeve->page_sizes = *page_sizes;
 
@@ -87,6 +107,9 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
 
 static void destroy_sleeve(struct i915_sleeve *sleeve)
 {
+	i915_vma_unpin(sleeve->vma);
+	i915_vma_put(sleeve->vma);
+
 	kfree(sleeve);
 }
 
@@ -172,14 +195,10 @@ static void clear_pages_worker(struct work_struct *work)
 	obj->read_domains = I915_GEM_GPU_DOMAINS;
 	obj->write_domain = 0;
 
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (unlikely(err))
-		goto out_signal;
-
 	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
-		goto out_unpin;
+		goto out_signal;
 	}
 
 	rq = intel_context_create_request(w->ce);
@@ -224,8 +243,6 @@ static void clear_pages_worker(struct work_struct *work)
 	i915_request_add(rq);
 out_batch:
 	intel_emit_vma_release(w->ce, batch);
-out_unpin:
-	i915_vma_unpin(vma);
 out_signal:
 	if (unlikely(err)) {
 		dma_fence_set_error(&w->dma, err);
-- 
2.23.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx




[Index of Archives]     [AMD Graphics]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux