As a prelude to the next step, where we want to perform all the object
allocations together under the same lock, we must first delay the
i915_vma_pin() as it implicitly does the allocations for us, one by one.
Since it does the allocations one by one, it is not allowed to wait/evict;
by pulling all the allocations together, the entire set can be scheduled
as one.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
Reviewed-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxx>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 74 ++++++++++---------
 1 file changed, 41 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index e9ef0c287fd9..2f6fa8b3a805 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -34,6 +34,8 @@ struct eb_vma {
 	/** This vma's place in the execbuf reservation list */
 	struct drm_i915_gem_exec_object2 *exec;
+
+	struct list_head bind_link;
 
 	struct list_head unbound_link;
 	struct list_head reloc_link;
@@ -248,8 +250,8 @@ struct i915_execbuffer {
 	/** actual size of execobj[] as we may extend it for the cmdparser */
 	unsigned int buffer_count;
 
-	/** list of vma not yet bound during reservation phase */
-	struct list_head unbound;
+	/** list of all vma required to be bound for this execbuf */
+	struct list_head bind_list;
 
 	/** list of vma that have execobj.relocation_count */
 	struct list_head relocs_list;
@@ -577,6 +579,8 @@ eb_add_vma(struct i915_execbuffer *eb,
 			   eb->lut_size)]);
 	}
 
+	list_add_tail(&ev->bind_link, &eb->bind_list);
+
 	if (entry->relocation_count)
 		list_add_tail(&ev->reloc_link, &eb->relocs_list);
@@ -598,16 +602,6 @@ eb_add_vma(struct i915_execbuffer *eb,
 
 		eb->batch = ev;
 	}
-
-	if (eb_pin_vma(eb, entry, ev)) {
-		if (entry->offset != vma->node.start) {
-			entry->offset = vma->node.start | UPDATE;
-			eb->args->flags |= __EXEC_HAS_RELOC;
-		}
-	} else {
-		eb_unreserve_vma(ev);
-		list_add_tail(&ev->unbound_link, &eb->unbound);
-	}
 }
 
 static int eb_reserve_vma(const struct i915_execbuffer *eb,
@@ -682,13 +676,31 @@ static int wait_for_timeline(struct intel_timeline *tl)
 	} while (1);
 }
 
-static int eb_reserve(struct i915_execbuffer *eb)
+static int eb_reserve_vm(struct i915_execbuffer *eb)
 {
-	const unsigned int count = eb->buffer_count;
 	unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
-	struct list_head last;
+	struct list_head last, unbound;
 	struct eb_vma *ev;
-	unsigned int i, pass;
+	unsigned int pass;
+
+	INIT_LIST_HEAD(&unbound);
+	list_for_each_entry(ev, &eb->bind_list, bind_link) {
+		struct drm_i915_gem_exec_object2 *entry = ev->exec;
+		struct i915_vma *vma = ev->vma;
+
+		if (eb_pin_vma(eb, entry, ev)) {
+			if (entry->offset != vma->node.start) {
+				entry->offset = vma->node.start | UPDATE;
+				eb->args->flags |= __EXEC_HAS_RELOC;
+			}
+		} else {
+			eb_unreserve_vma(ev);
+			list_add_tail(&ev->unbound_link, &unbound);
+		}
+	}
+
+	if (list_empty(&unbound))
+		return 0;
 
 	/*
 	 * Attempt to pin all of the buffers into the GTT.
@@ -726,7 +738,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
 			return -EINTR;
 
-		list_for_each_entry(ev, &eb->unbound, unbound_link) {
+		list_for_each_entry(ev, &unbound, unbound_link) {
 			err = eb_reserve_vma(eb, ev, pin_flags);
 			if (err)
 				break;
@@ -737,13 +749,11 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		}
 
 		/* Resort *all* the objects into priority order */
-		INIT_LIST_HEAD(&eb->unbound);
+		INIT_LIST_HEAD(&unbound);
 		INIT_LIST_HEAD(&last);
-		for (i = 0; i < count; i++) {
-			unsigned int flags;
+		list_for_each_entry(ev, &eb->bind_list, bind_link) {
+			unsigned int flags = ev->flags;
 
-			ev = &eb->vma[i];
-			flags = ev->flags;
 			if (flags & EXEC_OBJECT_PINNED &&
 			    flags & __EXEC_OBJECT_HAS_PIN)
 				continue;
@@ -752,17 +762,17 @@ static int eb_reserve(struct i915_execbuffer *eb)
 
 			if (flags & EXEC_OBJECT_PINNED)
 				/* Pinned must have their slot */
-				list_add(&ev->unbound_link, &eb->unbound);
+				list_add(&ev->unbound_link, &unbound);
 			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
 				/* Map require the lowest 256MiB (aperture) */
-				list_add_tail(&ev->unbound_link, &eb->unbound);
+				list_add_tail(&ev->unbound_link, &unbound);
 			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
 				/* Prioritise 4GiB region for restricted bo */
 				list_add(&ev->unbound_link, &last);
 			else
 				list_add_tail(&ev->unbound_link, &last);
 		}
-		list_splice_tail(&last, &eb->unbound);
+		list_splice_tail(&last, &unbound);
 		mutex_unlock(&eb->i915->drm.struct_mutex);
 
 		if (err == -EAGAIN) {
@@ -933,8 +943,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	unsigned int i;
 	int err = 0;
 
+	INIT_LIST_HEAD(&eb->bind_list);
 	INIT_LIST_HEAD(&eb->relocs_list);
-	INIT_LIST_HEAD(&eb->unbound);
 
 	for (i = 0; i < eb->buffer_count; i++) {
 		struct i915_vma *vma;
@@ -1583,16 +1593,10 @@ static int eb_relocate(struct i915_execbuffer *eb)
 {
 	int err;
 
-	err = eb_lookup_vmas(eb);
+	err = eb_reserve_vm(eb);
 	if (err)
 		return err;
 
-	if (!list_empty(&eb->unbound)) {
-		err = eb_reserve(eb);
-		if (err)
-			return err;
-	}
-
 	/* The objects are in their final locations, apply the relocations. */
 	if (eb->args->flags & __EXEC_HAS_RELOC) {
 		struct eb_vma *ev;
@@ -2753,6 +2757,10 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_context;
 
+	err = eb_lookup_vmas(&eb);
+	if (unlikely(err))
+		goto err_engine;
+
 	/* *** TIMELINE LOCK *** */
 	err = eb_lock_engine(&eb);
 	if (unlikely(err))
-- 
2.20.1
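
For readers following along outside the driver, the pattern the patch moves
to can be shown in miniature. The sketch below is standalone C, not i915
code; every name in it (obj, collect, try_pin_fast, reserve_all) is
illustrative. Lookup only records each object on a bind list; a single
reservation pass then tries a cheap no-allocation pin across the whole set
and gathers the misses, so a later step can take one lock and schedule the
allocations (including any waiting/eviction) for the entire set at once.

/*
 * Standalone sketch (not i915 code) of collect-then-batch-reserve.
 * All names are hypothetical, chosen to mirror the patch's shape.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct obj {
	struct obj *bind_next;    /* link on the bind list (every object) */
	struct obj *unbound_next; /* link on the unbound list (fast-path misses) */
	bool resident;            /* already placed, so pinning needs no allocation */
};

/* Phase 1: lookup never allocates; it merely records the object. */
static void collect(struct obj **bind_list, struct obj *o)
{
	o->bind_next = *bind_list;
	*bind_list = o;
}

/* Hypothetical fast path: succeed only if no allocation/eviction is needed. */
static bool try_pin_fast(struct obj *o)
{
	return o->resident;
}

/*
 * Phase 2: one pass over the whole set. Everything that misses the fast
 * path is gathered onto 'unbound', where a later step may wait/evict and
 * allocate for the entire set under a single lock.
 */
static struct obj *reserve_all(struct obj *bind_list)
{
	struct obj *unbound = NULL;

	for (struct obj *o = bind_list; o; o = o->bind_next) {
		if (!try_pin_fast(o)) {
			o->unbound_next = unbound;
			unbound = o;
		}
	}
	return unbound; /* NULL means every object was pinned in place */
}

int main(void)
{
	struct obj a = { .resident = true }, b = { .resident = false };
	struct obj *bind_list = NULL;

	collect(&bind_list, &a);
	collect(&bind_list, &b);

	for (struct obj *o = reserve_all(bind_list); o; o = o->unbound_next)
		printf("object %p needs allocation\n", (void *)o);
	return 0;
}

Note the two separate links per object: as with the patch's bind_link and
unbound_link, an object can sit on the permanent bind list and the
transient unbound list at the same time without either traversal
corrupting the other.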