On Fri, 12 Apr 2019 at 09:54, Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> wrote: > > Use the per-object local lock to control the cache domain of the > individual GEM objects, not struct_mutex. This is a huge leap forward > for us in terms of object-level synchronisation; execbuffers are > coordinated using the ww_mutex and pread/pwrite is finally fully > serialised again. > > Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> > --- > drivers/gpu/drm/i915/Makefile | 1 + > drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 4 +- > drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 10 +- > drivers/gpu/drm/i915/gem/i915_gem_domain.c | 70 +++++----- > .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 123 ++++++++++++------ > drivers/gpu/drm/i915/gem/i915_gem_fence.c | 99 ++++++++++++++ > drivers/gpu/drm/i915/gem/i915_gem_object.c | 2 + > drivers/gpu/drm/i915/gem/i915_gem_object.h | 14 ++ > drivers/gpu/drm/i915/gem/i915_gem_pm.c | 7 +- > .../gpu/drm/i915/gem/selftests/huge_pages.c | 12 +- > .../i915/gem/selftests/i915_gem_coherency.c | 12 ++ > .../drm/i915/gem/selftests/i915_gem_context.c | 20 +++ > .../drm/i915/gem/selftests/i915_gem_mman.c | 6 + > .../drm/i915/gem/selftests/i915_gem_phys.c | 4 +- > drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 4 + > drivers/gpu/drm/i915/gt/selftest_lrc.c | 2 + > .../gpu/drm/i915/gt/selftest_workarounds.c | 8 ++ > drivers/gpu/drm/i915/gvt/cmd_parser.c | 2 + > drivers/gpu/drm/i915/gvt/scheduler.c | 8 +- > drivers/gpu/drm/i915/i915_cmd_parser.c | 23 ++-- > drivers/gpu/drm/i915/i915_gem.c | 122 +++++++++-------- > drivers/gpu/drm/i915/i915_gem_gtt.c | 5 +- > drivers/gpu/drm/i915/i915_gem_render_state.c | 2 + > drivers/gpu/drm/i915/i915_vma.c | 8 +- > drivers/gpu/drm/i915/i915_vma.h | 12 ++ > drivers/gpu/drm/i915/intel_display.c | 5 + > drivers/gpu/drm/i915/intel_guc_log.c | 6 +- > drivers/gpu/drm/i915/intel_overlay.c | 25 ++-- > drivers/gpu/drm/i915/intel_uc_fw.c | 2 + > drivers/gpu/drm/i915/selftests/i915_request.c | 4 + > drivers/gpu/drm/i915/selftests/igt_spinner.c | 2 + > 31 files changed, 448 insertions(+), 176 deletions(-) > create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_fence.c > > diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile > index 8e70d5972195..e30622229812 100644 > --- a/drivers/gpu/drm/i915/Makefile > +++ b/drivers/gpu/drm/i915/Makefile > @@ -93,6 +93,7 @@ gem-y += \ > gem/i915_gem_dmabuf.o \ > gem/i915_gem_domain.o \ > gem/i915_gem_execbuffer.o \ > + gem/i915_gem_fence.o \ > gem/i915_gem_internal.o \ > gem/i915_gem_object.o \ > gem/i915_gem_mman.o \ > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c > index 093bfff55a96..efab47250588 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c > @@ -96,6 +96,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, > { > struct clflush *clflush; > > + assert_object_held(obj); > + > /* > * Stolen memory is always coherent with the GPU as it is explicitly > * marked as wc by the system, or the system is cache-coherent. 
> @@ -145,9 +147,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, > true, I915_FENCE_TIMEOUT, > I915_FENCE_GFP); > > - reservation_object_lock(obj->resv, NULL); > reservation_object_add_excl_fence(obj->resv, &clflush->dma); > - reservation_object_unlock(obj->resv); > > i915_sw_fence_commit(&clflush->wait); > } else if (obj->mm.pages) { > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c > index 4e7efe159531..50981ea513f0 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c > @@ -152,7 +152,6 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * > static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) > { > struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); > - struct drm_device *dev = obj->base.dev; > bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); > int err; > > @@ -160,12 +159,12 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire > if (err) > return err; > > - err = i915_mutex_lock_interruptible(dev); > + err = i915_gem_object_lock_interruptible(obj); > if (err) > goto out; > > err = i915_gem_object_set_to_cpu_domain(obj, write); > - mutex_unlock(&dev->struct_mutex); > + i915_gem_object_unlock(obj); > > out: > i915_gem_object_unpin_pages(obj); > @@ -175,19 +174,18 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire > static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) > { > struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); > - struct drm_device *dev = obj->base.dev; > int err; > > err = i915_gem_object_pin_pages(obj); > if (err) > return err; > > - err = i915_mutex_lock_interruptible(dev); > + err = i915_gem_object_lock_interruptible(obj); > if (err) > goto out; > > err = i915_gem_object_set_to_gtt_domain(obj, false); > - mutex_unlock(&dev->struct_mutex); > + i915_gem_object_unlock(obj); > > out: > i915_gem_object_unpin_pages(obj); > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c > index 5e1429f8e910..4c84bb911d6c 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c > @@ -31,9 +31,9 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) > if (!READ_ONCE(obj->pin_global)) > return; > > - mutex_lock(&obj->base.dev->struct_mutex); > + i915_gem_object_lock(obj); > __i915_gem_object_flush_for_display(obj); > - mutex_unlock(&obj->base.dev->struct_mutex); > + i915_gem_object_unlock(obj); > } > > /** > @@ -49,11 +49,10 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) > { > int ret; > > - lockdep_assert_held(&obj->base.dev->struct_mutex); > + assert_object_held(obj); > > ret = i915_gem_object_wait(obj, > I915_WAIT_INTERRUPTIBLE | > - I915_WAIT_LOCKED | > (write ? I915_WAIT_ALL : 0), > MAX_SCHEDULE_TIMEOUT); > if (ret) > @@ -111,11 +110,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) > { > int ret; > > - lockdep_assert_held(&obj->base.dev->struct_mutex); > + assert_object_held(obj); > > ret = i915_gem_object_wait(obj, > I915_WAIT_INTERRUPTIBLE | > - I915_WAIT_LOCKED | > (write ? 
I915_WAIT_ALL : 0), > MAX_SCHEDULE_TIMEOUT); > if (ret) > @@ -181,7 +179,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, > struct i915_vma *vma; > int ret; > > - lockdep_assert_held(&obj->base.dev->struct_mutex); > + assert_object_held(obj); > > if (obj->cache_level == cache_level) > return 0; > @@ -230,7 +228,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, > */ > ret = i915_gem_object_wait(obj, > I915_WAIT_INTERRUPTIBLE | > - I915_WAIT_LOCKED | > I915_WAIT_ALL, > MAX_SCHEDULE_TIMEOUT); > if (ret) > @@ -374,12 +371,16 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, > if (ret) > goto out; > > - ret = i915_mutex_lock_interruptible(dev); > + ret = mutex_lock_interruptible(&i915->drm.struct_mutex); > if (ret) > goto out; > > - ret = i915_gem_object_set_cache_level(obj, level); > - mutex_unlock(&dev->struct_mutex); > + ret = i915_gem_object_lock_interruptible(obj); > + if (ret == 0) { > + ret = i915_gem_object_set_cache_level(obj, level); > + i915_gem_object_unlock(obj); > + } > + mutex_unlock(&i915->drm.struct_mutex); > > out: > i915_gem_object_put(obj); > @@ -401,7 +402,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, > struct i915_vma *vma; > int ret; > > - lockdep_assert_held(&obj->base.dev->struct_mutex); > + assert_object_held(obj); > > /* Mark the global pin early so that we account for the > * display coherency whilst setting up the cache domains. > @@ -486,16 +487,18 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) > void > i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) > { > - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); > + struct drm_i915_gem_object *obj = vma->obj; > + > + assert_object_held(obj); > > - if (WARN_ON(vma->obj->pin_global == 0)) > + if (WARN_ON(obj->pin_global == 0)) > return; > > - if (--vma->obj->pin_global == 0) > + if (--obj->pin_global == 0) > vma->display_alignment = I915_GTT_MIN_ALIGNMENT; > > /* Bump the LRU to try and avoid premature eviction whilst flipping */ > - i915_gem_object_bump_inactive_ggtt(vma->obj); > + i915_gem_object_bump_inactive_ggtt(obj); > > i915_vma_unpin(vma); > } > @@ -513,11 +516,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) > { > int ret; > > - lockdep_assert_held(&obj->base.dev->struct_mutex); > + assert_object_held(obj); > > ret = i915_gem_object_wait(obj, > I915_WAIT_INTERRUPTIBLE | > - I915_WAIT_LOCKED | > (write ? 
I915_WAIT_ALL : 0), > MAX_SCHEDULE_TIMEOUT); > if (ret) > @@ -639,7 +641,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, > if (err) > goto out; > > - err = i915_mutex_lock_interruptible(dev); > + err = i915_gem_object_lock_interruptible(obj); > if (err) > goto out_unpin; > > @@ -653,7 +655,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, > /* And bump the LRU for this access */ > i915_gem_object_bump_inactive_ggtt(obj); > > - mutex_unlock(&dev->struct_mutex); > + i915_gem_object_unlock(obj); > > if (write_domain != 0) > intel_fb_obj_invalidate(obj, > @@ -676,22 +678,23 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, > { > int ret; > > - lockdep_assert_held(&obj->base.dev->struct_mutex); > - > *needs_clflush = 0; > if (!i915_gem_object_has_struct_page(obj)) > return -ENODEV; > > + ret = i915_gem_object_lock_interruptible(obj); > + if (ret) > + return ret; > + > ret = i915_gem_object_wait(obj, > - I915_WAIT_INTERRUPTIBLE | > - I915_WAIT_LOCKED, > + I915_WAIT_INTERRUPTIBLE, > MAX_SCHEDULE_TIMEOUT); > if (ret) > - return ret; > + goto err_unlock; > > ret = i915_gem_object_pin_pages(obj); > if (ret) > - return ret; > + goto err_unlock; > > if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ || > !static_cpu_has(X86_FEATURE_CLFLUSH)) { > @@ -719,6 +722,8 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, > > err_unpin: > i915_gem_object_unpin_pages(obj); > +err_unlock: > + i915_gem_object_unlock(obj); > return ret; > } > > @@ -727,23 +732,24 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, > { > int ret; > > - lockdep_assert_held(&obj->base.dev->struct_mutex); > - > *needs_clflush = 0; > if (!i915_gem_object_has_struct_page(obj)) > return -ENODEV; > > + ret = i915_gem_object_lock_interruptible(obj); > + if (ret) > + return ret; > + > ret = i915_gem_object_wait(obj, > I915_WAIT_INTERRUPTIBLE | > - I915_WAIT_LOCKED | > I915_WAIT_ALL, > MAX_SCHEDULE_TIMEOUT); > if (ret) > - return ret; > + goto err_unlock; > > ret = i915_gem_object_pin_pages(obj); > if (ret) > - return ret; > + goto err_unlock; > > if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE || > !static_cpu_has(X86_FEATURE_CLFLUSH)) { > @@ -780,5 +786,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, > > err_unpin: > i915_gem_object_unpin_pages(obj); > +err_unlock: > + i915_gem_object_unlock(obj); > return ret; > } > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c > index a2761029f0c3..080f69358224 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c > @@ -1076,7 +1076,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, > if (use_cpu_reloc(cache, obj)) > return NULL; > > + i915_gem_object_lock(obj); > err = i915_gem_object_set_to_gtt_domain(obj, true); > + i915_gem_object_unlock(obj); > if (err) > return ERR_PTR(err); > > @@ -1165,6 +1167,26 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) > *addr = value; > } > > +static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma) > +{ > + struct drm_i915_gem_object *obj = vma->obj; > + int err; > + > + i915_vma_lock(vma); > + > + if (obj->cache_dirty & ~obj->cache_coherent) > + i915_gem_clflush_object(obj, 0); > + obj->write_domain = 0; > + > + err = i915_request_await_object(rq, vma->obj, true); > + if (err == 0) > + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); > + > + 
i915_vma_unlock(vma); > + > + return err; > +} > + > static int __reloc_gpu_alloc(struct i915_execbuffer *eb, > struct i915_vma *vma, > unsigned int len) > @@ -1176,15 +1198,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, > u32 *cmd; > int err; > > - if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) { > - obj = vma->obj; > - if (obj->cache_dirty & ~obj->cache_coherent) > - i915_gem_clflush_object(obj, 0); > - obj->write_domain = 0; > - } > - > - GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU); > - > obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE); > if (IS_ERR(obj)) > return PTR_ERR(obj); > @@ -1213,7 +1226,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, > goto err_unpin; > } > > - err = i915_request_await_object(rq, vma->obj, true); > + err = reloc_move_to_gpu(rq, vma); > if (err) > goto err_request; > > @@ -1221,14 +1234,12 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, > batch->node.start, PAGE_SIZE, > cache->gen > 5 ? 0 : I915_DISPATCH_SECURE); > if (err) > - goto err_request; > + goto skip_request; > > + i915_vma_lock(batch); > GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); > err = i915_vma_move_to_active(batch, rq, 0); > - if (err) > - goto skip_request; > - > - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); > + i915_vma_unlock(batch); > if (err) > goto skip_request; > > @@ -1837,24 +1848,59 @@ static int eb_relocate(struct i915_execbuffer *eb) > static int eb_move_to_gpu(struct i915_execbuffer *eb) > { > const unsigned int count = eb->buffer_count; > + struct ww_acquire_ctx acquire; > unsigned int i; > - int err; > + int err = 0; > + > + ww_acquire_init(&acquire, &reservation_ww_class); > > for (i = 0; i < count; i++) { > + struct i915_vma *vma = eb->vma[i]; > + > + err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire); > + if (!err) > + continue; > + > + GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */ > + > + if (err == -EDEADLK) { > + GEM_BUG_ON(i == 0); > + do { > + int j = i - 1; > + > + ww_mutex_unlock(&eb->vma[j]->resv->lock); > + > + swap(eb->flags[i], eb->flags[j]); > + swap(eb->vma[i], eb->vma[j]); > + eb->vma[i]->exec_flags = &eb->flags[i]; > + } while (--i); > + GEM_BUG_ON(vma != eb->vma[0]); > + vma->exec_flags = &eb->flags[0]; > + > + err = ww_mutex_lock_slow_interruptible(&vma->resv->lock, > + &acquire); > + } > + if (err) > + break; > + } > + ww_acquire_done(&acquire); > + > + while (i--) { > unsigned int flags = eb->flags[i]; > struct i915_vma *vma = eb->vma[i]; > struct drm_i915_gem_object *obj = vma->obj; > > + assert_vma_held(vma); > + > if (flags & EXEC_OBJECT_CAPTURE) { > struct i915_capture_list *capture; > > capture = kmalloc(sizeof(*capture), GFP_KERNEL); > - if (unlikely(!capture)) > - return -ENOMEM; > - > - capture->next = eb->request->capture_list; > - capture->vma = eb->vma[i]; > - eb->request->capture_list = capture; > + if (capture) { > + capture->next = eb->request->capture_list; > + capture->vma = vma; > + eb->request->capture_list = capture; > + } > } > > /* > @@ -1874,24 +1920,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) > flags &= ~EXEC_OBJECT_ASYNC; > } > > - if (flags & EXEC_OBJECT_ASYNC) > - continue; > - > - err = i915_request_await_object > - (eb->request, obj, flags & EXEC_OBJECT_WRITE); > - if (err) > - return err; > - } > + if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) { > + err = i915_request_await_object > + (eb->request, obj, flags & EXEC_OBJECT_WRITE); > + } > > - for (i = 0; i < count; i++) { > - unsigned int flags 
= eb->flags[i]; > - struct i915_vma *vma = eb->vma[i]; > + if (err == 0) > + err = i915_vma_move_to_active(vma, eb->request, flags); > > - err = i915_vma_move_to_active(vma, eb->request, flags); > - if (unlikely(err)) { > - i915_request_skip(eb->request, err); > - return err; > - } > + i915_vma_unlock(vma); > > __eb_unreserve_vma(vma, flags); > vma->exec_flags = NULL; > @@ -1899,12 +1936,20 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) > if (unlikely(flags & __EXEC_OBJECT_HAS_REF)) > i915_vma_put(vma); > } > + ww_acquire_fini(&acquire); > + > + if (unlikely(err)) > + goto err_skip; > + > eb->exec = NULL; > > /* Unconditionally flush any chipset caches (for streaming writes). */ > i915_gem_chipset_flush(eb->i915); > - > return 0; > + > +err_skip: > + i915_request_skip(eb->request, err); > + return err; > } > > static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c > new file mode 100644 > index 000000000000..021425f30a66 > --- /dev/null > +++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c > @@ -0,0 +1,99 @@ > +/* > + * SPDX-License-Identifier: MIT > + * > + * Copyright © 2019 Intel Corporation > + */ > + > +#include "i915_gem_object.h" > + > +#include "../i915_drv.h" > + > +static DEFINE_SPINLOCK(fence_lock); > + > +struct stub_fence { > + struct dma_fence dma; > + struct i915_sw_fence chain; > +}; > + > +static int __i915_sw_fence_call > +stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) > +{ > + struct stub_fence *stub = container_of(fence, typeof(*stub), chain); > + > + switch (state) { > + case FENCE_COMPLETE: > + dma_fence_signal(&stub->dma); > + break; > + > + case FENCE_FREE: > + dma_fence_put(&stub->dma); > + break; > + } > + > + return NOTIFY_DONE; > +} > + > +static const char *stub_driver_name(struct dma_fence *fence) > +{ > + return DRIVER_NAME; > +} > + > +static const char *stub_timeline_name(struct dma_fence *fence) > +{ > + return "object"; > +} > + > +static void stub_release(struct dma_fence *fence) > +{ > + struct stub_fence *stub = container_of(fence, typeof(*stub), dma); > + > + i915_sw_fence_fini(&stub->chain); > + > + BUILD_BUG_ON(offsetof(typeof(*stub), dma)); > + dma_fence_free(&stub->dma); > +} > + > +static const struct dma_fence_ops stub_fence_ops = { > + .get_driver_name = stub_driver_name, > + .get_timeline_name = stub_timeline_name, > + .release = stub_release, > +}; > + > +struct dma_fence * > +i915_gem_object_lock_fence(struct drm_i915_gem_object *obj) > +{ > + struct stub_fence *stub; > + > + assert_object_held(obj); > + > + stub = kmalloc(sizeof(*stub), GFP_KERNEL); > + if (!stub) > + return NULL; > + > + dma_fence_init(&stub->dma, &stub_fence_ops, &fence_lock, > + to_i915(obj->base.dev)->mm.unordered_timeline, > + 0); > + i915_sw_fence_init(&stub->chain, stub_notify); > + > + if (i915_sw_fence_await_reservation(&stub->chain, > + obj->resv, NULL, > + true, I915_FENCE_TIMEOUT, > + I915_FENCE_GFP) < 0) > + goto err; > + > + reservation_object_add_excl_fence(obj->resv, &stub->dma); > + > + return &stub->dma; > + > +err: > + stub_release(&stub->dma); > + return NULL; > +} > + > +void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj, > + struct dma_fence *fence) > +{ > + struct stub_fence *stub = container_of(fence, typeof(*stub), dma); > + > + i915_sw_fence_commit(&stub->chain); > +} Nifty. 
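
For anyone reading along: the stub fence wraps an i915_sw_fence that waits on
everything already in obj->resv and installs itself as the new exclusive fence,
so a CPU access shows up on the object's timeline like any other asynchronous
operation. My guess at how a pread/pwrite style caller would use the pair of
helpers (this usage is not in the quoted hunks, and a real path would also
flush or wait for prior GPU writes before touching the pages):

static int cpu_access_sketch(struct drm_i915_gem_object *obj)
{
	struct dma_fence *fence;
	int err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		return err;

	/*
	 * Publish the stub as the new exclusive fence; it signals only once
	 * every fence previously in obj->resv has signalled and we commit it
	 * via i915_gem_object_unlock_fence() below.
	 */
	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence)
		return -ENOMEM;

	/* ... perform the CPU access to the object's pages here ... */

	i915_gem_object_unlock_fence(obj, fence);
	return 0;
}
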
Hopefully I didn't miss anything,
Reviewed-by: Matthew Auld <matthew.auld@xxxxxxxxx>
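
As an aside on the eb_move_to_gpu() changes: the new loop is the standard
ww_mutex acquire/backoff pattern (see the kernel's ww_mutex design document),
with the extra twist that on -EDEADLK the contended vma is bubbled down to
eb->vma[0], so after taking it with the _slow variant the retry can resume
from index 1 with eb->vma[]/eb->flags[] still consistent. A minimal sketch of
the generic pattern for comparison (hypothetical helper, not from the patch):

static int lock_all_resv(struct ww_mutex **locks, unsigned int count,
			 struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *contended = NULL;
	unsigned int i;
	int err;

	ww_acquire_init(ctx, &reservation_ww_class);
retry:
	if (contended) {
		/* Sleep until the lock we lost on is free, then take it. */
		err = ww_mutex_lock_slow_interruptible(contended, ctx);
		if (err)
			goto err_fini;
	}

	for (i = 0; i < count; i++) {
		struct ww_mutex *busy = locks[i];

		if (busy == contended)
			continue; /* already held via the slow path above */

		err = ww_mutex_lock_interruptible(busy, ctx);
		if (err == 0)
			continue;

		/* Back off: drop every lock held on this pass. */
		while (i--) {
			if (locks[i] != contended)
				ww_mutex_unlock(locks[i]);
		}
		if (contended)
			ww_mutex_unlock(contended);
		contended = NULL;

		if (err != -EDEADLK)
			goto err_fini;

		contended = busy;
		goto retry;
	}

	ww_acquire_done(ctx);
	return 0;

err_fini:
	ww_acquire_fini(ctx);
	return err;
}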