Note whether an added shared fence is a write by using the LSB of the fence
pointer.

Signed-off-by: Christian König <christian.koenig@xxxxxxx>
---
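(Not part of the patch itself: a minimal userspace sketch of the pointer
tagging that the new reservation_object_shared_fence() /
reservation_object_shared_is_write() helpers rely on. The names fake_fence,
pack_entry, entry_fence and entry_is_write below are made up for
illustration; the only assumption is that fence objects are at least
2-byte aligned, so bit 0 of the pointer is free to carry the write flag.)

/* Sketch of LSB pointer tagging, as used for the shared[] entries. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_fence {		/* stand-in for struct dma_fence */
	unsigned int context;
};

/* Pack the write flag into bit 0 of the (aligned) fence pointer. */
static void *pack_entry(struct fake_fence *fence, bool is_write)
{
	return (void *)((uintptr_t)fence | (is_write ? 1 : 0));
}

/* Mask the flag off again to recover the real fence pointer. */
static struct fake_fence *entry_fence(void *e)
{
	return (struct fake_fence *)((uintptr_t)e & ~(uintptr_t)1);
}

static bool entry_is_write(void *e)
{
	return (uintptr_t)e & 1;
}

int main(void)
{
	struct fake_fence fence = { .context = 42 };
	void *e = pack_entry(&fence, true);

	assert(entry_fence(e) == &fence);
	assert(entry_is_write(e));
	printf("context %u, write=%d\n", entry_fence(e)->context,
	       entry_is_write(e));
	return 0;
}

Recovering either half of an entry is a single mask, so the RCU read paths
touched below pay no extra indirection for the flag.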
 drivers/dma-buf/dma-buf.c                    |  8 +++-
 drivers/dma-buf/reservation.c                | 59 +++++++++++++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c   |  2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |  3 +-
 drivers/gpu/drm/i915/i915_gem.c              |  6 ++-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  2 +-
 drivers/gpu/drm/msm/msm_gem.c                |  3 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c         |  2 +-
 drivers/gpu/drm/qxl/qxl_release.c            |  3 +-
 drivers/gpu/drm/radeon/radeon_object.c       |  2 +-
 drivers/gpu/drm/ttm/ttm_bo.c                 |  2 +-
 drivers/gpu/drm/ttm/ttm_execbuf_util.c       |  3 +-
 drivers/gpu/drm/vc4/vc4_gem.c                |  3 +-
 drivers/gpu/drm/vgem/vgem_fence.c            |  2 +-
 include/linux/reservation.h                  | 20 +++++++---
 15 files changed, 76 insertions(+), 44 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 13884474d158..6b816cd505d6 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -244,7 +244,10 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 			goto out;

 		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence;
+
+			fence = reservation_object_shared_fence(
+				rcu_dereference(fobj->shared[i]));

 			if (!dma_fence_get_rcu(fence)) {
 				/*
@@ -1062,7 +1065,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 			   fence->ops->get_timeline_name(fence),
 			   dma_fence_is_signaled(fence) ? "" : "un");
 		for (i = 0; i < shared_count; i++) {
-			fence = rcu_dereference(fobj->shared[i]);
+			fence = reservation_object_shared_fence(
+				rcu_dereference(fobj->shared[i]));
 			if (!dma_fence_get_rcu(fence))
 				continue;
 			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 1f0c61b540ba..0f98384b86d4 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -93,14 +93,14 @@ int reservation_object_reserve_shared(struct reservation_object *obj)
 	 * the new.
 	 */
 	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
-		struct dma_fence *fence;
+		void *e;

-		fence = rcu_dereference_protected(old->shared[i],
-						  reservation_object_held(obj));
-		if (dma_fence_is_signaled(fence))
-			RCU_INIT_POINTER(new->shared[--k], fence);
+		e = rcu_dereference_protected(old->shared[i],
+					      reservation_object_held(obj));
+		if (dma_fence_is_signaled(reservation_object_shared_fence(e)))
+			RCU_INIT_POINTER(new->shared[--k], e);
 		else
-			RCU_INIT_POINTER(new->shared[j++], fence);
+			RCU_INIT_POINTER(new->shared[j++], e);
 	}
 	new->shared_count = j;
 	new->shared_max = max;
@@ -120,11 +120,11 @@ int reservation_object_reserve_shared(struct reservation_object *obj)

 	/* Drop the references to the signaled fences */
 	for (i = k; i < new->shared_max; ++i) {
-		struct dma_fence *fence;
+		void *e;

-		fence = rcu_dereference_protected(new->shared[i],
-						  reservation_object_held(obj));
-		dma_fence_put(fence);
+		e = rcu_dereference_protected(new->shared[i],
+					      reservation_object_held(obj));
+		dma_fence_put(reservation_object_shared_fence(e));
 	}

 	kfree_rcu(old, rcu);
@@ -141,7 +141,8 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
  * reservation_object_reserve_shared() has been called.
  */
 void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct dma_fence *fence)
+					 struct dma_fence *fence,
+					 bool is_write)
 {
 	struct reservation_object_list *fobj;
 	unsigned int i;
@@ -155,13 +156,17 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,

 	for (i = 0; i < fobj->shared_count; ++i) {
 		struct dma_fence *old_fence;
+		void *e;

-		old_fence = rcu_dereference_protected(fobj->shared[i],
-						      reservation_object_held(obj));
-		if (old_fence->context == fence->context ||
+		e = rcu_dereference_protected(fobj->shared[i],
+					      reservation_object_held(obj));
+		old_fence = reservation_object_shared_fence(e);
+		if ((old_fence->context == fence->context &&
+		     reservation_object_shared_is_write(e) == is_write) ||
 		    dma_fence_is_signaled(old_fence)) {
 			/* memory barrier is added by write_seqcount_begin */
-			RCU_INIT_POINTER(fobj->shared[i], fence);
+			RCU_INIT_POINTER(fobj->shared[i],
+					 (void *)(is_write | (long)fence));
 			write_seqcount_end(&obj->seq);
 			preempt_enable();
 			dma_fence_put(old_fence);
@@ -173,7 +178,8 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 	 * memory barrier is added by write_seqcount_begin,
 	 * fobj->shared_count is protected by this lock too
 	 */
-	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+	RCU_INIT_POINTER(fobj->shared[fobj->shared_count],
+			 (void *)(is_write | (long)fence));
 	fobj->shared_count++;

 	write_seqcount_end(&obj->seq);
@@ -213,8 +219,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,

 	/* inplace update, no shared fences */
 	while (i--)
-		dma_fence_put(rcu_dereference_protected(old->shared[i],
-						reservation_object_held(obj)));
+		dma_fence_put(reservation_object_get_shared_fence(obj, old, i));

 	dma_fence_put(old_fence);
 }
@@ -260,8 +265,10 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 		dst_list->shared_max = shared_count;
 		for (i = 0; i < src_list->shared_count; ++i) {
 			struct dma_fence *fence;
+			void *e;

-			fence = rcu_dereference(src_list->shared[i]);
+			e = rcu_dereference(src_list->shared[i]);
+			fence = reservation_object_shared_fence(e);
 			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
 				     &fence->flags))
 				continue;
@@ -277,7 +284,7 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 				continue;
 			}

-			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], e);
 		}
 	} else {
 		dst_list = NULL;
@@ -368,7 +375,9 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 			shared = nshared;
 			shared_count = fobj ? fobj->shared_count : 0;
 			for (i = 0; i < shared_count; ++i) {
-				shared[i] = rcu_dereference(fobj->shared[i]);
+				void *e = rcu_dereference(fobj->shared[i]);
+
+				shared[i] = reservation_object_shared_fence(e);
 				if (!dma_fence_get_rcu(shared[i]))
 					break;
 			}
@@ -456,8 +465,10 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 		shared_count = fobj->shared_count;

 		for (i = 0; !fence && i < shared_count; ++i) {
-			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+			void *e = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *lfence;

+			lfence = reservation_object_shared_fence(e);
 			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
 				     &lfence->flags))
 				continue;
@@ -545,8 +556,10 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 		shared_count = fobj->shared_count;

 		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+			void *e = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence;

+			fence = reservation_object_shared_fence(e);
 			ret = reservation_object_test_signaled_single(fence);
 			if (ret < 0)
 				goto retry;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b0e14a3d54ef..303143b89275 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1342,7 +1342,7 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 	struct reservation_object *resv = bo->tbo.resv;

 	if (shared)
-		reservation_object_add_shared_fence(resv, fence);
+		reservation_object_add_shared_fence(resv, fence, true);
 	else
 		reservation_object_add_excl_fence(resv, fence);
 }
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 46ecd3e66ac9..5f4a872a88dd 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -214,7 +214,8 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
 							  submit->out_fence);
 		else
 			reservation_object_add_shared_fence(etnaviv_obj->resv,
-							    submit->out_fence);
+							    submit->out_fence,
+							    false);

 		submit_unlock_object(submit, i);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 86f1f9aaa119..0415420e1cfd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4552,8 +4552,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		unsigned int shared_count = list->shared_count, i;

 		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence =
-				rcu_dereference(list->shared[i]);
+			struct dma_fence *fence;
+
+			fence = reservation_object_shared_fence(
+				rcu_dereference(list->shared[i]));

 			args->busy |= busy_check_reader(fence);
 		}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index eefd449502e2..85e3f92e87f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1771,7 +1771,7 @@ static void eb_export_fence(struct i915_vma *vma,
 	if (flags & EXEC_OBJECT_WRITE)
 		reservation_object_add_excl_fence(resv, &rq->fence);
 	else if (reservation_object_reserve_shared(resv) == 0)
-		reservation_object_add_shared_fence(resv, &rq->fence);
+		reservation_object_add_shared_fence(resv, &rq->fence, false);
 	reservation_object_unlock(resv);
 }
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 95d25dbfde2b..fdee77627a0f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -672,7 +672,8 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
 	if (exclusive)
 		reservation_object_add_excl_fence(msm_obj->resv, fence);
 	else
-		reservation_object_add_shared_fence(msm_obj->resv, fence);
+		reservation_object_add_shared_fence(msm_obj->resv, fence,
+						    false);
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7214022dfb91..741faead4c7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1657,7 +1657,7 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl
 	if (exclusive)
 		reservation_object_add_excl_fence(resv, &fence->base);
 	else if (fence)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		reservation_object_add_shared_fence(resv, &fence->base, false);
 }

 struct ttm_bo_driver nouveau_bo_driver = {
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 7cb214577275..e68ab1efd809 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -466,7 +466,8 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		bo = entry->bo;
 		qbo = to_qxl_bo(bo);

-		reservation_object_add_shared_fence(bo->resv, &release->base);
+		reservation_object_add_shared_fence(bo->resv, &release->base,
+						    false);
 		ttm_bo_add_to_lru(bo);
 		reservation_object_unlock(bo->resv);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index ba2fd295697f..11114ffb7495 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -870,7 +870,7 @@ void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
 	struct reservation_object *resv = bo->tbo.resv;

 	if (shared)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		reservation_object_add_shared_fence(resv, &fence->base, false);
 	else
 		reservation_object_add_excl_fence(resv, &fence->base);
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 820d97d3e8b9..9e98d8977cd0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -794,7 +794,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 	spin_unlock(&man->move_lock);

 	if (fence) {
-		reservation_object_add_shared_fence(bo->resv, fence);
+		reservation_object_add_shared_fence(bo->resv, fence, true);

 		ret = reservation_object_reserve_shared(bo->resv);
 		if (unlikely(ret))
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index e73ae0d22897..f82a460d106b 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -202,7 +202,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
 		if (entry->shared)
-			reservation_object_add_shared_fence(bo->resv, fence);
+			reservation_object_add_shared_fence(bo->resv, fence,
+							    true);
 		else
 			reservation_object_add_excl_fence(bo->resv, fence);
 		ttm_bo_add_to_lru(bo);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 7910b9acedd6..9db15a76c25f 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -536,7 +536,8 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
 		bo = to_vc4_bo(&exec->bo[i]->base);
 		bo->seqno = seqno;

-		reservation_object_add_shared_fence(bo->resv, exec->fence);
+		reservation_object_add_shared_fence(bo->resv, exec->fence,
+						    false);
 	}

 	list_for_each_entry(bo, &exec->unref_list, unref_head) {
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index b28876c222b4..5d8b47056be6 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -194,7 +194,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 	if (arg->flags & VGEM_FENCE_WRITE)
 		reservation_object_add_excl_fence(resv, fence);
 	else if ((ret = reservation_object_reserve_shared(resv)) == 0)
-		reservation_object_add_shared_fence(resv, fence);
+		reservation_object_add_shared_fence(resv, fence, false);
 	reservation_object_unlock(resv);

 	/* Record the fence in our idr for later signaling */
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 8a3298574bf5..d73bf025df4b 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[];
 struct reservation_object_list {
 	struct rcu_head rcu;
 	u32 shared_count, shared_max;
-	struct dma_fence __rcu *shared[];
+	void * __rcu *shared[];
 };

 /**
@@ -80,6 +80,8 @@ struct reservation_object {
 #define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define reservation_object_assert_held(obj) \
 	lockdep_assert_held(&(obj)->lock.base)
+#define reservation_object_shared_fence(e) ((struct dma_fence *)((long)e & ~1ul))
+#define reservation_object_shared_is_write(e) ((long)e & 1)

 /**
  * reservation_object_init - initialize a reservation object
@@ -116,8 +118,11 @@ reservation_object_fini(struct reservation_object *obj)

 	fobj = rcu_dereference_protected(obj->fence, 1);
 	if (fobj) {
-		for (i = 0; i < fobj->shared_count; ++i)
-			dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));
+		for (i = 0; i < fobj->shared_count; ++i) {
+			void *e = rcu_dereference_protected(fobj->shared[i], 1);
+
+			dma_fence_put(reservation_object_shared_fence(e));
+		}

 		kfree(fobj);
 	}
@@ -155,8 +160,10 @@ reservation_object_get_shared_fence(struct reservation_object *obj,
 				    struct reservation_object_list *list,
 				    unsigned int idx)
 {
-	return rcu_dereference_protected(list->shared[idx],
-					 reservation_object_held(obj));
+	void *e = rcu_dereference_protected(list->shared[idx],
+					    reservation_object_held(obj));
+
+	return reservation_object_shared_fence(e);
 }

 /**
@@ -282,7 +289,8 @@ reservation_object_get_excl_rcu(struct reservation_object *obj)
 int reservation_object_reserve_shared(struct reservation_object *obj);

 void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct dma_fence *fence);
+					 struct dma_fence *fence,
+					 bool is_write);

 void reservation_object_add_excl_fence(struct reservation_object *obj,
 				       struct dma_fence *fence);
-- 
2.14.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx