On Mon, Mar 21, 2022 at 02:58:34PM +0100, Christian König wrote:
> This function allows replacing fences in the shared fence list when we
> can guarantee that the operation represented by the original fence has
> either finished or no longer accesses the resources protected by the
> dma_resv object by the time the new fence finishes.
>
> Then use this function in the amdkfd code when BOs are unmapped from the
> process.
>
> v2: add an example of when this is useful.

Yeah this makes a looooot more sense now :-)

Reviewed-by: Daniel Vetter <daniel.vetter@xxxxxxxx>

>
> Signed-off-by: Christian König <christian.koenig@xxxxxxx>
> ---
>  drivers/dma-buf/dma-resv.c                    | 45 +++++++++++++++++
>  .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 49 +++----------------
>  include/linux/dma-resv.h                      |  2 +
>  3 files changed, 54 insertions(+), 42 deletions(-)
>
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index b51416405e86..509060861cf3 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -289,6 +289,51 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>  }
>  EXPORT_SYMBOL(dma_resv_add_shared_fence);
>
> +/**
> + * dma_resv_replace_fences - replace fences in the dma_resv obj
> + * @obj: the reservation object
> + * @context: the context of the fences to replace
> + * @replacement: the new fence to use instead
> + *
> + * Replace fences with a specified context with a new fence. Only valid if the
> + * operation represented by the original fence no longer has access to the
> + * resources represented by the dma_resv object when the new fence completes.
> + *
> + * An example of using this is replacing a preemption fence with a page table
> + * update fence which makes the resource inaccessible.
> + */
> +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> +                             struct dma_fence *replacement)
> +{
> +        struct dma_resv_list *list;
> +        struct dma_fence *old;
> +        unsigned int i;
> +
> +        dma_resv_assert_held(obj);
> +
> +        write_seqcount_begin(&obj->seq);
> +
> +        old = dma_resv_excl_fence(obj);
> +        if (old->context == context) {
> +                RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
> +                dma_fence_put(old);
> +        }
> +
> +        list = dma_resv_shared_list(obj);
> +        for (i = 0; list && i < list->shared_count; ++i) {
> +                old = rcu_dereference_protected(list->shared[i],
> +                                                dma_resv_held(obj));
> +                if (old->context != context)
> +                        continue;
> +
> +                rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
> +                dma_fence_put(old);
> +        }
> +
> +        write_seqcount_end(&obj->seq);
> +}
> +EXPORT_SYMBOL(dma_resv_replace_fences);
> +
>  /**
>   * dma_resv_add_excl_fence - Add an exclusive fence.
>   * @obj: the reservation object
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index f9bab963a948..b6f266f612ea 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> @@ -253,53 +253,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
>  static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>                                          struct amdgpu_amdkfd_fence *ef)
>  {
> -        struct dma_resv *resv = bo->tbo.base.resv;
> -        struct dma_resv_list *old, *new;
> -        unsigned int i, j, k;
> +        struct dma_fence *replacement;
>
>          if (!ef)
>                  return -EINVAL;
>
> -        old = dma_resv_shared_list(resv);
> -        if (!old)
> -                return 0;
> -
> -        new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
> -        if (!new)
> -                return -ENOMEM;
> -
> -        /* Go through all the shared fences in the resevation object and sort
> -         * the interesting ones to the end of the list.
> +        /* TODO: Instead of block before we should use the fence of the page
> +         * table update and TLB flush here directly.
>           */
> -        for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
> -                struct dma_fence *f;
> -
> -                f = rcu_dereference_protected(old->shared[i],
> -                                              dma_resv_held(resv));
> -
> -                if (f->context == ef->base.context)
> -                        RCU_INIT_POINTER(new->shared[--j], f);
> -                else
> -                        RCU_INIT_POINTER(new->shared[k++], f);
> -        }
> -        new->shared_max = old->shared_max;
> -        new->shared_count = k;
> -
> -        /* Install the new fence list, seqcount provides the barriers */
> -        write_seqcount_begin(&resv->seq);
> -        RCU_INIT_POINTER(resv->fence, new);
> -        write_seqcount_end(&resv->seq);
> -
> -        /* Drop the references to the removed fences or move them to ef_list */
> -        for (i = j; i < old->shared_count; ++i) {
> -                struct dma_fence *f;
> -
> -                f = rcu_dereference_protected(new->shared[i],
> -                                              dma_resv_held(resv));
> -                dma_fence_put(f);
> -        }
> -        kfree_rcu(old, rcu);
> -
> +        replacement = dma_fence_get_stub();
> +        dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
> +                                replacement);
> +        dma_fence_put(replacement);
>          return 0;
>  }
>
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index afdfdfac729f..3f53177bdb46 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -468,6 +468,8 @@ void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
>  void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
> +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> +                             struct dma_fence *fence);
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>  int dma_resv_get_fences(struct dma_resv *obj, bool write,
>                          unsigned int *num_fences, struct dma_fence ***fences);
> --
> 2.25.1
>

--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
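
Aside: the amdkfd hunk above already shows the intended calling pattern; the following is a
minimal, self-contained sketch of that same pattern under the dma_resv lock. The
remove_ctx_fences() wrapper name and its parameters are hypothetical; only
dma_resv_replace_fences(), dma_fence_get_stub(), dma_fence_put() and dma_resv_assert_held()
come from the patch or the existing dma-buf API.

        #include <linux/dma-fence.h>
        #include <linux/dma-resv.h>

        /* Hypothetical helper: drop all fences of one fence context from a
         * reservation object by replacing them with the always-signaled stub
         * fence.  Mirrors amdgpu_amdkfd_remove_eviction_fence() in the patch
         * above; the caller must hold the dma_resv lock.
         */
        static void remove_ctx_fences(struct dma_resv *resv, uint64_t context)
        {
                struct dma_fence *stub = dma_fence_get_stub();

                dma_resv_assert_held(resv);
                dma_resv_replace_fences(resv, context, stub);
                dma_fence_put(stub);
        }

Because the stub fence is already signaled, the net effect is that waiters no longer block on
the replaced context while the reservation object stays consistent for concurrent RCU readers.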