On Fri, Sep 17, 2021 at 02:34:50PM +0200, Christian König wrote:
> This makes the function much simpler since the complex
> retry logic is now handled elsewhere.
>
> Signed-off-by: Christian König <christian.koenig@xxxxxxx>
> ---
>  drivers/dma-buf/dma-resv.c | 86 ++++++++++++++++----------------------
>  1 file changed, 35 insertions(+), 51 deletions(-)
>
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index a3c79a99fb44..406150dea5e4 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -426,74 +426,58 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_walk);
>   */
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  {
> -        struct dma_resv_list *src_list, *dst_list;
> -        struct dma_fence *old, *new;
> -        unsigned int i;
> +        struct dma_resv_iter cursor;
> +        struct dma_resv_list *list;
> +        struct dma_fence *f, *excl;
>
>          dma_resv_assert_held(dst);
>
> -        rcu_read_lock();
> -        src_list = dma_resv_shared_list(src);
> -
> -retry:
> -        if (src_list) {
> -                unsigned int shared_count = src_list->shared_count;
> -
> -                rcu_read_unlock();
> +        list = NULL;
> +        excl = NULL;
>
> -                dst_list = dma_resv_list_alloc(shared_count);
> -                if (!dst_list)
> -                        return -ENOMEM;
> +        rcu_read_lock();
> +        dma_resv_iter_begin(&cursor, src, true);
> +        dma_resv_for_each_fence_unlocked(&cursor, f) {
>
> -                rcu_read_lock();
> -                src_list = dma_resv_shared_list(src);
> -                if (!src_list || src_list->shared_count > shared_count) {
> -                        kfree(dst_list);
> -                        goto retry;
> -                }
> +                if (cursor.is_first) {

Maybe have a wrapper for this, like dma_resv_iter_is_reset or is_first or
is_restart (my preference) with some nice docs that this returns true every
time we had to restart the sequence?

Otherwise I fully agree, this is so much better with all the hairy
restarting and get_rcu and test_bit shovelled away somewhere.
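Roughly what I have in mind is something like the below - completely
untested sketch only, the name and reusing cursor->is_first for the
bookkeeping are just illustrative, whatever the iterator actually tracks
internally works too:

/* Sketch only - assumes the cursor keeps its is_first bookkeeping as-is. */

/**
 * dma_resv_iter_is_restart - test for a restart of the iteration
 * @cursor: the cursor with the current position
 *
 * Returns true if this is the first fence after the sequence had to be
 * restarted, i.e. any fences collected so far must be thrown away again.
 */
static inline bool dma_resv_iter_is_restart(struct dma_resv_iter *cursor)
{
        return cursor->is_first;
}

Then the check above simply becomes if (dma_resv_iter_is_restart(&cursor)),
which pretty much documents itself.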
Either way (but I much prefer a wrapper for is_first):

Reviewed-by: Daniel Vetter <daniel.vetter@xxxxxxxx>

> +                        dma_resv_list_free(list);
> +                        dma_fence_put(excl);
>
> -                dst_list->shared_count = 0;
> -                for (i = 0; i < src_list->shared_count; ++i) {
> -                        struct dma_fence __rcu **dst;
> -                        struct dma_fence *fence;
> +                        if (cursor.fences) {
> +                                unsigned int cnt = cursor.fences->shared_count;
>
> -                        fence = rcu_dereference(src_list->shared[i]);
> -                        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
> -                                     &fence->flags))
> -                                continue;
> +                                rcu_read_unlock();
> +                                list = dma_resv_list_alloc(cnt);
> +                                if (!list) {
> +                                        dma_resv_iter_end(&cursor);
> +                                        return -ENOMEM;
> +                                }
>
> -                        if (!dma_fence_get_rcu(fence)) {
> -                                dma_resv_list_free(dst_list);
> -                                src_list = dma_resv_shared_list(src);
> -                                goto retry;
> -                        }
> +                                list->shared_count = 0;
> +                                rcu_read_lock();
>
> -                        if (dma_fence_is_signaled(fence)) {
> -                                dma_fence_put(fence);
> -                                continue;
> +                        } else {
> +                                list = NULL;
>                          }
> -
> -                        dst = &dst_list->shared[dst_list->shared_count++];
> -                        rcu_assign_pointer(*dst, fence);
> +                        excl = NULL;
>                  }
> -        } else {
> -                dst_list = NULL;
> -        }
>
> -        new = dma_fence_get_rcu_safe(&src->fence_excl);
> +                dma_fence_get(f);
> +                if (dma_resv_iter_is_exclusive(&cursor))
> +                        excl = f;
> +                else
> +                        RCU_INIT_POINTER(list->shared[list->shared_count++], f);
> +        }
> +        dma_resv_iter_end(&cursor);
>          rcu_read_unlock();
>
> -        src_list = dma_resv_shared_list(dst);
> -        old = dma_resv_excl_fence(dst);
> -
>          write_seqcount_begin(&dst->seq);
> -        /* write_seqcount_begin provides the necessary memory barrier */
> -        RCU_INIT_POINTER(dst->fence_excl, new);
> -        RCU_INIT_POINTER(dst->fence, dst_list);
> +        excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
> +        list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
>          write_seqcount_end(&dst->seq);
>
> -        dma_resv_list_free(src_list);
> -        dma_fence_put(old);
> +        dma_resv_list_free(list);
> +        dma_fence_put(excl);
>
>          return 0;
>  }
> --
> 2.25.1
>

--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch