On Fri, Sep 17, 2021 at 02:34:51PM +0200, Christian König wrote: > This makes the function much simpler since the complex > retry logic is now handled elsewhere. > > v2: use sizeof(void*) instead > > Signed-off-by: Christian König <christian.koenig@xxxxxxx> > --- > drivers/dma-buf/dma-resv.c | 112 +++++++++++++------------------------ > 1 file changed, 40 insertions(+), 72 deletions(-) > > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c > index 406150dea5e4..9b90bd9ac018 100644 > --- a/drivers/dma-buf/dma-resv.c > +++ b/drivers/dma-buf/dma-resv.c > @@ -487,99 +487,67 @@ EXPORT_SYMBOL(dma_resv_copy_fences); > * dma_resv_get_fences - Get an object's shared and exclusive > * fences without update side lock held > * @obj: the reservation object > - * @pfence_excl: the returned exclusive fence (or NULL) > - * @pshared_count: the number of shared fences returned > - * @pshared: the array of shared fence ptrs returned (array is krealloc'd to > + * @fence_excl: the returned exclusive fence (or NULL) > + * @shared_count: the number of shared fences returned > + * @shared: the array of shared fence ptrs returned (array is krealloc'd to > * the required size, and must be freed by caller) > * > * Retrieve all fences from the reservation object. If the pointer for the > * exclusive fence is not specified the fence is put into the array of the > * shared fences as well. Returns either zero or -ENOMEM. 
> */ > -int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, > - unsigned int *pshared_count, > - struct dma_fence ***pshared) > +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl, > + unsigned int *shared_count, struct dma_fence ***shared) > { > - struct dma_fence **shared = NULL; > - struct dma_fence *fence_excl; > - unsigned int shared_count; > - int ret = 1; > - > - do { > - struct dma_resv_list *fobj; > - unsigned int i, seq; > - size_t sz = 0; > - > - shared_count = i = 0; > - > - rcu_read_lock(); > - seq = read_seqcount_begin(&obj->seq); > + struct dma_resv_iter cursor; > + struct dma_fence *fence; > > - fence_excl = dma_resv_excl_fence(obj); > - if (fence_excl && !dma_fence_get_rcu(fence_excl)) > - goto unlock; > + *shared_count = 0; > + *shared = NULL; > > - fobj = dma_resv_shared_list(obj); > - if (fobj) > - sz += sizeof(*shared) * fobj->shared_max; > + if (fence_excl) > + *fence_excl = NULL; > > - if (!pfence_excl && fence_excl) > - sz += sizeof(*shared); > + rcu_read_lock(); > + dma_resv_iter_begin(&cursor, obj, true); > + dma_resv_for_each_fence_unlocked(&cursor, fence) { > > - if (sz) { > - struct dma_fence **nshared; > + if (cursor.is_first) { Yeah with the second one here I definitely think we need a dma_resv_iter_is_restart() helper. I'm not sure whether that should have is_first or restart_only semantics, but I guess gcc won't see through the maze anyway, and hence initializing everything to NULL/0 is required. Also is_first is a bit of a confusing name, imo. You mean "is this the first fence" but readers could equally read this as "is this the first time we're in the loop", which is rather confusing. Hence I think an iter_is_restart() or maybe iter_restarted() naming is a notch clearer. 
> + unsigned int count; > > - nshared = krealloc(shared, sz, > - GFP_NOWAIT | __GFP_NOWARN); > - if (!nshared) { > - rcu_read_unlock(); > + while (*shared_count) > + dma_fence_put((*shared)[--(*shared_count)]); > > - dma_fence_put(fence_excl); > - fence_excl = NULL; > + if (fence_excl) > + dma_fence_put(*fence_excl); > > - nshared = krealloc(shared, sz, GFP_KERNEL); > - if (nshared) { > - shared = nshared; > - continue; > - } > + count = cursor.fences ? cursor.fences->shared_count : 0; > + count += fence_excl ? 0 : 1; > + rcu_read_unlock(); > > - ret = -ENOMEM; > - break; > - } > - shared = nshared; > - shared_count = fobj ? fobj->shared_count : 0; > - for (i = 0; i < shared_count; ++i) { > - shared[i] = rcu_dereference(fobj->shared[i]); > - if (!dma_fence_get_rcu(shared[i])) > - break; > + /* Eventually re-allocate the array */ > + *shared = krealloc_array(*shared, count, > + sizeof(void *), > + GFP_KERNEL); > + if (count && !*shared) { > + dma_resv_iter_end(&cursor); > + return -ENOMEM; > } > + rcu_read_lock(); > } > > - if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { > - while (i--) > - dma_fence_put(shared[i]); > - dma_fence_put(fence_excl); > - goto unlock; > - } > - > - ret = 0; > -unlock: > - rcu_read_unlock(); > - } while (ret); > - > - if (pfence_excl) > - *pfence_excl = fence_excl; > - else if (fence_excl) > - shared[shared_count++] = fence_excl; > + if (dma_resv_iter_is_exclusive(&cursor) && fence_excl) > + *fence_excl = fence; > + else > + (*shared)[(*shared_count)++] = fence; > > - if (!shared_count) { > - kfree(shared); > - shared = NULL; > + /* Don't drop the reference */ > + fence = NULL; > } > + dma_resv_iter_end(&cursor); > + rcu_read_unlock(); > > - *pshared_count = shared_count; > - *pshared = shared; > - return ret; > + return 0; > } > EXPORT_SYMBOL_GPL(dma_resv_get_fences); With the wrapper I'd like to have: Reviewed-by: Daniel Vetter <daniel.vetter@xxxxxxxx> > > -- > 2.25.1 > -- Daniel Vetter Software Engineer, Intel 
Corporation http://blog.ffwll.ch