Re: [PATCH 3/6] drm/gem: use new ww_mutex_(un)lock_for_each macros

On Fri, Jun 14, 2019 at 03:19:16PM +0200, Peter Zijlstra wrote:
> On Fri, Jun 14, 2019 at 02:41:22PM +0200, Christian König wrote:
> > Use the provided macros instead of implementing deadlock handling on our own.
> > 
> > Signed-off-by: Christian König <christian.koenig@xxxxxxx>
> > ---
> >  drivers/gpu/drm/drm_gem.c | 49 ++++++++++-----------------------------
> >  1 file changed, 12 insertions(+), 37 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> > index 50de138c89e0..6e4623d3bee2 100644
> > --- a/drivers/gpu/drm/drm_gem.c
> > +++ b/drivers/gpu/drm/drm_gem.c
> > @@ -1307,51 +1307,26 @@ int
> >  drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
> >  			  struct ww_acquire_ctx *acquire_ctx)
> >  {
> > -	int contended = -1;
> > +	struct ww_mutex *contended;
> >  	int i, ret;
> >  
> >  	ww_acquire_init(acquire_ctx, &reservation_ww_class);
> >  
> > -retry:
> > -	if (contended != -1) {
> > -		struct drm_gem_object *obj = objs[contended];
> > -
> > -		ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
> > -						       acquire_ctx);
> > -		if (ret) {
> > -			ww_acquire_done(acquire_ctx);
> > -			return ret;
> > -		}
> > -	}
> > -
> > -	for (i = 0; i < count; i++) {
> > -		if (i == contended)
> > -			continue;
> > -
> > -		ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
> > -						  acquire_ctx);
> > -		if (ret) {
> > -			int j;
> > -
> > -			for (j = 0; j < i; j++)
> > -				ww_mutex_unlock(&objs[j]->resv->lock);
> > -
> > -			if (contended != -1 && contended >= i)
> > -				ww_mutex_unlock(&objs[contended]->resv->lock);
> > -
> > -			if (ret == -EDEADLK) {
> > -				contended = i;
> > -				goto retry;
> > -			}
> > -
> > -			ww_acquire_done(acquire_ctx);
> > -			return ret;
> > -		}
> > -	}
> 
> I note that all the sites you use this on are simple index iterators;
> so how about something like this:
> 
> void ww_mutex_unlock_all(int count, void *data, struct ww_mutex *(*func)(int, void *))
> {
> 	struct ww_mutex *lock;
> 	int i;
> 
> 	for (i = 0; i < count; i++) {
> 		lock = func(i, data);
> 		ww_mutex_unlock(lock);
> 	}
> }
> 
> int ww_mutex_lock_all(int count, struct ww_acquire_ctx *acquire_ctx, bool intr,
> 		      void *data, struct ww_mutex *(*func)(int, void *))
> {
> 	int i, ret, contended = -1;
> 	struct ww_mutex *lock;
> 
> retry:
> 	if (contended != -1) {
> 		lock = func(contended, data);
> 		if (intr) {
> 			ret = ww_mutex_lock_slow_interruptible(lock, acquire_ctx);
> 		} else {
> 			ww_mutex_lock_slow(lock, acquire_ctx);
> 			ret = 0;
> 		}
> 
> 		if (ret) {
> 			ww_acquire_done(acquire_ctx);
> 			return ret;
> 		}
> 	}
> 
> 	for (i = 0; i < count; i++) {
> 		if (i == contended)
> 			continue;
> 
> 		lock = func(i, data);
> 		if (intr)
> 			ret = ww_mutex_lock_interruptible(lock, acquire_ctx);
> 		else
> 			ret = ww_mutex_lock(lock, acquire_ctx);
> 
> 		if (ret) {
> 			ww_mutex_unlock_all(i, data, func);
> 			if (contended > i) {
> 				lock = func(contended, data);
> 				ww_mutex_unlock(lock);
> 			}
> 
> 			if (ret == -EDEADLK) {
> 				contended = i;
> 				goto retry;
> 			}
> 
> 			ww_acquire_done(acquire_ctx);
> 			return ret;
> 		}
> 	}
> 
> 	ww_acquire_done(acquire_ctx);
> 	return 0;
> }
> 
> > +	ww_mutex_lock_for_each(for (i = 0; i < count; i++),
> > +			       &objs[i]->resv->lock, contended, ret, true,
> > +			       acquire_ctx)
> > +		if (ret)
> > +			goto error;
> 
> which then becomes:
> 
> struct ww_mutex *gem_ww_mutex_func(int i, void *data)
> {
> 	struct drm_gem_object **objs = data;
> 	return &objs[i]->resv->lock;
> }
> 
> 	ret = ww_mutex_lock_all(count, acquire_ctx, true, objs, gem_ww_mutex_func);
> 
> >  	ww_acquire_done(acquire_ctx);
> >  
> >  	return 0;
> > +
> > +error:
> > +	ww_mutex_unlock_for_each(for (i = 0; i < count; i++),
> > +				 &objs[i]->resv->lock, contended);
> > +	ww_acquire_done(acquire_ctx);
> > +	return ret;
> >  }
> >  EXPORT_SYMBOL(drm_gem_lock_reservations);

Another idea, entirely untested (I guess making sure that we can use the
same iterator for both locking and unlocking in the contended case will be
fun), but maybe something like this:

	WW_MUTEX_LOCK_BEGIN();
	driver_for_each_loop (iter, pos) {
		WW_MUTEX_LOCK(&pos->ww_mutex);
	}
	WW_MUTEX_LOCK_END();

That way we can reuse any and all iterators that will ever show up, at
least. It's still horrible, because the macros need to jump around
between all of them, but it would also make this useful for more cases:
say you need to trylock some LRU lock to get at your next ww_mutex, or
do a kref_get_unless_zero first. Buffer eviction loops tend to need
exactly that, and it would all get ugly real fast if we had to stuff it
into some iterator argument.
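
Very rough sketch of how the expansion could look, to make the
jumping-around concrete. Entirely untested and not what Christian's
series does; the fixed tracking array in particular is a cheat, since it
lets the END macro unwind without re-running the caller's iterator,
which is exactly the problem a real version has to solve. The bare
labels also mean one BEGIN/END pair per function, same as
DRM_MODESET_LOCK_ALL_BEGIN/END:

/*
 * Hypothetical sketch only. Always takes the interruptible path and
 * does no overflow check on the tracking array.
 */
#define WW_MAX_TRACKED 64	/* made-up limit */

struct ww_lock_state {
	struct ww_mutex *locked[WW_MAX_TRACKED];
	unsigned int nr;
	struct ww_mutex *contended;	/* held via the slow path */
	struct ww_mutex *failed;	/* lock that returned -EDEADLK */
	int ret;
};

#define WW_MUTEX_LOCK_BEGIN(state)					\
	(state)->contended = NULL;					\
	(state)->ret = 0;						\
retry:									\
	(state)->nr = 0;

#define WW_MUTEX_LOCK(state, lock, ctx) do {				\
	if ((lock) == (state)->contended) {				\
		/* already acquired on the slow path before retry */	\
		(state)->contended = NULL;				\
		(state)->locked[(state)->nr++] = (lock);		\
		break;							\
	}								\
	(state)->ret = ww_mutex_lock_interruptible((lock), (ctx));	\
	if ((state)->ret) {						\
		(state)->failed = (lock);				\
		goto unwind;						\
	}							\
	(state)->locked[(state)->nr++] = (lock);			\
} while (0)

#define WW_MUTEX_LOCK_END(state, ctx)					\
	ww_acquire_done(ctx);						\
	goto done;							\
unwind:									\
	while ((state)->nr)						\
		ww_mutex_unlock((state)->locked[--(state)->nr]);	\
	if ((state)->contended)	/* held, not re-encountered yet */	\
		ww_mutex_unlock((state)->contended);			\
	(state)->contended = NULL;					\
	if ((state)->ret == -EDEADLK) {					\
		(state)->ret = ww_mutex_lock_slow_interruptible(	\
					(state)->failed, (ctx));	\
		if (!(state)->ret) {					\
			(state)->contended = (state)->failed;		\
			goto retry;					\
		}							\
	}								\
	ww_acquire_done(ctx);						\
done:	;

drm_gem_lock_reservations() would then boil down to:

	struct ww_lock_state state;
	int i;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

	WW_MUTEX_LOCK_BEGIN(&state);
	for (i = 0; i < count; i++)
		WW_MUTEX_LOCK(&state, &objs[i]->resv->lock, acquire_ctx);
	WW_MUTEX_LOCK_END(&state, acquire_ctx);

	return state.ret;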

This is kinda what we went with for modeset locks with
DRM_MODESET_LOCK_ALL_BEGIN/END; at least you can grab more locks in
between the pair. But that covers a much more limited set of use-cases,
so maybe the idea is too fragile for ww_mutex in full generality.
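
For reference, the modeset pair gets used roughly like this (from
memory, so the exact macro signatures may differ between kernel
versions, and the helper in the middle is made up):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
				   DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);

	/*
	 * Everything in here runs with all modeset locks held and can
	 * be re-executed after a deadlock backoff, so it must be safe
	 * to repeat. More locks can be taken against the same ctx.
	 */
	ret = update_planes(dev, &ctx);	/* hypothetical helper */

	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	return ret;

The thing that makes this work is that drm_modeset_acquire_ctx tracks
every lock taken against it, so the END macro can drop them all on
backoff without re-running the caller's loop; that is precisely the
part that's awkward for bare ww_mutex.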

Not going to type out a full version of this, since I'm already too deep
into weekend mode, but I can give it a stab next week.
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel