Re: [PATCH WW 09/13] drm/ttm: inline ttm_bo_reserve and related calls

On Thu, Jun 27, 2013 at 01:48:24PM +0200, Maarten Lankhorst wrote:
> Makes lockdep a lot more useful.
> 
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/ttm/ttm_bo.c           | 105 ++------------------
>  drivers/gpu/drm/ttm/ttm_execbuf_util.c |   9 +-
>  include/drm/ttm/ttm_bo_driver.h        | 175 ++++++++++++++++++++-------------

The function movement in the header makes the diff a bit hard to read, so if
possible I think that should be avoided. Also, I think I've spotted a few
kerneldoc updates (like s/EAGAIN/EDEADLK/) that should be part of the main
conversion patch instead.
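
For reference, a rough sketch of the two-lock backoff dance these inlines are
meant to support (purely illustrative and not from the patch: my_reserve_two()
is a made-up helper, and I'm assuming the reservation_ww_class that
ttm_execbuf_util uses):

static int my_reserve_two(struct ttm_buffer_object *a,
			  struct ttm_buffer_object *b,
			  struct ww_acquire_ctx *ticket)
{
	int ret;

	ww_acquire_init(ticket, &reservation_ww_class);

	/* the first lock cannot deadlock: nothing is held under @ticket yet */
	ret = ttm_bo_reserve(a, true, false, true, ticket);
	if (ret)
		goto out_fini;

	ret = ttm_bo_reserve(b, true, false, true, ticket);
	while (ret == -EDEADLK) {
		/* we got wounded: drop what we hold, sleep on the contended
		 * bo via the slowpath, then retry the remaining one */
		ttm_bo_unreserve_ticket(a, ticket);
		ret = ttm_bo_reserve_slowpath(b, true, ticket);
		if (ret)
			goto out_fini;
		swap(a, b);	/* the old @b is now the one we hold */
		ret = ttm_bo_reserve(b, true, false, true, ticket);
	}
	if (ret) {
		ttm_bo_unreserve_ticket(a, ticket);
		goto out_fini;
	}

	ww_acquire_done(ticket);
	return 0;

out_fini:
	ww_acquire_fini(ticket);
	return ret;
}

Once done with the buffers the driver would drop both with
ttm_bo_unreserve_ticket() and call ww_acquire_fini() on the ticket.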
-Daniel

>  3 files changed, 117 insertions(+), 172 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 5f9fe80..a8a27f5 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -182,6 +182,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
>  		}
>  	}
>  }
> +EXPORT_SYMBOL(ttm_bo_add_to_lru);
>  
>  int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
>  {
> @@ -204,35 +205,6 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
>  	return put_count;
>  }
>  
> -int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
> -			  bool interruptible,
> -			  bool no_wait, bool use_ticket,
> -			  struct ww_acquire_ctx *ticket)
> -{
> -	int ret = 0;
> -
> -	if (no_wait) {
> -		bool success;
> -
> -		/* not valid any more, fix your locking! */
> -		if (WARN_ON(ticket))
> -			return -EBUSY;
> -
> -		success = ww_mutex_trylock(&bo->resv->lock);
> -		return success ? 0 : -EBUSY;
> -	}
> -
> -	if (interruptible)
> -		ret = ww_mutex_lock_interruptible(&bo->resv->lock,
> -						  ticket);
> -	else
> -		ret = ww_mutex_lock(&bo->resv->lock, ticket);
> -	if (ret == -EINTR)
> -		return -ERESTARTSYS;
> -	return ret;
> -}
> -EXPORT_SYMBOL(ttm_bo_reserve);
> -
>  static void ttm_bo_ref_bug(struct kref *list_kref)
>  {
>  	BUG();
> @@ -245,77 +217,16 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
>  		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
>  }
>  
> -int ttm_bo_reserve(struct ttm_buffer_object *bo,
> -		   bool interruptible,
> -		   bool no_wait, bool use_ticket,
> -		   struct ww_acquire_ctx *ticket)
> -{
> -	struct ttm_bo_global *glob = bo->glob;
> -	int put_count = 0;
> -	int ret;
> -
> -	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
> -				    ticket);
> -	if (likely(ret == 0)) {
> -		spin_lock(&glob->lru_lock);
> -		put_count = ttm_bo_del_from_lru(bo);
> -		spin_unlock(&glob->lru_lock);
> -		ttm_bo_list_ref_sub(bo, put_count, true);
> -	}
> -
> -	return ret;
> -}
> -
> -int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
> -			    bool interruptible, struct ww_acquire_ctx *ticket)
> -{
> -	struct ttm_bo_global *glob = bo->glob;
> -	int put_count = 0;
> -	int ret = 0;
> -
> -	if (interruptible)
> -		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
> -						       ticket);
> -	else
> -		ww_mutex_lock_slow(&bo->resv->lock, ticket);
> -
> -	if (likely(ret == 0)) {
> -		spin_lock(&glob->lru_lock);
> -		put_count = ttm_bo_del_from_lru(bo);
> -		spin_unlock(&glob->lru_lock);
> -		ttm_bo_list_ref_sub(bo, put_count, true);
> -	} else if (ret == -EINTR)
> -		ret = -ERESTARTSYS;
> -
> -	return ret;
> -}
> -EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
> -
> -void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
> +void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
>  {
> -	ttm_bo_add_to_lru(bo);
> -	ww_mutex_unlock(&bo->resv->lock);
> -}
> -
> -void ttm_bo_unreserve(struct ttm_buffer_object *bo)
> -{
> -	struct ttm_bo_global *glob = bo->glob;
> -
> -	spin_lock(&glob->lru_lock);
> -	ttm_bo_unreserve_ticket_locked(bo, NULL);
> -	spin_unlock(&glob->lru_lock);
> -}
> -EXPORT_SYMBOL(ttm_bo_unreserve);
> -
> -void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
> -{
> -	struct ttm_bo_global *glob = bo->glob;
> +	int put_count;
>  
> -	spin_lock(&glob->lru_lock);
> -	ttm_bo_unreserve_ticket_locked(bo, ticket);
> -	spin_unlock(&glob->lru_lock);
> +	spin_lock(&bo->glob->lru_lock);
> +	put_count = ttm_bo_del_from_lru(bo);
> +	spin_unlock(&bo->glob->lru_lock);
> +	ttm_bo_list_ref_sub(bo, put_count, true);
>  }
> -EXPORT_SYMBOL(ttm_bo_unreserve_ticket);
> +EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
>  
>  /*
>   * Call bo->mutex locked.
> diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> index 7392da5..6c91178 100644
> --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> @@ -44,12 +44,10 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list,
>  
>  		entry->reserved = false;
>  		if (entry->removed) {
> -			ttm_bo_unreserve_ticket_locked(bo, ticket);
> +			ttm_bo_add_to_lru(bo);
>  			entry->removed = false;
> -
> -		} else {
> -			ww_mutex_unlock(&bo->resv->lock);
>  		}
> +		ww_mutex_unlock(&bo->resv->lock);
>  	}
>  }
>  
> @@ -220,7 +218,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
>  		bo = entry->bo;
>  		entry->old_sync_obj = bo->sync_obj;
>  		bo->sync_obj = driver->sync_obj_ref(sync_obj);
> -		ttm_bo_unreserve_ticket_locked(bo, ticket);
> +		ttm_bo_add_to_lru(bo);
> +		ww_mutex_unlock(&bo->resv->lock);
>  		entry->reserved = false;
>  	}
>  	spin_unlock(&bdev->fence_lock);
> diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
> index ec18c5f..984fc2d 100644
> --- a/include/drm/ttm/ttm_bo_driver.h
> +++ b/include/drm/ttm/ttm_bo_driver.h
> @@ -33,6 +33,7 @@
>  #include <ttm/ttm_bo_api.h>
>  #include <ttm/ttm_memory.h>
>  #include <ttm/ttm_module.h>
> +#include <ttm/ttm_placement.h>
>  #include <drm/drm_mm.h>
>  #include <drm/drm_global.h>
>  #include <linux/workqueue.h>
> @@ -772,6 +773,55 @@ extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
>  			   bool interruptible);
>  extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
>  
> +extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
> +extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
> +
> +/**
> + * ttm_bo_reserve_nolru:
> + *
> + * @bo: A pointer to a struct ttm_buffer_object.
> + * @interruptible: Sleep interruptible if waiting.
> + * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
> + * @use_ticket: If @bo is already reserved, Only sleep waiting for
> + * it to become unreserved if @ticket->stamp is older.
> + *
> + * Will not remove reserved buffers from the lru lists.
> + * Otherwise identical to ttm_bo_reserve.
> + *
> + * Returns:
> + * -EDEADLK: The reservation may cause a deadlock.
> + * Release all buffer reservations, wait for @bo to become unreserved and
> + * try again. (only if use_sequence == 1).
> + * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
> + * a signal. Release all buffer reservations and return to user-space.
> + * -EBUSY: The function needed to sleep, but @no_wait was true
> + * -EALREADY: Bo already reserved using @ticket. This error code will only
> + * be returned if @use_ticket is set to true.
> + */
> +static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
> +				       bool interruptible,
> +				       bool no_wait, bool use_ticket,
> +				       struct ww_acquire_ctx *ticket)
> +{
> +	int ret = 0;
> +
> +	if (no_wait) {
> +		bool success;
> +		if (WARN_ON(ticket))
> +			return -EBUSY;
> +
> +		success = ww_mutex_trylock(&bo->resv->lock);
> +		return success ? 0 : -EBUSY;
> +	}
> +
> +	if (interruptible)
> +		ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
> +	else
> +		ret = ww_mutex_lock(&bo->resv->lock, ticket);
> +	if (ret == -EINTR)
> +		return -ERESTARTSYS;
> +	return ret;
> +}
>  
>  /**
>   * ttm_bo_reserve:
> @@ -780,7 +830,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
>   * @interruptible: Sleep interruptible if waiting.
>   * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
>   * @use_ticket: If @bo is already reserved, Only sleep waiting for
> - * it to become unreserved if @sequence < (@bo)->sequence.
> + * it to become unreserved if @ticket->stamp is older.
>   *
>   * Locks a buffer object for validation. (Or prevents other processes from
>   * locking it for validation) and removes it from lru lists, while taking
> @@ -794,7 +844,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
>   * Processes attempting to reserve multiple buffers other than for eviction,
>   * (typically execbuf), should first obtain a unique 32-bit
>   * validation sequence number,
> - * and call this function with @use_sequence == 1 and @sequence == the unique
> + * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
>   * sequence number. If upon call of this function, the buffer object is already
>   * reserved, the validation sequence is checked against the validation
>   * sequence of the process currently reserving the buffer,
> @@ -809,37 +859,31 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
>   * will eventually succeed, preventing both deadlocks and starvation.
>   *
>   * Returns:
> - * -EAGAIN: The reservation may cause a deadlock.
> + * -EDEADLK: The reservation may cause a deadlock.
>   * Release all buffer reservations, wait for @bo to become unreserved and
>   * try again. (only if use_sequence == 1).
>   * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
>   * a signal. Release all buffer reservations and return to user-space.
>   * -EBUSY: The function needed to sleep, but @no_wait was true
> - * -EDEADLK: Bo already reserved using @sequence. This error code will only
> - * be returned if @use_sequence is set to true.
> + * -EALREADY: Bo already reserved using @ticket. This error code will only
> + * be returned if @use_ticket is set to true.
>   */
> -extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
> -			  bool interruptible,
> -			  bool no_wait, bool use_ticket,
> -			  struct ww_acquire_ctx *ticket);
> +static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
> +				 bool interruptible,
> +				 bool no_wait, bool use_ticket,
> +				 struct ww_acquire_ctx *ticket)
> +{
> +	int ret;
>  
> -/**
> - * ttm_bo_reserve_slowpath_nolru:
> - * @bo: A pointer to a struct ttm_buffer_object.
> - * @interruptible: Sleep interruptible if waiting.
> - * @sequence: Set (@bo)->sequence to this value after lock
> - *
> - * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
> - * from all our other reservations. Because there are no other reservations
> - * held by us, this function cannot deadlock any more.
> - *
> - * Will not remove reserved buffers from the lru lists.
> - * Otherwise identical to ttm_bo_reserve_slowpath.
> - */
> -extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
> -					 bool interruptible,
> -					 struct ww_acquire_ctx *ticket);
> +	WARN_ON(!atomic_read(&bo->kref.refcount));
> +
> +	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
> +				    ticket);
> +	if (likely(ret == 0))
> +		ttm_bo_del_sub_from_lru(bo);
>  
> +	return ret;
> +}
>  
>  /**
>   * ttm_bo_reserve_slowpath:
> @@ -851,45 +895,27 @@ extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
>   * from all our other reservations. Because there are no other reservations
>   * held by us, this function cannot deadlock any more.
>   */
> -extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
> -				   bool interruptible,
> -				   struct ww_acquire_ctx *ticket);
> +static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
> +					  bool interruptible,
> +					  struct ww_acquire_ctx *ticket)
> +{
> +	int ret = 0;
>  
> -/**
> - * ttm_bo_reserve_nolru:
> - *
> - * @bo: A pointer to a struct ttm_buffer_object.
> - * @interruptible: Sleep interruptible if waiting.
> - * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
> - * @use_sequence: If @bo is already reserved, Only sleep waiting for
> - * it to become unreserved if @sequence < (@bo)->sequence.
> - *
> - * Will not remove reserved buffers from the lru lists.
> - * Otherwise identical to ttm_bo_reserve.
> - *
> - * Returns:
> - * -EAGAIN: The reservation may cause a deadlock.
> - * Release all buffer reservations, wait for @bo to become unreserved and
> - * try again. (only if use_sequence == 1).
> - * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
> - * a signal. Release all buffer reservations and return to user-space.
> - * -EBUSY: The function needed to sleep, but @no_wait was true
> - * -EDEADLK: Bo already reserved using @sequence. This error code will only
> - * be returned if @use_sequence is set to true.
> - */
> -extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
> -				 bool interruptible,
> -				 bool no_wait, bool use_ticket,
> -				 struct ww_acquire_ctx *ticket);
> +	WARN_ON(!atomic_read(&bo->kref.refcount));
>  
> -/**
> - * ttm_bo_unreserve
> - *
> - * @bo: A pointer to a struct ttm_buffer_object.
> - *
> - * Unreserve a previous reservation of @bo.
> - */
> -extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
> +	if (interruptible)
> +		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
> +						       ticket);
> +	else
> +		ww_mutex_lock_slow(&bo->resv->lock, ticket);
> +
> +	if (likely(ret == 0))
> +		ttm_bo_del_sub_from_lru(bo);
> +	else if (ret == -EINTR)
> +		ret = -ERESTARTSYS;
> +
> +	return ret;
> +}
>  
>  /**
>   * ttm_bo_unreserve_ticket
> @@ -898,19 +924,28 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
>   *
>   * Unreserve a previous reservation of @bo made with @ticket.
>   */
> -extern void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
> -				    struct ww_acquire_ctx *ticket);
> +static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
> +					   struct ww_acquire_ctx *t)
> +{
> +	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
> +		spin_lock(&bo->glob->lru_lock);
> +		ttm_bo_add_to_lru(bo);
> +		spin_unlock(&bo->glob->lru_lock);
> +	}
> +	ww_mutex_unlock(&bo->resv->lock);
> +}
>  
>  /**
> - * ttm_bo_unreserve_locked
> + * ttm_bo_unreserve
> + *
>   * @bo: A pointer to a struct ttm_buffer_object.
> - * @ticket: ww_acquire_ctx used for reserving, or NULL
>   *
> - * Unreserve a previous reservation of @bo made with @ticket.
> - * Needs to be called with struct ttm_bo_global::lru_lock held.
> + * Unreserve a previous reservation of @bo.
>   */
> -extern void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo,
> -					   struct ww_acquire_ctx *ticket);
> +static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
> +{
> +	ttm_bo_unreserve_ticket(bo, NULL);
> +}
>  
>  /*
>   * ttm_bo_util.c
> -- 
> 1.8.3.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch