Re: [PATCH 2/6] drm/omap: gem: Merge __omap_gem_get_pages() and omap_gem_attach_pages()

On Monday, 2018-04-02 at 21:50 +0300, Laurent Pinchart wrote:
> The __omap_gem_get_pages() function is a wrapper around
> omap_gem_attach_pages() that returns the omap_obj->pages pointer through
> a function argument. Some callers don't need the pages pointer, and all
> of them can access omap_obj->pages directly. To simplify the code merge
> the __omap_gem_get_pages() wrapper with omap_gem_attach_pages() and
> update the callers accordingly.
> 
> Signed-off-by: Laurent Pinchart <laurent.pinchart@xxxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/omapdrm/omap_gem.c | 62 ++++++++++++++------------------------
>  1 file changed, 23 insertions(+), 39 deletions(-)
> 
> diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
> index 6cfcf60cffe3..13fea207343e 100644
> --- a/drivers/gpu/drm/omapdrm/omap_gem.c
> +++ b/drivers/gpu/drm/omapdrm/omap_gem.c
> @@ -222,7 +222,10 @@ static void omap_gem_evict(struct drm_gem_object *obj)
>   * Page Management
>   */
>  
> -/** ensure backing pages are allocated */
> +/*
> + * Ensure backing pages are allocated. Must be called with the omap_obj.lock
> + * held.
> + */

Drive-by comment: I always find it hard to validate those comment-only
lock prerequisites, especially once call stacks get deeper.

What we do in etnaviv is to make such lock comments executable by adding
a lockdep_assert_held() that validates the locking assumption. That way,
if you ever manage to violate the locking in a later rework, a
lockdep-enabled debug build will complain right at the offending spot.
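
In case it helps, here is a rough, untested sketch on top of this patch
(assuming the omap_obj.lock mentioned in the new comment is, or becomes,
a plain struct mutex; I haven't checked the rest of the series):

	static int omap_gem_attach_pages(struct drm_gem_object *obj)
	{
		struct omap_gem_object *omap_obj = to_omap_bo(obj);

		/*
		 * Executable form of the "must be called with the
		 * omap_obj.lock held" comment: compiles away without
		 * lockdep, warns at the offending call site on a
		 * lockdep-enabled build.
		 */
		lockdep_assert_held(&omap_obj->lock);

		...
	}

lockdep_assert_held() comes from <linux/lockdep.h> and works on mutexes
and spinlocks alike, so it doesn't constrain which lock type you end up
using.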

Regards,
Lucas

>  static int omap_gem_attach_pages(struct drm_gem_object *obj)
>  {
>  	struct drm_device *dev = obj->dev;
> @@ -232,7 +235,12 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
>  	int i, ret;
>  	dma_addr_t *addrs;
>  
> -	WARN_ON(omap_obj->pages);
> +	/*
> +	 * If not using shmem (in which case backing pages don't need to be
> +	 * allocated) or if pages are already allocated we're done.
> +	 */
> +	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
> +		return 0;
>  
>  	pages = drm_gem_get_pages(obj);
>  	if (IS_ERR(pages)) {
> @@ -288,29 +296,6 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
>  	return ret;
>  }
>  
> -/* acquire pages when needed (for example, for DMA where physically
> - * contiguous buffer is not required
> - */
> -static int __omap_gem_get_pages(struct drm_gem_object *obj,
> -				struct page ***pages)
> -{
> -	struct omap_gem_object *omap_obj = to_omap_bo(obj);
> -	int ret = 0;
> -
> -	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
> -		ret = omap_gem_attach_pages(obj);
> -		if (ret) {
> -			dev_err(obj->dev->dev, "could not attach pages\n");
> -			return ret;
> -		}
> -	}
> -
> -	/* TODO: even phys-contig.. we should have a list of pages? */
> -	*pages = omap_obj->pages;
> -
> -	return 0;
> -}
> -
>  /** release backing pages */
>  static void omap_gem_detach_pages(struct drm_gem_object *obj)
>  {
> @@ -516,7 +501,6 @@ int omap_gem_fault(struct vm_fault *vmf)
>  	struct drm_gem_object *obj = vma->vm_private_data;
>  	struct omap_gem_object *omap_obj = to_omap_bo(obj);
>  	struct drm_device *dev = obj->dev;
> -	struct page **pages;
>  	int ret;
>  
>  	/* Make sure we don't parallel update on a fault, nor move or remove
> @@ -525,7 +509,7 @@ int omap_gem_fault(struct vm_fault *vmf)
>  	mutex_lock(&dev->struct_mutex);
>  
>  	/* if a shmem backed object, make sure we have pages attached now */
> -	ret = __omap_gem_get_pages(obj, &pages);
> +	ret = omap_gem_attach_pages(obj);
>  	if (ret)
>  		goto fail;
>  
> @@ -694,12 +678,12 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
>  
>  	/* if we aren't mapped yet, we don't need to do anything */
>  	if (omap_obj->block) {
> -		struct page **pages;
> -
> -		ret = __omap_gem_get_pages(obj, &pages);
> +		ret = omap_gem_attach_pages(obj);
>  		if (ret)
>  			goto fail;
> -		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
> +
> +		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
> +				roll, true);
>  		if (ret)
>  			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
>  	}
> @@ -810,14 +794,13 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
>  
>  	if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
>  		if (omap_obj->dma_addr_cnt == 0) {
> -			struct page **pages;
>  			u32 npages = obj->size >> PAGE_SHIFT;
>  			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
>  			struct tiler_block *block;
>  
>  			BUG_ON(omap_obj->block);
>  
> -			ret = __omap_gem_get_pages(obj, &pages);
> +			ret = omap_gem_attach_pages(obj);
>  			if (ret)
>  				goto fail;
>  
> @@ -837,7 +820,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
>  			}
>  
>  			/* TODO: enable async refill.. */
> -			ret = tiler_pin(block, pages, npages,
> +			ret = tiler_pin(block, omap_obj->pages, npages,
>  					omap_obj->roll, true);
>  			if (ret) {
>  				tiler_release(block);
> @@ -946,16 +929,18 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
>  int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
>  		bool remap)
>  {
> +	struct omap_gem_object *omap_obj = to_omap_bo(obj);
>  	int ret;
> +
>  	if (!remap) {
> -		struct omap_gem_object *omap_obj = to_omap_bo(obj);
>  		if (!omap_obj->pages)
>  			return -ENOMEM;
>  		*pages = omap_obj->pages;
>  		return 0;
>  	}
>  	mutex_lock(&obj->dev->struct_mutex);
> -	ret = __omap_gem_get_pages(obj, pages);
> +	ret = omap_gem_attach_pages(obj);
> +	*pages = omap_obj->pages;
>  	mutex_unlock(&obj->dev->struct_mutex);
>  	return ret;
>  }
> @@ -980,13 +965,12 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
>  	struct omap_gem_object *omap_obj = to_omap_bo(obj);
>  	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
>  	if (!omap_obj->vaddr) {
> -		struct page **pages;
>  		int ret;
>  
> -		ret = __omap_gem_get_pages(obj, &pages);
> +		ret = omap_gem_attach_pages(obj);
>  		if (ret)
>  			return ERR_PTR(ret);
> -		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
> +		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
>  				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
>  	}
>  	return omap_obj->vaddr;
_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel



