On Wed, Oct 21, 2015 at 10:21:19AM +0100, Chris Wilson wrote:
> We now have two implementations for vmapping a whole object, one for
> dma-buf and one for the ringbuffer. If we couple the vmapping into the
> obj->pages lifetime, then we can reuse an obj->vmapping for both and at
> the same time couple it into the shrinker.
>
> v2: Mark the failable kmalloc() as __GFP_NOWARN (vsyrjala)
>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/i915_drv.h         | 12 ++++---
>  drivers/gpu/drm/i915/i915_gem.c         | 41 ++++++++++++++++++++++++
>  drivers/gpu/drm/i915/i915_gem_dmabuf.c  | 55 +++++----------------------------
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 53 ++++++++++---------------------
>  4 files changed, 72 insertions(+), 89 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 4093eedfd664..343a0a723d2c 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2089,10 +2089,7 @@ struct drm_i915_gem_object {
>  		struct scatterlist *sg;
>  		int last;
>  	} get_page;
> -
> -	/* prime dma-buf support */
> -	void *dma_buf_vmapping;
> -	int vmapping_count;
> +	void *vmapping;
>  
>  	/** Breadcrumb of last rendering to the buffer.
>  	 * There can only be one writer, but we allow for multiple readers.
> @@ -2840,12 +2837,19 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
>  	BUG_ON(obj->pages == NULL);
>  	obj->pages_pin_count++;
>  }
> +
>  static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
>  {
>  	BUG_ON(obj->pages_pin_count == 0);
>  	obj->pages_pin_count--;
>  }
>  
> +void *__must_check i915_gem_object_pin_vmap(struct drm_i915_gem_object *obj);

erm, that should be in a header.

> +static inline void i915_gem_object_unpin_vmap(struct drm_i915_gem_object *obj)
> +{
> +	i915_gem_object_unpin_pages(obj);
> +}

And this one maybe right next to it. Also, kerneldoc is missing for both
of them.

With that polish applied this is

Reviewed-by: Daniel Vetter <daniel.vetter@xxxxxxxx>

Please push the entire pile once Dave Airlie has acked the drm part for
drm_malloc_ab_gfp.
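Aside, since the kerneldoc is missing anyway: here's a rough sketch of
what it could say, going purely by what the patch itself implements
(mapping cached on first use, torn down together with the pages). Treat
the wording, and the struct_mutex note in particular, as my reading of
the code rather than anything definitive:

/**
 * i915_gem_object_pin_vmap - pin an object's backing pages and vmap them
 * @obj: the GEM object to map into the kernel address space
 *
 * Pins the backing storage of @obj and returns a contiguous kernel
 * virtual mapping of the whole object, creating the mapping on first
 * use and reusing the cached one on subsequent calls. The mapping is
 * only torn down when the object's pages are released, which couples
 * it into the shrinker. Callers must hold struct_mutex (assumed here,
 * since all callers in this patch do).
 *
 * Returns the mapped address on success, an ERR_PTR() on failure.
 */

/**
 * i915_gem_object_unpin_vmap - unpin the pages backing a vmapped object
 * @obj: the GEM object to unpin
 *
 * Drops the page pin taken by i915_gem_object_pin_vmap(). The cached
 * vmapping itself stays around until the object's pages are put.
 */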
-Daniel

> +
>  int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
>  int i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  			 struct intel_engine_cs *to,
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 297d6bbbe0f2..872712c4b0ac 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2187,6 +2187,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>  	ops->put_pages(obj);
>  	obj->pages = NULL;
>  
> +	if (obj->vmapping) {
> +		vunmap(obj->vmapping);
> +		obj->vmapping = NULL;
> +	}
> +
>  	i915_gem_object_invalidate(obj);
>  
>  	return 0;
> @@ -2353,6 +2358,42 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>  	return 0;
>  }
>  
> +void *i915_gem_object_pin_vmap(struct drm_i915_gem_object *obj)
> +{
> +	int ret;
> +
> +	ret = i915_gem_object_get_pages(obj);
> +	if (ret)
> +		return ERR_PTR(ret);
> +
> +	i915_gem_object_pin_pages(obj);
> +
> +	if (obj->vmapping == NULL) {
> +		struct sg_page_iter sg_iter;
> +		struct page **pages;
> +		int n;
> +
> +		n = obj->base.size >> PAGE_SHIFT;
> +		pages = kmalloc(n*sizeof(*pages), GFP_TEMPORARY | __GFP_NOWARN);
> +		if (pages == NULL)
> +			pages = drm_malloc_ab(n, sizeof(*pages));
> +		if (pages != NULL) {
> +			n = 0;
> +			for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
> +				pages[n++] = sg_page_iter_page(&sg_iter);
> +
> +			obj->vmapping = vmap(pages, n, 0, PAGE_KERNEL);
> +			drm_free_large(pages);
> +		}
> +		if (obj->vmapping == NULL) {
> +			i915_gem_object_unpin_pages(obj);
> +			return ERR_PTR(-ENOMEM);
> +		}
> +	}
> +
> +	return obj->vmapping;
> +}
> +
>  void i915_vma_move_to_active(struct i915_vma *vma,
>  			     struct drm_i915_gem_request *req)
>  {
> diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> index e9c2bfd85b52..5c4163d3845a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> @@ -95,14 +95,12 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
>  {
>  	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
>  
> -	mutex_lock(&obj->base.dev->struct_mutex);
> -
>  	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
>  	sg_free_table(sg);
>  	kfree(sg);
>  
> -	i915_gem_object_unpin_pages(obj);
> -
> +	mutex_lock(&obj->base.dev->struct_mutex);
> +	i915_gem_object_unpin_vmap(obj);
>  	mutex_unlock(&obj->base.dev->struct_mutex);
>  }
>  
> @@ -110,51 +108,17 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
>  {
>  	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
>  	struct drm_device *dev = obj->base.dev;
> -	struct sg_page_iter sg_iter;
> -	struct page **pages;
> -	int ret, i;
> +	void *addr;
> +	int ret;
>  
>  	ret = i915_mutex_lock_interruptible(dev);
>  	if (ret)
>  		return ERR_PTR(ret);
>  
> -	if (obj->dma_buf_vmapping) {
> -		obj->vmapping_count++;
> -		goto out_unlock;
> -	}
> -
> -	ret = i915_gem_object_get_pages(obj);
> -	if (ret)
> -		goto err;
> -
> -	i915_gem_object_pin_pages(obj);
> -
> -	ret = -ENOMEM;
> -
> -	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
> -	if (pages == NULL)
> -		goto err_unpin;
> -
> -	i = 0;
> -	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
> -		pages[i++] = sg_page_iter_page(&sg_iter);
> -
> -	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
> -	drm_free_large(pages);
> -
> -	if (!obj->dma_buf_vmapping)
> -		goto err_unpin;
> -
> -	obj->vmapping_count = 1;
> -out_unlock:
> +	addr = i915_gem_object_pin_vmap(obj);
>  	mutex_unlock(&dev->struct_mutex);
> -	return obj->dma_buf_vmapping;
>  
> -err_unpin:
> -	i915_gem_object_unpin_pages(obj);
> -err:
> -	mutex_unlock(&dev->struct_mutex);
> -	return ERR_PTR(ret);
> +	return addr;
>  }
>  
>  static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
> @@ -163,12 +127,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
>  	struct drm_device *dev = obj->base.dev;
>  
>  	mutex_lock(&dev->struct_mutex);
> -	if (--obj->vmapping_count == 0) {
> -		vunmap(obj->dma_buf_vmapping);
> -		obj->dma_buf_vmapping = NULL;
> -
> -		i915_gem_object_unpin_pages(obj);
> -	}
> +	i915_gem_object_unpin_pages(obj);
>  	mutex_unlock(&dev->struct_mutex);
>  }
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index f81ec7785fac..49aa52440db2 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1967,34 +1967,12 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
>  void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
>  {
>  	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
> -		vunmap(ringbuf->virtual_start);
> +		i915_gem_object_unpin_vmap(ringbuf->obj);
>  	else
>  		iounmap(ringbuf->virtual_start);
> -	ringbuf->virtual_start = NULL;
>  	i915_gem_object_ggtt_unpin(ringbuf->obj);
>  }
>  
> -static u32 *vmap_obj(struct drm_i915_gem_object *obj)
> -{
> -	struct sg_page_iter sg_iter;
> -	struct page **pages;
> -	void *addr;
> -	int i;
> -
> -	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
> -	if (pages == NULL)
> -		return NULL;
> -
> -	i = 0;
> -	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
> -		pages[i++] = sg_page_iter_page(&sg_iter);
> -
> -	addr = vmap(pages, i, 0, PAGE_KERNEL);
> -	drm_free_large(pages);
> -
> -	return addr;
> -}
> -
>  int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
>  				     struct intel_ringbuffer *ringbuf)
>  {
> @@ -2008,15 +1986,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
>  			return ret;
>  
>  		ret = i915_gem_object_set_to_cpu_domain(obj, true);
> -		if (ret) {
> -			i915_gem_object_ggtt_unpin(obj);
> -			return ret;
> -		}
> +		if (ret)
> +			goto unpin;
>  
> -		ringbuf->virtual_start = vmap_obj(obj);
> -		if (ringbuf->virtual_start == NULL) {
> -			i915_gem_object_ggtt_unpin(obj);
> -			return -ENOMEM;
> +		ringbuf->virtual_start = i915_gem_object_pin_vmap(obj);
> +		if (IS_ERR(ringbuf->virtual_start)) {
> +			ret = PTR_ERR(ringbuf->virtual_start);
> +			ringbuf->virtual_start = NULL;
> +			goto unpin;
>  		}
>  	} else {
>  		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
> @@ -2024,20 +2001,22 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
>  			return ret;
>  
>  		ret = i915_gem_object_set_to_gtt_domain(obj, true);
> -		if (ret) {
> -			i915_gem_object_ggtt_unpin(obj);
> -			return ret;
> -		}
> +		if (ret)
> +			goto unpin;
>  
>  		ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
>  						    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
>  		if (ringbuf->virtual_start == NULL) {
> -			i915_gem_object_ggtt_unpin(obj);
> -			return -EINVAL;
> +			ret = -ENOMEM;
> +			goto unpin;
>  		}
>  	}
>  
>  	return 0;
> +
> +unpin:
> +	i915_gem_object_ggtt_unpin(obj);
> +	return ret;
>  }
>  
>  void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
> -- 
> 2.6.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch